// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package checks

import (
"bytes"
"context"
"fmt"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"

"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"

"github.com/hashicorp/consul/agent/mock"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
|
|
|
|
|
2017-10-25 17:21:38 +00:00
|
|
|
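// uniqueID returns a random UUID string, panicking if generation fails.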
func uniqueID() string {
|
|
|
|
id, err := uuid.GenerateUUID()
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return id
|
|
|
|
}
|
|
|
|
|
2017-10-04 23:48:00 +00:00
|
|
|
func TestCheckMonitor_Script(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
tests := []struct {
|
|
|
|
script, status string
|
|
|
|
}{
|
|
|
|
{"exit 0", "passing"},
|
|
|
|
{"exit 1", "warning"},
|
|
|
|
{"exit 2", "critical"},
|
|
|
|
{"foobarbaz", "critical"},
|
2014-01-21 02:58:05 +00:00
|
|
|
}
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.status, func(t *testing.T) {
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2017-07-04 10:44:24 +00:00
|
|
|
check := &CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
Script: tt.script,
|
|
|
|
Interval: 25 * time.Millisecond,
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2017-07-04 10:44:24 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2017-07-04 10:44:24 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), tt.status; got != want {
|
2017-07-04 10:44:24 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
2014-01-21 02:58:05 +00:00
|
|
|
}
|
2014-01-21 03:12:40 +00:00
|
|
|
|
2017-10-04 23:48:00 +00:00
|
|
|
func TestCheckMonitor_Args(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-10-04 23:48:00 +00:00
|
|
|
tests := []struct {
|
|
|
|
args []string
|
|
|
|
status string
|
|
|
|
}{
|
|
|
|
{[]string{"sh", "-c", "exit 0"}, "passing"},
|
|
|
|
{[]string{"sh", "-c", "exit 1"}, "warning"},
|
|
|
|
{[]string{"sh", "-c", "exit 2"}, "critical"},
|
|
|
|
{[]string{"foobarbaz"}, "critical"},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.status, func(t *testing.T) {
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2020-01-28 23:50:41 +00:00
|
|
|
|
2017-10-04 23:48:00 +00:00
|
|
|
check := &CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
ScriptArgs: tt.args,
|
|
|
|
Interval: 25 * time.Millisecond,
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2017-10-04 23:48:00 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2017-10-04 23:48:00 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), tt.status; got != want {
|
2017-10-04 23:48:00 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-26 03:18:20 +00:00
|
|
|
func TestCheckMonitor_Timeout(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
// t.Parallel() // timing test. no parallel
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2016-02-26 03:18:20 +00:00
|
|
|
check := &CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
ScriptArgs: []string{"sh", "-c", "sleep 1 && exit 0"},
|
|
|
|
Interval: 50 * time.Millisecond,
|
|
|
|
Timeout: 25 * time.Millisecond,
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2016-02-26 03:18:20 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
time.Sleep(250 * time.Millisecond)
|
2016-02-26 03:18:20 +00:00
|
|
|
|
|
|
|
// Should have at least 2 updates
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.Updates(cid) < 2 {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have at least 2 updates %v", notif.UpdatesMap())
|
2016-02-26 03:18:20 +00:00
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.State(cid) != "critical" {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should be critical %v", notif.StateMap())
|
2016-02-26 03:18:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-18 03:39:11 +00:00
|
|
|
func TestCheckMonitor_RandomStagger(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
// t.Parallel() // timing test. no parallel
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
|
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
2014-12-18 03:39:11 +00:00
|
|
|
check := &CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
ScriptArgs: []string{"sh", "-c", "exit 0"},
|
|
|
|
Interval: 25 * time.Millisecond,
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2014-12-18 03:39:11 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
time.Sleep(500 * time.Millisecond)
|
2014-12-18 03:39:11 +00:00
|
|
|
|
|
|
|
// Should have at least 1 update
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.Updates(cid) < 1 {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have 1 or more updates %v", notif.UpdatesMap())
|
2014-12-18 03:39:11 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.State(cid) != api.HealthPassing {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should be %v %v", api.HealthPassing, notif.StateMap())
|
2014-12-18 03:39:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-29 22:28:56 +00:00
|
|
|
func TestCheckMonitor_LimitOutput(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
2014-04-29 22:28:56 +00:00
|
|
|
check := &CheckMonitor{
|
2019-06-26 15:43:25 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
ScriptArgs: []string{"od", "-N", "81920", "/dev/urandom"},
|
|
|
|
Interval: 25 * time.Millisecond,
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2014-04-29 22:28:56 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
time.Sleep(50 * time.Millisecond)
|
|
|
|
|
|
|
|
// Allow for extra bytes for the truncation message
|
2019-12-10 02:26:41 +00:00
|
|
|
if len(notif.Output(cid)) > DefaultBufSize+100 {
|
2014-04-29 22:28:56 +00:00
|
|
|
t.Fatalf("output size is too long")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-21 03:12:40 +00:00
|
|
|
func TestCheckTTL(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
// t.Parallel() // timing test. no parallel
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
2014-01-21 03:12:40 +00:00
|
|
|
check := &CheckTTL{
|
2017-05-22 20:07:40 +00:00
|
|
|
Notify: notif,
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2017-07-04 10:44:24 +00:00
|
|
|
TTL: 200 * time.Millisecond,
|
2020-01-28 23:50:41 +00:00
|
|
|
Logger: logger,
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
2017-07-04 10:44:24 +00:00
|
|
|
time.Sleep(100 * time.Millisecond)
|
2017-04-19 23:00:11 +00:00
|
|
|
check.SetStatus(api.HealthPassing, "test-output")
|
2014-01-21 03:12:40 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.Updates(cid) != 1 {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have 1 updates %v", notif.UpdatesMap())
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.State(cid) != api.HealthPassing {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should be passing %v", notif.StateMap())
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure we don't fail early
|
2017-07-04 10:44:24 +00:00
|
|
|
time.Sleep(150 * time.Millisecond)
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.Updates(cid) != 1 {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have 1 updates %v", notif.UpdatesMap())
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for the TTL to expire
|
2017-07-04 10:44:24 +00:00
|
|
|
time.Sleep(150 * time.Millisecond)
|
2014-01-21 03:12:40 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.Updates(cid) != 2 {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have 2 updates %v", notif.UpdatesMap())
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if notif.State(cid) != api.HealthCritical {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should be critical %v", notif.StateMap())
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
2016-03-03 01:58:01 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if !strings.Contains(notif.Output(cid), "test-output") {
|
2017-05-22 20:07:40 +00:00
|
|
|
t.Fatalf("should have retained output %v", notif.OutputMap())
|
2016-03-03 01:58:01 +00:00
|
|
|
}
|
2014-01-21 03:12:40 +00:00
|
|
|
}
|
2015-01-13 00:09:42 +00:00
|
|
|
|
2017-06-06 23:11:56 +00:00
|
|
|
func TestCheckHTTP(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2015-02-02 08:30:44 +00:00
|
|
|
|
2017-06-06 23:11:56 +00:00
|
|
|
tests := []struct {
|
|
|
|
desc string
|
|
|
|
code int
|
|
|
|
method string
|
|
|
|
header http.Header
|
|
|
|
status string
|
|
|
|
}{
|
|
|
|
// passing
|
|
|
|
{code: 200, status: api.HealthPassing},
|
|
|
|
{code: 201, status: api.HealthPassing},
|
|
|
|
{code: 250, status: api.HealthPassing},
|
|
|
|
{code: 299, status: api.HealthPassing},
|
|
|
|
|
|
|
|
// warning
|
|
|
|
{code: 429, status: api.HealthWarning},
|
|
|
|
|
|
|
|
// critical
|
|
|
|
{code: 300, status: api.HealthCritical},
|
|
|
|
{code: 400, status: api.HealthCritical},
|
|
|
|
{code: 500, status: api.HealthCritical},
|
|
|
|
|
|
|
|
// custom method
|
|
|
|
{desc: "custom method GET", code: 200, method: "GET", status: api.HealthPassing},
|
2017-06-08 17:24:50 +00:00
|
|
|
{desc: "custom method POST", code: 200, header: http.Header{"Content-Length": []string{"0"}}, method: "POST", status: api.HealthPassing},
|
2017-06-06 23:11:56 +00:00
|
|
|
{desc: "custom method abc", code: 200, method: "abc", status: api.HealthPassing},
|
|
|
|
|
|
|
|
// custom header
|
|
|
|
{desc: "custom header", code: 200, header: http.Header{"A": []string{"b", "c"}}, status: api.HealthPassing},
|
2017-06-29 23:26:08 +00:00
|
|
|
{desc: "host header", code: 200, header: http.Header{"Host": []string{"a"}}, status: api.HealthPassing},
|
2017-06-06 23:11:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, tt := range tests {
|
|
|
|
desc := tt.desc
|
|
|
|
if desc == "" {
|
|
|
|
desc = fmt.Sprintf("code %d -> status %s", tt.code, tt.status)
|
|
|
|
}
|
|
|
|
t.Run(desc, func(t *testing.T) {
|
|
|
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if tt.method != "" && tt.method != r.Method {
|
|
|
|
w.WriteHeader(999)
|
|
|
|
return
|
|
|
|
}
|
2017-06-08 17:24:50 +00:00
|
|
|
|
|
|
|
expectedHeader := http.Header{
|
|
|
|
"Accept": []string{"text/plain, text/*, */*"},
|
|
|
|
"Accept-Encoding": []string{"gzip"},
|
|
|
|
"Connection": []string{"close"},
|
|
|
|
"User-Agent": []string{"Consul Health Check"},
|
|
|
|
}
|
|
|
|
for k, v := range tt.header {
|
|
|
|
expectedHeader[k] = v
|
|
|
|
}
|
2017-06-29 23:26:08 +00:00
|
|
|
|
|
|
|
// the Host header is in r.Host and not in the headers
|
|
|
|
host := expectedHeader.Get("Host")
|
|
|
|
if host != "" && host != r.Host {
|
|
|
|
w.WriteHeader(999)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
expectedHeader.Del("Host")
|
|
|
|
|
2017-06-08 17:24:50 +00:00
|
|
|
if !reflect.DeepEqual(expectedHeader, r.Header) {
|
2017-06-06 23:11:56 +00:00
|
|
|
w.WriteHeader(999)
|
|
|
|
return
|
|
|
|
}
|
2017-06-08 17:24:50 +00:00
|
|
|
|
2017-06-06 23:11:56 +00:00
|
|
|
// Body larger than 4k limit
|
2019-06-26 15:43:25 +00:00
|
|
|
body := bytes.Repeat([]byte{'a'}, 2*DefaultBufSize)
|
2017-06-06 23:11:56 +00:00
|
|
|
w.WriteHeader(tt.code)
|
|
|
|
w.Write(body)
|
|
|
|
}))
|
|
|
|
defer server.Close()
|
|
|
|
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
2017-06-06 23:11:56 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
HTTP: server.URL,
|
|
|
|
Method: tt.method,
|
|
|
|
Header: tt.header,
|
|
|
|
Interval: 10 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2017-06-06 23:11:56 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2017-06-06 23:11:56 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), tt.status; got != want {
|
2017-06-06 23:11:56 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
2019-06-26 15:43:25 +00:00
|
|
|
// Allow slightly more data than DefaultBufSize, for the header
|
2019-12-10 02:26:41 +00:00
|
|
|
if n := len(notif.Output(cid)); n > (DefaultBufSize + 256) {
|
2019-06-26 15:43:25 +00:00
|
|
|
r.Fatalf("output too long: %d (%d-byte limit)", n, DefaultBufSize)
|
2017-06-06 23:11:56 +00:00
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
2015-02-02 08:30:44 +00:00
|
|
|
}
|
|
|
|
|
2019-09-26 02:55:52 +00:00
|
|
|
func TestCheckHTTP_Proxied(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
fmt.Fprintln(w, "Proxy Server")
|
|
|
|
}))
|
|
|
|
defer proxy.Close()
|
|
|
|
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
|
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-09-26 02:55:52 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-09-26 02:55:52 +00:00
|
|
|
HTTP: "",
|
|
|
|
Method: "GET",
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
|
|
|
Interval: 10 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
2019-09-26 02:55:52 +00:00
|
|
|
ProxyHTTP: proxy.URL,
|
2019-10-14 20:49:49 +00:00
|
|
|
StatusHandler: statusHandler,
|
2019-09-26 02:55:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
// If ProxyHTTP is set, check() reqs should go to that address
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
output := notif.Output(cid)
|
2019-09-26 02:55:52 +00:00
|
|
|
if !strings.Contains(output, "Proxy Server") {
|
|
|
|
r.Fatalf("c.ProxyHTTP server did not receive request, but should")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestCheckHTTP_NotProxied(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
fmt.Fprintln(w, "Original Server")
|
|
|
|
}))
|
|
|
|
defer server.Close()
|
|
|
|
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-09-26 02:55:52 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-09-26 02:55:52 +00:00
|
|
|
HTTP: server.URL,
|
|
|
|
Method: "GET",
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
|
|
|
Interval: 10 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
2019-09-26 02:55:52 +00:00
|
|
|
ProxyHTTP: "",
|
2019-10-14 20:49:49 +00:00
|
|
|
StatusHandler: statusHandler,
|
2019-09-26 02:55:52 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
// If ProxyHTTP is not set, check() reqs should go to the address in CheckHTTP.HTTP
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
output := notif.Output(cid)
|
2019-09-26 02:55:52 +00:00
|
|
|
if !strings.Contains(output, "Original Server") {
|
|
|
|
r.Fatalf("server did not receive request")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2022-04-01 21:31:15 +00:00
|
|
|
func TestCheckHTTP_DisableRedirects(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
fmt.Fprintln(w, "server1")
|
|
|
|
}))
|
|
|
|
defer server1.Close()
|
|
|
|
|
|
|
|
server2 := httptest.NewServer(http.RedirectHandler(server1.URL, 301))
|
|
|
|
defer server2.Close()
|
|
|
|
|
|
|
|
notif := mock.NewNotify()
|
|
|
|
logger := testutil.Logger(t)
|
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
|
|
|
check := &CheckHTTP{
|
|
|
|
CheckID: cid,
|
|
|
|
HTTP: server2.URL,
|
|
|
|
Method: "GET",
|
|
|
|
OutputMaxSize: DefaultBufSize,
|
|
|
|
Interval: 10 * time.Millisecond,
|
|
|
|
DisableRedirects: true,
|
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
output := notif.Output(cid)
|
|
|
|
if !strings.Contains(output, "Moved Permanently") {
|
|
|
|
r.Fatalf("should have returned 301 body instead of redirecting")
|
|
|
|
}
|
|
|
|
if strings.Contains(output, "server1") {
|
|
|
|
r.Fatalf("followed redirect")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-07-16 22:13:26 +00:00
|
|
|
func TestCheckHTTPTCP_BigTimeout(t *testing.T) {
|
|
|
|
testCases := []struct {
|
|
|
|
timeoutIn, intervalIn, timeoutWant time.Duration
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
timeoutIn: 31 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 31 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 30 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 30 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 29 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 29 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 0 * time.Second,
|
|
|
|
intervalIn: 10 * time.Second,
|
|
|
|
timeoutWant: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 0 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 10 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 9 * time.Second,
|
|
|
|
intervalIn: 30 * time.Second,
|
|
|
|
timeoutWant: 9 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: -1 * time.Second,
|
|
|
|
intervalIn: 10 * time.Second,
|
|
|
|
timeoutWant: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
timeoutIn: 0 * time.Second,
|
|
|
|
intervalIn: 5 * time.Second,
|
|
|
|
timeoutWant: 10 * time.Second,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range testCases {
|
|
|
|
desc := fmt.Sprintf("timeoutIn: %v, intervalIn: %v", tc.timeoutIn, tc.intervalIn)
|
|
|
|
t.Run(desc, func(t *testing.T) {
|
|
|
|
checkHTTP := &CheckHTTP{
|
|
|
|
Timeout: tc.timeoutIn,
|
|
|
|
Interval: tc.intervalIn,
|
|
|
|
}
|
|
|
|
checkHTTP.Start()
|
|
|
|
defer checkHTTP.Stop()
|
|
|
|
if checkHTTP.httpClient.Timeout != tc.timeoutWant {
|
|
|
|
t.Fatalf("expected HTTP timeout to be %v, got %v", tc.timeoutWant, checkHTTP.httpClient.Timeout)
|
|
|
|
}
|
|
|
|
|
|
|
|
checkTCP := &CheckTCP{
|
|
|
|
Timeout: tc.timeoutIn,
|
|
|
|
Interval: tc.intervalIn,
|
|
|
|
}
|
|
|
|
checkTCP.Start()
|
|
|
|
defer checkTCP.Stop()
|
|
|
|
if checkTCP.dialer.Timeout != tc.timeoutWant {
|
|
|
|
t.Fatalf("expected TCP timeout to be %v, got %v", tc.timeoutWant, checkTCP.dialer.Timeout)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-26 15:43:25 +00:00
|
|
|
func TestCheckMaxOutputSize(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
timeout := 5 * time.Millisecond
|
|
|
|
server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) {
|
|
|
|
body := bytes.Repeat([]byte{'x'}, 2*DefaultBufSize)
|
|
|
|
writer.WriteHeader(200)
|
|
|
|
writer.Write(body)
|
|
|
|
}))
|
|
|
|
defer server.Close()
|
|
|
|
|
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2019-06-26 15:43:25 +00:00
|
|
|
maxOutputSize := 32
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("bar", nil)
|
|
|
|
|
2019-06-26 15:43:25 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-06-26 15:43:25 +00:00
|
|
|
HTTP: server.URL + "/v1/agent/self",
|
|
|
|
Timeout: timeout,
|
|
|
|
Interval: 2 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
2019-06-26 15:43:25 +00:00
|
|
|
OutputMaxSize: maxOutputSize,
|
2021-09-14 16:47:52 +00:00
|
|
|
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
2019-06-26 15:43:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2019-06-26 15:43:25 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), api.HealthPassing; got != want {
|
2019-06-26 15:43:25 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Output(cid), "HTTP GET "+server.URL+"/v1/agent/self: 200 OK Output: "+strings.Repeat("x", maxOutputSize); got != want {
|
2019-06-26 15:43:25 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-02-02 08:30:44 +00:00
|
|
|
func TestCheckHTTPTimeout(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-06-06 23:11:56 +00:00
|
|
|
timeout := 5 * time.Millisecond
|
|
|
|
server := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
|
|
|
|
time.Sleep(2 * timeout)
|
|
|
|
}))
|
2015-02-02 08:30:44 +00:00
|
|
|
defer server.Close()
|
|
|
|
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("bar", nil)
|
|
|
|
|
2015-02-02 08:30:44 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-10-14 20:49:49 +00:00
|
|
|
HTTP: server.URL,
|
|
|
|
Timeout: timeout,
|
|
|
|
Interval: 10 * time.Millisecond,
|
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2015-02-02 08:30:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
2016-10-31 16:59:20 +00:00
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), api.HealthCritical; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
2016-10-31 16:59:20 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2015-02-02 08:30:44 +00:00
|
|
|
}
|
2015-03-15 20:30:50 +00:00
|
|
|
|
2020-02-10 16:27:12 +00:00
|
|
|
func TestCheckHTTPBody(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
timeout := 5 * time.Millisecond
|
|
|
|
|
|
|
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
var (
|
|
|
|
buf bytes.Buffer
|
|
|
|
body []byte
|
|
|
|
)
|
|
|
|
code := 200
|
|
|
|
if _, err := buf.ReadFrom(r.Body); err != nil {
|
|
|
|
code = 999
|
|
|
|
body = []byte(err.Error())
|
|
|
|
} else {
|
|
|
|
body = buf.Bytes()
|
|
|
|
}
|
|
|
|
|
|
|
|
w.WriteHeader(code)
|
|
|
|
w.Write(body)
|
|
|
|
}))
|
|
|
|
defer server.Close()
|
|
|
|
|
|
|
|
tests := []struct {
|
|
|
|
desc string
|
|
|
|
method string
|
|
|
|
header http.Header
|
|
|
|
body string
|
|
|
|
}{
|
|
|
|
{desc: "get body", method: "GET", body: "hello world"},
|
|
|
|
{desc: "post body", method: "POST", body: "hello world"},
|
|
|
|
{desc: "post json body", header: http.Header{"Content-Type": []string{"application/json"}}, method: "POST", body: "{\"foo\":\"bar\"}"},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tt := range tests {
|
|
|
|
t.Run(tt.desc, func(t *testing.T) {
|
|
|
|
notif := mock.NewNotify()
|
|
|
|
|
|
|
|
cid := structs.NewCheckID("checkbody", nil)
|
|
|
|
logger := testutil.Logger(t)
|
|
|
|
check := &CheckHTTP{
|
|
|
|
CheckID: cid,
|
|
|
|
HTTP: server.URL,
|
|
|
|
Header: tt.header,
|
|
|
|
Method: tt.method,
|
|
|
|
Body: tt.body,
|
|
|
|
Timeout: timeout,
|
|
|
|
Interval: 2 * time.Millisecond,
|
|
|
|
Logger: logger,
|
2021-09-14 16:47:52 +00:00
|
|
|
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
2020-02-10 16:27:12 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := notif.State(cid), api.HealthPassing; got != want {
|
|
|
|
r.Fatalf("got status %q want %q", got, want)
|
|
|
|
}
|
|
|
|
if got, want := notif.Output(cid), tt.body; !strings.HasSuffix(got, want) {
|
|
|
|
r.Fatalf("got output %q want suffix %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-15 20:30:50 +00:00
|
|
|
func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2019-10-14 20:49:49 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
2015-03-15 20:30:50 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-10-14 20:49:49 +00:00
|
|
|
HTTP: "http://foo.bar/baz",
|
|
|
|
Interval: 10 * time.Second,
|
|
|
|
Logger: logger,
|
2021-09-14 16:47:52 +00:00
|
|
|
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
2015-03-15 20:30:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
if !check.httpClient.Transport.(*http.Transport).DisableKeepAlives {
|
|
|
|
t.Fatalf("should have disabled keepalives")
|
|
|
|
}
|
|
|
|
}
|
2015-07-23 11:45:08 +00:00
|
|
|
|
2017-07-04 09:01:16 +00:00
|
|
|
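// largeBodyHandler returns an http.Handler that replies with the given status code and a body twice DefaultBufSize, to exercise output truncation.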
func largeBodyHandler(code int) http.Handler {
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
2017-06-06 23:11:56 +00:00
|
|
|
// Body larger than 4k limit
|
2019-06-26 15:43:25 +00:00
|
|
|
body := bytes.Repeat([]byte{'a'}, 2*DefaultBufSize)
|
2017-06-06 23:11:56 +00:00
|
|
|
w.WriteHeader(code)
|
|
|
|
w.Write(body)
|
2017-07-04 09:01:16 +00:00
|
|
|
})
|
2017-06-06 23:11:56 +00:00
|
|
|
}
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-07-04 09:01:16 +00:00
|
|
|
server := httptest.NewTLSServer(largeBodyHandler(200))
|
2016-11-03 20:17:30 +00:00
|
|
|
defer server.Close()
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
tlsConfig := &api.TLSConfig{
|
|
|
|
InsecureSkipVerify: true,
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
2017-11-08 02:22:09 +00:00
|
|
|
tlsClientConfig, err := api.SetupTLSConfig(tlsConfig)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("skipverify_true", nil)
|
2016-11-03 20:17:30 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2017-11-08 02:22:09 +00:00
|
|
|
HTTP: server.URL,
|
|
|
|
Interval: 25 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
2017-11-08 02:22:09 +00:00
|
|
|
TLSClientConfig: tlsClientConfig,
|
2019-10-14 20:49:49 +00:00
|
|
|
StatusHandler: statusHandler,
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
2017-11-08 02:22:09 +00:00
|
|
|
|
2016-11-03 20:17:30 +00:00
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
|
|
|
|
if !check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
|
|
|
|
t.Fatalf("should be true")
|
|
|
|
}
|
2017-11-08 02:22:09 +00:00
|
|
|
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), api.HealthPassing; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
2016-11-05 04:55:55 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-07-04 09:01:16 +00:00
|
|
|
server := httptest.NewTLSServer(largeBodyHandler(200))
|
2016-11-03 20:17:30 +00:00
|
|
|
defer server.Close()
|
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
tlsClientConfig, err := api.SetupTLSConfig(&api.TLSConfig{})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-11-03 20:17:30 +00:00
|
|
|
|
2017-11-08 02:22:09 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("skipverify_false", nil)
|
2020-01-28 23:50:41 +00:00
|
|
|
|
2016-11-03 20:17:30 +00:00
|
|
|
check := &CheckHTTP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2017-11-08 02:22:09 +00:00
|
|
|
HTTP: server.URL,
|
|
|
|
Interval: 100 * time.Millisecond,
|
2019-10-14 20:49:49 +00:00
|
|
|
Logger: logger,
|
2017-11-08 02:22:09 +00:00
|
|
|
TLSClientConfig: tlsClientConfig,
|
2019-10-14 20:49:49 +00:00
|
|
|
StatusHandler: statusHandler,
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
2016-11-05 04:55:55 +00:00
|
|
|
|
2016-11-03 20:17:30 +00:00
|
|
|
if check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
|
2017-11-08 02:22:09 +00:00
|
|
|
t.Fatalf("should default to false")
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
2017-11-08 02:22:09 +00:00
|
|
|
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2016-11-05 04:55:55 +00:00
|
|
|
// This should fail due to an invalid SSL cert
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), api.HealthCritical; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
2016-11-05 04:55:55 +00:00
|
|
|
}
|
2023-01-05 17:47:45 +00:00
|
|
|
if !isInvalidCertificateError(notif.Output(cid)) {
|
2017-05-22 20:07:40 +00:00
|
|
|
r.Fatalf("should fail with certificate error %v", notif.OutputMap())
|
2016-11-05 04:55:55 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-11-03 20:17:30 +00:00
|
|
|
}
|
|
|
|
|
2023-01-05 17:47:45 +00:00
|
|
|
// isInvalidCertificateError checks the error string for an untrusted certificate error.
|
|
|
|
// The specific error message is different on Linux and macOS.
|
|
|
|
//
|
|
|
|
// TODO: Revisit this when https://github.com/golang/go/issues/52010 is resolved.
|
|
|
|
// We may be able to simplify this to check only one error string.
|
|
|
|
func isInvalidCertificateError(err string) bool {
|
|
|
|
return strings.Contains(err, "certificate signed by unknown authority") ||
|
|
|
|
strings.Contains(err, "certificate is not trusted")
|
|
|
|
}
|
|
|
|
|
2015-07-23 11:45:08 +00:00
|
|
|
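// mockTCPServer starts a TCP listener on an ephemeral local port for the given network ("tcp" or "tcp6").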
func mockTCPServer(network string) net.Listener {
|
|
|
|
var (
|
|
|
|
addr string
|
|
|
|
)
|
|
|
|
|
|
|
|
if network == `tcp6` {
|
|
|
|
addr = `[::1]:0`
|
|
|
|
} else {
|
|
|
|
addr = `127.0.0.1:0`
|
|
|
|
}
|
|
|
|
|
|
|
|
listener, err := net.Listen(network, addr)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return listener
|
|
|
|
}
|
|
|
|
|
|
|
|
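// expectTCPStatus runs a CheckTCP against the given address and waits for it to report the expected status.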
func expectTCPStatus(t *testing.T, tcp string, status string) {
|
2017-05-22 20:07:40 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2020-01-28 23:50:41 +00:00
|
|
|
|
2015-07-23 11:45:08 +00:00
|
|
|
check := &CheckTCP{
|
2019-12-10 02:26:41 +00:00
|
|
|
CheckID: cid,
|
2019-10-14 20:49:49 +00:00
|
|
|
TCP: tcp,
|
|
|
|
Interval: 10 * time.Millisecond,
|
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
2015-07-23 11:45:08 +00:00
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
2016-10-31 16:59:20 +00:00
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
if got, want := notif.State(cid), status; got != want {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
2016-10-31 16:59:20 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2015-07-23 11:45:08 +00:00
|
|
|
}
|
|
|
|
|
2019-10-14 20:49:49 +00:00
|
|
|
func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2019-10-14 20:49:49 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
// Set the initial status to passing after a single success
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
// Status should still be passing after 1 failed check only
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, 1, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
// Status should become warning after 2 failed checks only
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, 2, notif.Updates(cid))
|
2021-09-14 16:47:52 +00:00
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Status should become critical after 3 failed checks only
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Status should be passing after 2 passing checks
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-09-14 16:47:52 +00:00
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-09-14 16:47:52 +00:00
|
|
|
require.Equal(r, 4, notif.Updates(cid))
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T) {
|
|
|
|
t.Parallel()
|
2019-12-10 02:26:41 +00:00
|
|
|
cid := structs.NewCheckID("foo", nil)
|
2019-10-14 20:49:49 +00:00
|
|
|
notif := mock.NewNotify()
|
2020-01-28 23:50:41 +00:00
|
|
|
logger := testutil.Logger(t)
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
// Set the initial status to passing after a single success
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
// Status should remain passing after FAIL PASS FAIL PASS FAIL sequence
|
2019-10-14 20:49:49 +00:00
|
|
|
// Although we have 3 FAILS, they are not consecutive
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
2021-09-14 16:47:52 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, 1, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
// Warning after a 2nd consecutive FAIL
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, 2, notif.Updates(cid))
|
2021-09-14 16:47:52 +00:00
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Critical after a 3rd consecutive FAIL
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Status should remain critical after PASS FAIL PASS sequence
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
2021-09-14 16:47:52 +00:00
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Passing after a 2nd consecutive PASS
|
2019-12-10 02:26:41 +00:00
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
2019-10-14 20:49:49 +00:00
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 4, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStatusHandlerWarningAndCriticalThresholdsTheSameSetsCritical(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
notif := mock.NewNotify()
|
|
|
|
logger := testutil.Logger(t)
|
|
|
|
statusHandler := NewStatusHandler(notif, logger, 2, 3, 3)
|
|
|
|
|
|
|
|
// Set the initial status to passing after a single success
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
|
|
|
|
// Status should remain passing after FAIL FAIL sequence
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 1, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Critical and not Warning after a 3rd consecutive FAIL
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 2, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Passing after consecutive PASS PASS sequence
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
|
2019-10-14 20:49:49 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-12-10 02:26:41 +00:00
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
2019-10-14 20:49:49 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-09-14 16:47:52 +00:00
|
|
|
func TestStatusHandlerMaintainWarningStatusWhenCheckIsFlapping(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
notif := mock.NewNotify()
|
|
|
|
logger := testutil.Logger(t)
|
|
|
|
statusHandler := NewStatusHandler(notif, logger, 3, 3, 5)
|
|
|
|
|
|
|
|
// Set the initial status to passing after a single success.
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
|
|
|
|
// Status should remain passing after a FAIL FAIL sequence.
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 1, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthPassing, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Warning after a 3rd consecutive FAIL.
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 2, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Status should remain warning after PASS FAIL FAIL FAIL PASS FAIL FAIL FAIL sequence.
|
|
|
|
// Although we have 6 FAILS, they are not consecutive.
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
// The status gets updated due to failuresCounter being reset
|
|
|
|
// but the status itself remains as Warning.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 3, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
// Status doesn't change, but the state update is triggered.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 4, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
// Status should change only after 5 consecutive FAIL updates.
|
|
|
|
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
// The status doesn't change, but a status update is triggered.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 5, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
// The status doesn't change, but a status update is triggered.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 6, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthWarning, notif.State(cid))
|
|
|
|
})
|
|
|
|
|
|
|
|
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
|
|
|
|
|
|
|
// The FailuresBeforeCritical threshold is finally breached.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
require.Equal(r, 7, notif.Updates(cid))
|
|
|
|
require.Equal(r, api.HealthCritical, notif.State(cid))
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-07-23 11:45:08 +00:00
|
|
|
func TestCheckTCPCritical(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2015-07-23 11:45:08 +00:00
|
|
|
var (
|
|
|
|
tcpServer net.Listener
|
|
|
|
)
|
|
|
|
|
|
|
|
tcpServer = mockTCPServer(`tcp`)
|
2017-04-19 23:00:11 +00:00
|
|
|
expectTCPStatus(t, `127.0.0.1:0`, api.HealthCritical)
|
2015-07-23 11:45:08 +00:00
|
|
|
tcpServer.Close()
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestCheckTCPPassing(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2015-07-23 11:45:08 +00:00
|
|
|
var (
|
|
|
|
tcpServer net.Listener
|
|
|
|
)
|
|
|
|
|
|
|
|
tcpServer = mockTCPServer(`tcp`)
|
2017-04-19 23:00:11 +00:00
|
|
|
expectTCPStatus(t, tcpServer.Addr().String(), api.HealthPassing)
|
2015-07-23 11:45:08 +00:00
|
|
|
tcpServer.Close()
|
|
|
|
|
2017-11-09 02:28:45 +00:00
|
|
|
if os.Getenv("TRAVIS") == "true" {
|
|
|
|
t.Skip("IPV6 not supported on travis-ci")
|
|
|
|
}
|
2015-07-23 11:45:08 +00:00
|
|
|
tcpServer = mockTCPServer(`tcp6`)
|
2017-04-19 23:00:11 +00:00
|
|
|
expectTCPStatus(t, tcpServer.Addr().String(), api.HealthPassing)
|
2015-07-23 11:45:08 +00:00
|
|
|
tcpServer.Close()
|
|
|
|
}
|
2015-10-26 19:59:40 +00:00
|
|
|
|
2022-06-06 19:13:19 +00:00
|
|
|
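// sendResponse writes a fixed "healthy" payload back to the given UDP client address.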
func sendResponse(conn *net.UDPConn, addr *net.UDPAddr) {
|
|
|
|
_, err := conn.WriteToUDP([]byte("healthy"), addr)
|
|
|
|
if err != nil {
|
|
|
|
fmt.Printf("Couldn't send response %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
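// mockUDPServer listens on 127.0.0.1:port and replies "healthy" to every datagram until ctx is cancelled.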
func mockUDPServer(ctx context.Context, network string, port int) {
|
|
|
|
|
|
|
|
b := make([]byte, 1024)
|
|
|
|
addr := fmt.Sprintf(`127.0.0.1:%d`, port)
|
|
|
|
|
|
|
|
udpAddr, err := net.ResolveUDPAddr(network, addr)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal("Error resolving UDP address: ", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
ser, err := net.ListenUDP("udp", udpAddr)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal("Error listening UDP: ", err)
|
|
|
|
}
|
|
|
|
defer ser.Close()
|
|
|
|
|
|
|
|
chClose := make(chan interface{})
|
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
wg.Add(1)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
log.Print("Waiting for UDP message")
|
|
|
|
_, remoteaddr, err := ser.ReadFromUDP(b)
|
|
|
|
log.Printf("Read a message from %v %s \n", remoteaddr, b)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Error reading from UDP %s", err.Error())
|
|
|
|
}
|
|
|
|
sendResponse(ser, remoteaddr)
|
|
|
|
select {
|
|
|
|
case <-chClose:
|
|
|
|
fmt.Println("cancelled")
|
|
|
|
wg.Done()
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
fmt.Println("cancelled")
|
|
|
|
close(chClose)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
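// expectUDPStatus runs a CheckUDP against the given address and waits for it to report the expected status.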
func expectUDPStatus(t *testing.T, udp string, status string) {
|
|
|
|
notif := mock.NewNotify()
|
|
|
|
logger := testutil.Logger(t)
|
|
|
|
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
|
|
|
cid := structs.NewCheckID("foo", nil)
|
|
|
|
|
|
|
|
check := &CheckUDP{
|
|
|
|
CheckID: cid,
|
|
|
|
UDP: udp,
|
|
|
|
Interval: 10 * time.Millisecond,
|
|
|
|
Logger: logger,
|
|
|
|
StatusHandler: statusHandler,
|
|
|
|
}
|
|
|
|
check.Start()
|
|
|
|
defer check.Stop()
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
if got, want := notif.Updates(cid), 2; got < want {
|
|
|
|
r.Fatalf("got %d updates want at least %d", got, want)
|
|
|
|
}
|
|
|
|
if got, want := notif.State(cid), status; got != want {
|
|
|
|
r.Fatalf("got state %q want %q", got, want)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
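// expectUDPTimeout is like expectUDPStatus but configures a timeout so short
// that every probe times out, to verify how timeouts map to check status.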
func expectUDPTimeout(t *testing.T, udp string, status string) {
	notif := mock.NewNotify()
	logger := testutil.Logger(t)
	statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
	cid := structs.NewCheckID("foo", nil)

	check := &CheckUDP{
		CheckID:       cid,
		UDP:           udp,
		Interval:      10 * time.Millisecond,
		Timeout:       5 * time.Nanosecond,
		Logger:        logger,
		StatusHandler: statusHandler,
	}
	check.Start()
	defer check.Stop()
	retry.Run(t, func(r *retry.R) {
		if got, want := notif.Updates(cid), 2; got < want {
			r.Fatalf("got %d updates want at least %d", got, want)
		}
		if got, want := notif.State(cid), status; got != want {
			r.Fatalf("got state %q want %q", got, want)
		}
	})
}
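// TestCheckUDPTimeoutPassing verifies that a UDP probe that times out is still
// reported as passing.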
func TestCheckUDPTimeoutPassing(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	port := freeport.GetOne(t)
	serverUrl := "127.0.0.1:" + strconv.Itoa(port)

	go mockUDPServer(ctx, `udp`, port)
	expectUDPTimeout(t, serverUrl, api.HealthPassing) // Should pass: per the UDP check specification, a timeout is treated as success.
}
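// TestCheckUDPCritical verifies that a UDP check against a port the mock
// server is not listening on reports critical.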
func TestCheckUDPCritical(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	port := freeport.GetOne(t)
	nonExistentPort := freeport.GetOne(t)
	serverUrl := "127.0.0.1:" + strconv.Itoa(nonExistentPort)

	go mockUDPServer(ctx, `udp`, port)

	expectUDPStatus(t, serverUrl, api.HealthCritical) // Should be critical since the check targets a port the mocked UDP server is not listening on.
}
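// TestCheckUDPPassing verifies that a UDP check against the mock server
// reports passing.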
func TestCheckUDPPassing(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	port := freeport.GetOne(t)
	serverUrl := "127.0.0.1:" + strconv.Itoa(port)

	go mockUDPServer(ctx, `udp`, port)
	expectUDPStatus(t, serverUrl, api.HealthPassing)
}
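// TestCheckH2PING exercises the H2PING check against a TLS-enabled HTTP/2
// test server, covering passing, timeout, and closed-connection cases.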
func TestCheckH2PING(t *testing.T) {
	t.Parallel()

	tests := []struct {
		desc        string
		passing     bool
		timeout     time.Duration
		connTimeout time.Duration
	}{
		{desc: "passing", passing: true, timeout: 1 * time.Second, connTimeout: 1 * time.Second},
		{desc: "failing because of time out", passing: false, timeout: 1 * time.Nanosecond, connTimeout: 1 * time.Second},
		{desc: "failing because of closed connection", passing: false, timeout: 1 * time.Nanosecond, connTimeout: 1 * time.Millisecond},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return })
			server := httptest.NewUnstartedServer(handler)
			server.EnableHTTP2 = true
			server.Config.ReadTimeout = tt.connTimeout
			server.StartTLS()
			defer server.Close()
			serverAddress := server.Listener.Addr()
			target := serverAddress.String()

			notif := mock.NewNotify()
			logger := testutil.Logger(t)
			statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
			cid := structs.NewCheckID("foo", nil)
			tlsCfg := &api.TLSConfig{
				InsecureSkipVerify: true,
			}
			tlsClientCfg, err := api.SetupTLSConfig(tlsCfg)
			if err != nil {
				t.Fatalf("%v", err)
			}
			tlsClientCfg.NextProtos = []string{http2.NextProtoTLS}

			check := &CheckH2PING{
				CheckID:         cid,
				H2PING:          target,
				Interval:        5 * time.Second,
				Timeout:         tt.timeout,
				Logger:          logger,
				TLSClientConfig: tlsClientCfg,
				StatusHandler:   statusHandler,
			}

			check.Start()
			defer check.Stop()

			if tt.passing {
				retry.Run(t, func(r *retry.R) {
					if got, want := notif.State(cid), api.HealthPassing; got != want {
						r.Fatalf("got state %q want %q", got, want)
					}
				})
			} else {
				retry.Run(t, func(r *retry.R) {
					if got, want := notif.State(cid), api.HealthCritical; got != want {
						r.Fatalf("got state %q want %q", got, want)
					}
				})
			}
		})
	}
}
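// TestCheckH2PING_TLS_BadVerify confirms that certificate verification stays
// enabled by default and that the untrusted test certificate drives the check
// critical with a certificate error in its output.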
func TestCheckH2PING_TLS_BadVerify(t *testing.T) {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return })
	server := httptest.NewUnstartedServer(handler)
	server.EnableHTTP2 = true
	server.StartTLS()
	defer server.Close()
	serverAddress := server.Listener.Addr()
	target := serverAddress.String()

	notif := mock.NewNotify()
	logger := testutil.Logger(t)
	statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
	cid := structs.NewCheckID("foo", nil)
	tlsCfg := &api.TLSConfig{}
	tlsClientCfg, err := api.SetupTLSConfig(tlsCfg)
	if err != nil {
		t.Fatalf("%v", err)
	}
	tlsClientCfg.NextProtos = []string{http2.NextProtoTLS}

	check := &CheckH2PING{
		CheckID:         cid,
		H2PING:          target,
		Interval:        5 * time.Second,
		Timeout:         2 * time.Second,
		Logger:          logger,
		TLSClientConfig: tlsClientCfg,
		StatusHandler:   statusHandler,
	}

	check.Start()
	defer check.Stop()

	insecureSkipVerifyValue := check.TLSClientConfig.InsecureSkipVerify
	if insecureSkipVerifyValue {
		t.Fatalf("The default value for InsecureSkipVerify should be false but was %v", insecureSkipVerifyValue)
	}
	retry.Run(t, func(r *retry.R) {
		if got, want := notif.State(cid), api.HealthCritical; got != want {
			r.Fatalf("got state %q want %q", got, want)
		}
		if !isInvalidCertificateError(notif.Output(cid)) {
			r.Fatalf("should fail with certificate error %v", notif.OutputMap())
		}
	})
}
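// TestCheckH2PINGInvalidListener points the check at an address that is not
// expected to have a listener and expects a critical status with a dial
// failure in the output.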
func TestCheckH2PINGInvalidListener(t *testing.T) {
	t.Parallel()

	notif := mock.NewNotify()
	logger := testutil.Logger(t)
	statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
	cid := structs.NewCheckID("foo", nil)
	tlsCfg := &api.TLSConfig{
		InsecureSkipVerify: true,
	}
	tlsClientCfg, err := api.SetupTLSConfig(tlsCfg)
	if err != nil {
		t.Fatalf("%v", err)
	}
	tlsClientCfg.NextProtos = []string{http2.NextProtoTLS}

	check := &CheckH2PING{
		CheckID:         cid,
		H2PING:          "localhost:55555",
		Interval:        5 * time.Second,
		Timeout:         1 * time.Second,
		Logger:          logger,
		TLSClientConfig: tlsClientCfg,
		StatusHandler:   statusHandler,
	}

	check.Start()
	defer check.Stop()

	retry.Run(t, func(r *retry.R) {
		if got, want := notif.State(cid), api.HealthCritical; got != want {
			r.Fatalf("got state %q want %q", got, want)
		}
		expectedOutput := "Failed to dial to"
		if !strings.Contains(notif.Output(cid), expectedOutput) {
			r.Fatalf("should have included output %s: %v", expectedOutput, notif.OutputMap())
		}
	})
}
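// TestCheckH2CPING exercises the same H2PING scenarios over cleartext HTTP/2
// (h2c), without TLS.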
func TestCheckH2CPING(t *testing.T) {
	t.Parallel()

	tests := []struct {
		desc        string
		passing     bool
		timeout     time.Duration
		connTimeout time.Duration
	}{
		{desc: "passing", passing: true, timeout: 1 * time.Second, connTimeout: 1 * time.Second},
		{desc: "failing because of time out", passing: false, timeout: 1 * time.Nanosecond, connTimeout: 1 * time.Second},
		{desc: "failing because of closed connection", passing: false, timeout: 1 * time.Nanosecond, connTimeout: 1 * time.Millisecond},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return })
			h2chandler := h2c.NewHandler(handler, &http2.Server{})
			server := httptest.NewUnstartedServer(h2chandler)
			server.Config.ReadTimeout = tt.connTimeout
			server.Start()
			defer server.Close()
			serverAddress := server.Listener.Addr()
			target := serverAddress.String()

			notif := mock.NewNotify()
			logger := testutil.Logger(t)
			statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
			cid := structs.NewCheckID("foo", nil)
			check := &CheckH2PING{
				CheckID:         cid,
				H2PING:          target,
				Interval:        5 * time.Second,
				Timeout:         tt.timeout,
				Logger:          logger,
				TLSClientConfig: nil,
				StatusHandler:   statusHandler,
			}

			check.Start()
			defer check.Stop()
			if tt.passing {
				retry.Run(t, func(r *retry.R) {
					if got, want := notif.State(cid), api.HealthPassing; got != want {
						r.Fatalf("got state %q want %q", got, want)
					}
				})
			} else {
				retry.Run(t, func(r *retry.R) {
					if got, want := notif.State(cid), api.HealthCritical; got != want {
						r.Fatalf("got state %q want %q", got, want)
					}
				})
			}
		})
	}
}
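// TestCheck_Docker drives CheckDocker against a fake Docker API served by
// httptest and asserts the reported output and status for each exec lifecycle
// failure and exit code.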
func TestCheck_Docker(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	tests := []struct {
		desc     string
		handlers map[string]http.HandlerFunc
		out      *regexp.Regexp
		state    string
	}{
		{
			desc: "create exec: bad container id",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(404)
				},
			},
			out:   regexp.MustCompile("^create exec failed for unknown container 123$"),
			state: api.HealthCritical,
		},
		{
			desc: "create exec: paused container",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(409)
				},
			},
			out:   regexp.MustCompile("^create exec failed since container 123 is paused or stopped$"),
			state: api.HealthCritical,
		},
		{
			desc: "create exec: bad status code",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(999)
					fmt.Fprint(w, "some output")
				},
			},
			out:   regexp.MustCompile("^create exec failed for container 123 with status 999: some output$"),
			state: api.HealthCritical,
		},
		{
			desc: "create exec: bad json",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `this is not json`)
				},
			},
			out:   regexp.MustCompile("^create exec response for container 123 cannot be parsed: .*$"),
			state: api.HealthCritical,
		},
		{
			desc: "start exec: bad exec id",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(404)
				},
			},
			out:   regexp.MustCompile("^start exec failed for container 123: invalid exec id 456$"),
			state: api.HealthCritical,
		},
		{
			desc: "start exec: paused container",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(409)
				},
			},
			out:   regexp.MustCompile("^start exec failed since container 123 is paused or stopped$"),
			state: api.HealthCritical,
		},
		{
			desc: "start exec: bad status code",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(999)
					fmt.Fprint(w, "some output")
				},
			},
			out:   regexp.MustCompile("^start exec failed for container 123 with status 999: body: some output err: <nil>$"),
			state: api.HealthCritical,
		},
		{
			desc: "inspect exec: bad exec id",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "OK")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(404)
				},
			},
			out:   regexp.MustCompile("^inspect exec failed for container 123: invalid exec id 456$"),
			state: api.HealthCritical,
		},
		{
			desc: "inspect exec: bad status code",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "OK")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(999)
					fmt.Fprint(w, "some output")
				},
			},
			out:   regexp.MustCompile("^inspect exec failed for container 123 with status 999: some output$"),
			state: api.HealthCritical,
		},
		{
			desc: "inspect exec: bad json",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "OK")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `this is not json`)
				},
			},
			out:   regexp.MustCompile("^inspect exec response for container 123 cannot be parsed: .*$"),
			state: api.HealthCritical,
		},
		{
			desc: "inspect exec: exit code 0: passing",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "OK")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"ExitCode":0}`)
				},
			},
			out:   regexp.MustCompile("^OK$"),
			state: api.HealthPassing,
		},
		{
			desc: "inspect exec: exit code 0: passing: truncated",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "01234567890123456789OK") // more than 20 bytes
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"ExitCode":0}`)
				},
			},
			out:   regexp.MustCompile("^Captured 20 of 22 bytes\n...\n234567890123456789OK$"),
			state: api.HealthPassing,
		},
		{
			desc: "inspect exec: exit code 1: warning",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "WARN")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"ExitCode":1}`)
				},
			},
			out:   regexp.MustCompile("^WARN$"),
			state: api.HealthWarning,
		},
		{
			desc: "inspect exec: exit code 2: critical",
			handlers: map[string]http.HandlerFunc{
				"POST /containers/123/exec": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(201)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"Id":"456"}`)
				},
				"POST /exec/456/start": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					fmt.Fprint(w, "NOK")
				},
				"GET /exec/456/json": func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(200)
					w.Header().Set("Content-Type", "application/json")
					fmt.Fprint(w, `{"ExitCode":2}`)
				},
			},
			out:   regexp.MustCompile("^NOK$"),
			state: api.HealthCritical,
		},
	}

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				x := r.Method + " " + r.RequestURI
				h := tt.handlers[x]
				if h == nil {
					t.Fatalf("bad url %s", x)
				}
				h(w, r)
			}))
			defer srv.Close()

			// create a docker client with a tiny output buffer
			// to test the truncation
			c, err := NewDockerClient(srv.URL, 20)
			if err != nil {
				t.Fatal(err)
			}

			notif, upd := mock.NewNotifyChan()
			logger := testutil.Logger(t)
			statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
			id := structs.NewCheckID("chk", nil)

			check := &CheckDocker{
				CheckID:           id,
				ScriptArgs:        []string{"/health.sh"},
				DockerContainerID: "123",
				Interval:          25 * time.Millisecond,
				Client:            c,
				StatusHandler:     statusHandler,
			}
			check.Start()
			defer check.Stop()

			<-upd // wait for update

			if got, want := notif.Output(id), tt.out; !want.MatchString(got) {
				t.Fatalf("got %q want %q", got, want)
			}
			if got, want := notif.State(id), tt.state; got != want {
				t.Fatalf("got status %q want %q", got, want)
			}
		})
	}
}