agent: make timing-sensitive tests more robust

* make timing less aggressive
* mark timing tests as non-parallel
Frank Schroeder 2017-07-04 12:44:24 +02:00 committed by Frank Schröder
parent b12b914017
commit c218fdbc77
4 changed files with 57 additions and 64 deletions
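Both bullets give the Go scheduler more slack: the fixed sleeps, TTLs, and check intervals in the diffs below are widened so a briefly starved goroutine cannot miss its window, and t.Parallel() is commented out so concurrent tests cannot add jitter while a duration is being measured. As a rough illustration of the same concern (a minimal sketch, not code from this commit; waitFor and TestTimingSensitive are invented names), a timing-sensitive test can also poll with a generous deadline instead of relying on one tight sleep, which is what the retry.Run calls in the diffs below do:

package agent

import (
	"testing"
	"time"
)

// waitFor polls cond every 10ms until it returns true or the deadline
// passes. Hypothetical helper for illustration only; the Consul tests
// use the testutil/retry package for the same purpose.
func waitFor(t *testing.T, timeout time.Duration, cond func() bool) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("condition not met within %v", timeout)
}

func TestTimingSensitive(t *testing.T) {
	// No t.Parallel(): other tests running on the same cores would add
	// jitter to the durations this test depends on.
	done := make(chan struct{})
	go func() {
		time.Sleep(25 * time.Millisecond) // stand-in for the work under test
		close(done)
	}()

	// Poll with a generous deadline instead of sleeping for an exact
	// duration and hoping the goroutine above has already finished.
	waitFor(t, 500*time.Millisecond, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	})
}

The diffs themselves take the simpler route of widening the constants, which keeps the existing test structure intact.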

View File

@@ -1513,7 +1513,7 @@ func TestAgent_Service_MaintenanceMode(t *testing.T) {
}
func TestAgent_Service_Reap(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
cfg := TestConfig()
cfg.CheckReapInterval = time.Millisecond
cfg.CheckDeregisterIntervalMin = 0
@@ -1529,8 +1529,8 @@ func TestAgent_Service_Reap(t *testing.T) {
chkTypes := []*structs.CheckType{
&structs.CheckType{
Status: api.HealthPassing,
- TTL: 10 * time.Millisecond,
- DeregisterCriticalServiceAfter: 100 * time.Millisecond,
+ TTL: 25 * time.Millisecond,
+ DeregisterCriticalServiceAfter: 200 * time.Millisecond,
},
}
@@ -1547,8 +1547,8 @@ func TestAgent_Service_Reap(t *testing.T) {
t.Fatalf("should not have critical checks")
}
- // Wait for the check TTL to fail.
- time.Sleep(30 * time.Millisecond)
+ // Wait for the check TTL to fail but before the check is reaped.
+ time.Sleep(100 * time.Millisecond)
if _, ok := a.state.Services()["redis"]; !ok {
t.Fatalf("should have redis service")
}
@@ -1568,7 +1568,7 @@ func TestAgent_Service_Reap(t *testing.T) {
}
// Wait for the check TTL to fail again.
- time.Sleep(30 * time.Millisecond)
+ time.Sleep(100 * time.Millisecond)
if _, ok := a.state.Services()["redis"]; !ok {
t.Fatalf("should have redis service")
}
@@ -1577,7 +1577,7 @@ func TestAgent_Service_Reap(t *testing.T) {
}
// Wait for the reap.
- time.Sleep(300 * time.Millisecond)
+ time.Sleep(400 * time.Millisecond)
if _, ok := a.state.Services()["redis"]; ok {
t.Fatalf("redis service should have been reaped")
}
@@ -1587,7 +1587,7 @@ func TestAgent_Service_Reap(t *testing.T) {
}
func TestAgent_Service_NoReap(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
cfg := TestConfig()
cfg.CheckReapInterval = time.Millisecond
cfg.CheckDeregisterIntervalMin = 0
@@ -1603,7 +1603,7 @@ func TestAgent_Service_NoReap(t *testing.T) {
chkTypes := []*structs.CheckType{
&structs.CheckType{
Status: api.HealthPassing,
- TTL: 10 * time.Millisecond,
+ TTL: 25 * time.Millisecond,
},
}
@@ -1621,7 +1621,7 @@ func TestAgent_Service_NoReap(t *testing.T) {
}
// Wait for the check TTL to fail.
- time.Sleep(30 * time.Millisecond)
+ time.Sleep(100 * time.Millisecond)
if _, ok := a.state.Services()["redis"]; !ok {
t.Fatalf("should have redis service")
}
@@ -1630,7 +1630,7 @@ func TestAgent_Service_NoReap(t *testing.T) {
}
// Wait a while and make sure it doesn't reap.
- time.Sleep(300 * time.Millisecond)
+ time.Sleep(200 * time.Millisecond)
if _, ok := a.state.Services()["redis"]; !ok {
t.Fatalf("should have redis service")
}

View File

@@ -23,62 +23,55 @@ import (
"github.com/hashicorp/consul/types"
)
- func expectStatus(t *testing.T, script, status string) {
- notif := mock.NewNotify()
- check := &CheckMonitor{
- Notify: notif,
- CheckID: types.CheckID("foo"),
- Script: script,
- Interval: 10 * time.Millisecond,
- Logger: log.New(ioutil.Discard, UniqueID(), log.LstdFlags),
+ func TestCheckMonitor(t *testing.T) {
+ tests := []struct {
+ script, status string
+ }{
+ {"exit 0", "passing"},
+ {"exit 1", "warning"},
+ {"exit 2", "critical"},
+ {"foobarbaz", "critical"},
}
- check.Start()
- defer check.Stop()
- retry.Run(t, func(r *retry.R) {
- if got, want := notif.Updates("foo"), 2; got < want {
- r.Fatalf("got %d updates want at least %d", got, want)
- }
- if got, want := notif.State("foo"), status; got != want {
- r.Fatalf("got state %q want %q", got, want)
- }
- })
- }
- func TestCheckMonitor_Passing(t *testing.T) {
- t.Parallel()
- expectStatus(t, "exit 0", api.HealthPassing)
- }
- func TestCheckMonitor_Warning(t *testing.T) {
- t.Parallel()
- expectStatus(t, "exit 1", api.HealthWarning)
- }
- func TestCheckMonitor_Critical(t *testing.T) {
- t.Parallel()
- expectStatus(t, "exit 2", api.HealthCritical)
- }
- func TestCheckMonitor_BadCmd(t *testing.T) {
- t.Parallel()
- expectStatus(t, "foobarbaz", api.HealthCritical)
+ for _, tt := range tests {
+ t.Run(tt.status, func(t *testing.T) {
+ notif := mock.NewNotify()
+ check := &CheckMonitor{
+ Notify: notif,
+ CheckID: types.CheckID("foo"),
+ Script: tt.script,
+ Interval: 25 * time.Millisecond,
+ Logger: log.New(ioutil.Discard, UniqueID(), log.LstdFlags),
+ }
+ check.Start()
+ defer check.Stop()
+ retry.Run(t, func(r *retry.R) {
+ if got, want := notif.Updates("foo"), 2; got < want {
+ r.Fatalf("got %d updates want at least %d", got, want)
+ }
+ if got, want := notif.State("foo"), tt.status; got != want {
+ r.Fatalf("got state %q want %q", got, want)
+ }
+ })
+ })
+ }
}
func TestCheckMonitor_Timeout(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
check := &CheckMonitor{
Notify: notif,
CheckID: types.CheckID("foo"),
Script: "sleep 1 && exit 0",
- Interval: 10 * time.Millisecond,
- Timeout: 5 * time.Millisecond,
+ Interval: 50 * time.Millisecond,
+ Timeout: 25 * time.Millisecond,
Logger: log.New(ioutil.Discard, UniqueID(), log.LstdFlags),
}
check.Start()
defer check.Stop()
- time.Sleep(150 * time.Millisecond)
+ time.Sleep(250 * time.Millisecond)
// Should have at least 2 updates
if notif.Updates("foo") < 2 {
@@ -90,7 +83,7 @@ func TestCheckMonitor_Timeout(t *testing.T) {
}
func TestCheckMonitor_RandomStagger(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
check := &CheckMonitor{
Notify: notif,
@@ -102,7 +95,7 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
check.Start()
defer check.Stop()
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(500 * time.Millisecond)
// Should have at least 1 update
if notif.Updates("foo") < 1 {
@@ -136,18 +129,18 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
}
func TestCheckTTL(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
notif := mock.NewNotify()
check := &CheckTTL{
Notify: notif,
CheckID: types.CheckID("foo"),
- TTL: 100 * time.Millisecond,
+ TTL: 200 * time.Millisecond,
Logger: log.New(ioutil.Discard, UniqueID(), log.LstdFlags),
}
check.Start()
defer check.Stop()
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(100 * time.Millisecond)
check.SetStatus(api.HealthPassing, "test-output")
if notif.Updates("foo") != 1 {
@@ -159,13 +152,13 @@ func TestCheckTTL(t *testing.T) {
}
// Ensure we don't fail early
- time.Sleep(75 * time.Millisecond)
+ time.Sleep(150 * time.Millisecond)
if notif.Updates("foo") != 1 {
t.Fatalf("should have 1 updates %v", notif.UpdatesMap())
}
// Wait for the TTL to expire
- time.Sleep(75 * time.Millisecond)
+ time.Sleep(150 * time.Millisecond)
if notif.Updates("foo") != 2 {
t.Fatalf("should have 2 updates %v", notif.UpdatesMap())
@@ -655,14 +648,14 @@ func expectDockerCheckStatus(t *testing.T, dockerClient DockerClient, status str
Script: "/health.sh",
DockerContainerID: "54432bad1fc7",
Shell: "/bin/sh",
- Interval: 10 * time.Millisecond,
+ Interval: 25 * time.Millisecond,
Logger: log.New(ioutil.Discard, UniqueID(), log.LstdFlags),
dockerClient: dockerClient,
}
check.Start()
defer check.Stop()
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(250 * time.Millisecond)
// Should have at least 2 updates
if notif.Updates("foo") < 2 {

View File

@@ -22,11 +22,11 @@ func generateUUID() (ret string) {
}
func TestRexecWriter(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
writer := &rexecWriter{
BufCh: make(chan []byte, 16),
BufSize: 16,
- BufIdle: 10 * time.Millisecond,
+ BufIdle: 100 * time.Millisecond,
CancelCh: make(chan struct{}),
}

View File

@@ -250,7 +250,7 @@ func TestSessionCustomTTL(t *testing.T) {
}
func TestSessionTTLRenew(t *testing.T) {
- t.Parallel()
+ // t.Parallel() // timing test. no parallel
ttl := 250 * time.Millisecond
cfg := TestConfig()
cfg.SessionTTLMin = ttl