Fix another round of flaky tests (#996)

Adam Babik 2018-05-29 13:24:23 +02:00 committed by GitHub
parent c41ae530c3
commit 717d0fbadb
6 changed files with 48 additions and 33 deletions


@@ -92,8 +92,8 @@ func (s *CellTestSuite) TestCellFetchRace() {
defer server.Close()
cell := s.cell
dataCh := make(chan otto.Value, 1)
errCh := make(chan otto.Value, 1)
dataCh := make(chan otto.Value, requestCount)
errCh := make(chan otto.Value, requestCount)
err := cell.Set("__captureSuccess", func(res otto.Value) { dataCh <- res })
s.NoError(err)
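
The fix above grows the dataCh/errCh buffers from 1 to requestCount, so every concurrent fetch callback can deliver its result even if the test reads fewer values than were sent. A minimal, self-contained sketch of the pattern (requestCount and the integer payload here are illustrative, not the suite's actual code):

```go
package main

import "fmt"

func main() {
	const requestCount = 5

	// Buffer the channel to the number of concurrent senders so a late
	// callback never blocks (and never leaks a goroutine) after the test
	// has stopped reading.
	dataCh := make(chan int, requestCount)

	for i := 0; i < requestCount; i++ {
		go func(i int) { dataCh <- i }(i)
	}

	// The test may only need the first result; the remaining sends still
	// complete because the buffer has room for all of them.
	fmt.Println("first result:", <-dataCh)
}
```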


@@ -232,16 +232,16 @@ func TestStatusNodeReconnectStaticPeers(t *testing.T) {
require.NoError(t, <-errCh)
}
require.Equal(t, 1, n.PeerCount())
require.Equal(t, peer.Server().Self().ID.String(), n.GethNode().Server().PeersInfo()[0].ID)
// reconnect static peers
errCh = waitForPeerAsync(n, peerURL, p2p.PeerEventTypeDrop, time.Second*30)
errDropCh := waitForPeerAsync(n, peerURL, p2p.PeerEventTypeDrop, time.Second*30)
// it takes at least 30 seconds to bring back previously connected peer
errAddCh := waitForPeerAsync(n, peerURL, p2p.PeerEventTypeAdd, time.Second*60)
require.NoError(t, n.ReconnectStaticPeers())
// first check if a peer gets disconnected
require.NoError(t, <-errCh)
// it takes at least 30 seconds to bring back previously connected peer
errCh = waitForPeerAsync(n, peerURL, p2p.PeerEventTypeAdd, time.Second*60)
require.NoError(t, <-errCh)
require.Equal(t, 1, n.PeerCount())
require.NoError(t, <-errDropCh)
require.NoError(t, <-errAddCh)
}
func isPeerConnected(node *StatusNode, peerURL string) (bool, error) {
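
The reordering above registers watchers for both the drop and the add event before ReconnectStaticPeers is called, so neither event can fire in the gap between triggering the reconnect and starting to wait. A rough, self-contained sketch of the same idea with a hypothetical waitFor helper (the real test uses waitForPeerAsync and p2p peer events):

```go
package main

import (
	"fmt"
	"time"
)

// waitFor is a hypothetical stand-in for waitForPeerAsync: subscribe first,
// then return a channel that yields nil when the wanted event arrives, or an
// error when the timeout expires.
func waitFor(events <-chan string, want string, timeout time.Duration) <-chan error {
	result := make(chan error, 1)
	go func() {
		deadline := time.After(timeout)
		for {
			select {
			case ev := <-events:
				if ev == want {
					result <- nil
					return
				}
			case <-deadline:
				result <- fmt.Errorf("timed out waiting for %q", want)
				return
			}
		}
	}()
	return result
}

func main() {
	dropEvents := make(chan string, 1)
	addEvents := make(chan string, 1)

	// Register both watchers BEFORE triggering the action that produces
	// the events, so neither event can be missed.
	dropped := waitFor(dropEvents, "drop", time.Second)
	added := waitFor(addEvents, "add", time.Second)

	// Simulate the reconnect: the peer is dropped and then re-added.
	dropEvents <- "drop"
	addEvents <- "add"

	fmt.Println("drop:", <-dropped)
	fmt.Println("add:", <-added)
}
```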


@@ -187,6 +187,8 @@ func (s *PeerPoolSimulationSuite) TestSingleTopicDiscoveryWithFailover() {
s.Require().NoError(peerPool.Start(s.peers[1]))
defer peerPool.Stop()
s.Equal(signal.EventDiscoveryStarted, s.getPoolEvent(poolEvents))
// wait for the peer to be found and connected
connectedPeer := s.getPeerFromEvent(events, p2p.PeerEventTypeAdd)
s.Equal(s.peers[0].Self().ID, connectedPeer)
// as the upper limit was reached, Discovery should be stopped
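
The addition above has the failover test wait for the peer to be found and connected before asserting anything about it. A small sketch of that event-driven wait, with a locally defined event type standing in for the p2p peer event feed:

```go
package main

import "fmt"

// peerEvent is a simplified stand-in for an entry in a p2p peer event feed.
type peerEvent struct {
	Kind string // "add", "drop", ...
	ID   string
}

// peerFromEvent drains the feed until an event of the wanted kind arrives and
// returns the peer ID it carries; asserting on that ID afterwards cannot race
// the connection being established.
func peerFromEvent(events <-chan peerEvent, kind string) string {
	for ev := range events {
		if ev.Kind == kind {
			return ev.ID
		}
	}
	return ""
}

func main() {
	events := make(chan peerEvent, 2)
	events <- peerEvent{Kind: "drop", ID: "old-peer"}
	events <- peerEvent{Kind: "add", ID: "discovered-peer"}
	close(events)

	fmt.Println("connected peer:", peerFromEvent(events, "add"))
}
```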


@@ -1074,7 +1074,7 @@ func testDiscardTransaction(t *testing.T) bool { //nolint: gocyclo
if envelope.Type == signal.EventSignRequestFailed {
event := envelope.Event.(map[string]interface{})
t.Logf("transaction return event received: {id: %s}\n", event["id"].(string))
t.Logf("transaction return event received: %+v\n", event)
receivedErrMessage := event["error_message"].(string)
expectedErrMessage := sign.ErrSignReqDiscarded.Error()
@@ -1099,7 +1099,14 @@ func testDiscardTransaction(t *testing.T) bool { //nolint: gocyclo
To: account.ToAddress(TestConfig.Account2.Address),
Value: (*hexutil.Big)(big.NewInt(1000000000000)),
})
time.Sleep(1 * time.Second)
select {
case <-txFailedEventCalled:
case <-time.After(time.Second * 10):
t.Error("expected tx failure signal is not received")
return false
}
if err != sign.ErrSignReqDiscarded {
t.Errorf("expected error not thrown: %v", err)
return false
@@ -1115,13 +1122,7 @@ func testDiscardTransaction(t *testing.T) bool { //nolint: gocyclo
return false
}
select {
case <-txFailedEventCalled:
return true
default:
t.Error("expected tx failure signal is not received")
return false
}
return true
}
func testDiscardMultipleQueuedTransactions(t *testing.T) bool { //nolint: gocyclo
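
The hunks above replace a fixed one-second time.Sleep plus a later non-blocking select/default check with a single blocking select that waits up to ten seconds for the tx-failed signal (and make the logged event more informative). A minimal sketch of that timeout pattern (signalCh is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForSignal blocks until the expected signal arrives or the deadline
// passes; compare with sleeping for a fixed second and then peeking at the
// channel with a default case, which is what made the test flaky.
func waitForSignal(signalCh <-chan struct{}, timeout time.Duration) error {
	select {
	case <-signalCh:
		return nil
	case <-time.After(timeout):
		return errors.New("expected signal was not received in time")
	}
}

func main() {
	signalCh := make(chan struct{}, 1)

	// Simulate the tx-failed signal arriving after some delay; on a slow
	// CI machine such a delay can easily exceed a fixed 1-second sleep.
	go func() {
		time.Sleep(50 * time.Millisecond)
		signalCh <- struct{}{}
	}()

	fmt.Println("wait result:", waitForSignal(signalCh, 10*time.Second))
}
```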


@@ -3,6 +3,7 @@ package rpcfilters
import (
"math/big"
"sync"
"sync/atomic"
"testing"
"time"
@@ -37,11 +38,11 @@ func TestEventSubscribe(t *testing.T) {
}
func TestZeroSubsciptionsOptimization(t *testing.T) {
counter := 0
counter := int64(0)
hash := common.HexToHash("0xFF")
f := func() (blockInfo, error) {
counter++
atomic.AddInt64(&counter, 1)
number := big.NewInt(1)
return blockInfo{hash, hexutil.Bytes(number.Bytes())}, nil
}
@@ -56,7 +57,7 @@ func TestZeroSubsciptionsOptimization(t *testing.T) {
time.Sleep(10 * time.Millisecond)
// check that our provider function wasn't called when there are no subscribers to it
assert.Equal(t, 0, counter)
assert.Equal(t, int64(0), atomic.LoadInt64(&counter))
// subscribing an event, checking that it works
id, channel := event.Subscribe()
@@ -72,14 +73,14 @@ func TestZeroSubsciptionsOptimization(t *testing.T) {
event.Unsubscribe(id)
// check that our function was called multiple times
assert.True(t, counter > 0)
counterValue := counter
counterValue := atomic.LoadInt64(&counter)
assert.True(t, counterValue > 0)
// let the ticker call the provider ~10 times
time.Sleep(10 * time.Millisecond)
// check that our provider function wasn't called when there are no subscribers to it
assert.Equal(t, counterValue, counter)
assert.Equal(t, counterValue, atomic.LoadInt64(&counter))
}
func TestMultipleSubscribe(t *testing.T) {
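
Here counter is incremented by the provider function running on the event's goroutine and read by the test goroutine, so the change routes every access through sync/atomic; a plain counter++ racing with the test's reads is exactly what the race detector flags intermittently. A self-contained sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var counter int64

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The worker goroutines and the main goroutine share the same
			// counter, so every write goes through sync/atomic.
			atomic.AddInt64(&counter, 1)
		}()
	}
	wg.Wait()

	// Reads go through sync/atomic as well.
	fmt.Println("calls:", atomic.LoadInt64(&counter))
}
```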


@@ -9,6 +9,8 @@ import (
"testing"
"time"
"github.com/robertkrimen/otto"
gethcommon "github.com/ethereum/go-ethereum/common"
"github.com/status-im/status-go/geth/jail"
@@ -253,6 +255,10 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
err := s.Backend.SelectAccount(TestConfig.Account1.Address, TestConfig.Account1.Password)
s.NoError(err, "cannot select account: %v", TestConfig.Account1.Address)
// there are two `sendTestTx` calls in `testCases`
var wgTransctions sync.WaitGroup
wgTransctions.Add(2)
type testCase struct {
command string
params string
@@ -323,6 +329,7 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
web3.eth.sendTransaction(transaction, function (error, result) {
if(!error) {
total += Number(amount);
_done();
}
});
}
@@ -331,7 +338,17 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
`)
s.NotContains(parseResult, "error", "further will fail if initial parsing failed")
var wg sync.WaitGroup
cell, err := jail.Cell(testChatID)
s.NoError(err)
// create a bridge between JS code and Go
// to notify when the tx callback is called
err = cell.Set("_done", func(call otto.FunctionCall) otto.Value {
wgTransctions.Done()
return otto.UndefinedValue()
})
s.NoError(err)
signal.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
var envelope signal.Envelope
if e := json.Unmarshal([]byte(jsonEvent), &envelope); e != nil {
@@ -348,12 +365,12 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
s.NoError(result.Error, "cannot complete queued transaction[%v]: %v", event["id"], result.Error)
txHash.SetBytes(result.Response.Bytes())
s.T().Logf("Transaction complete: https://rinkeby.etherscan.io/tx/%s", txHash.Hex())
s.T().Logf("Transaction complete: %s", txHash.Hex())
}
})
// run commands concurrently
var wg sync.WaitGroup
for _, tc := range testCases {
wg.Add(1)
go func(tc testCase) {
@@ -371,6 +388,7 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
finishTestCases := make(chan struct{})
go func() {
wg.Wait()
wgTransctions.Wait()
close(finishTestCases)
}()
@@ -380,14 +398,7 @@ func (s *JailRPCTestSuite) TestJailVMPersistence() {
s.FailNow("some tests failed to finish in time")
}
// Wait till eth_sendTransaction callbacks have been executed.
// FIXME(tiabc): more reliable means of testing that.
time.Sleep(5 * time.Second)
// Validate total.
cell, err := jail.Cell(testChatID)
s.NoError(err)
totalOtto, err := cell.Get("total")
s.NoError(err)
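
Instead of sleeping for five seconds and hoping both eth_sendTransaction callbacks have run, the test now exposes a _done function to the jailed JS via cell.Set and waits on a WaitGroup armed for the two expected calls. A minimal, self-contained sketch of that JS-to-Go bridge using the otto package (the script below is illustrative, not the test's):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()

	// Two callbacks are expected, so the WaitGroup is armed for exactly
	// two _done() calls.
	var wg sync.WaitGroup
	wg.Add(2)

	// Bridge between the JS code and Go: the script signals completion by
	// calling _done(), and the Go side waits on the WaitGroup instead of
	// sleeping for a fixed amount of time.
	if err := vm.Set("_done", func(call otto.FunctionCall) otto.Value {
		wg.Done()
		return otto.UndefinedValue()
	}); err != nil {
		panic(err)
	}

	// In the real test the callbacks fire asynchronously once the queued
	// transactions complete; running the script in a goroutine mimics that.
	go func() {
		if _, err := vm.Run(`_done(); _done();`); err != nil {
			panic(err)
		}
	}()

	wg.Wait()
	fmt.Println("both callbacks reported done")
}
```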