les: fix and slim the unit tests of les (#20247)

* les: loosen restrictions in the unit tests

* les: update unit tests

* les, light: slim the unit tests
gary rong 2019-11-07 05:09:37 +08:00 committed by Felföldi Zsolt
parent fc3661f89c
commit b9bac1f384
8 changed files with 61 additions and 31 deletions


@@ -459,6 +459,9 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) {
 	defer func() {
 		c.balanceTracker.setBalance(pb.value, negBalance)
 		if !c.priority && pb.value > 0 {
+			// The capacity should be adjusted based on the requirement,
+			// but we have no idea about the new capacity, need a second
+			// call to update it.
 			c.priority = true
 			c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
 		}


@@ -68,6 +68,14 @@ func (i poolTestPeer) freeClientId() string {
 func (i poolTestPeer) updateCapacity(uint64) {}

+type poolTestPeerWithCap struct {
+	poolTestPeer
+	cap uint64
+}
+
+func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+
 func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
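The new poolTestPeerWithCap helper is plain Go struct embedding: it inherits every poolTestPeer method and overrides only updateCapacity, so the capacity that the pool pushes to a peer becomes observable from a test. A self-contained sketch of the same pattern outside this package (all identifiers below are illustrative, not part of the commit):

	package main

	import "fmt"

	// basePeer stands in for poolTestPeer: an integer id with a no-op capacity hook.
	type basePeer int

	func (p basePeer) id() string              { return fmt.Sprintf("peer-%d", int(p)) }
	func (p basePeer) updateCapacity(_ uint64) {}

	// peerWithCap embeds basePeer and overrides updateCapacity to record the value.
	type peerWithCap struct {
		basePeer
		cap uint64
	}

	func (p *peerWithCap) updateCapacity(cap uint64) { p.cap = cap }

	func main() {
		p := &peerWithCap{basePeer: basePeer(0)}
		p.updateCapacity(10) // in the real test, clientPool.connect triggers this call
		fmt.Println(p.id(), p.cap)
	}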
@@ -308,9 +316,9 @@ func TestFreeClientKickedOut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		pool.connect(poolTestPeer(i), 1)
-		clock.Run(100 * time.Millisecond)
+		clock.Run(time.Millisecond)
 	}
-	if pool.connect(poolTestPeer(11), 1) {
+	if pool.connect(poolTestPeer(10), 1) {
 		t.Fatalf("New free client should be rejected")
 	}
 	clock.Run(5 * time.Minute)

@@ -320,8 +328,8 @@ func TestFreeClientKickedOut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		select {
 		case id := <-kicked:
-			if id != i {
-				t.Fatalf("Kicked client mismatch, want %v, got %v", i, id)
+			if id >= 10 {
+				t.Fatalf("Old client should be kicked, now got: %d", id)
 			}
 		case <-time.NewTimer(time.Second).C:
 			t.Fatalf("timeout")
@@ -364,11 +372,20 @@ func TestDowngradePriorityClient(t *testing.T) {
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})

-	pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute), false)
-	pool.connect(poolTestPeer(0), 10)
+	p := &poolTestPeerWithCap{
+		poolTestPeer: poolTestPeer(0),
+	}
+	pool.addBalance(p.ID(), uint64(time.Minute), false)
+	pool.connect(p, 10)
+	if p.cap != 10 {
+		t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
+	}
 	clock.Run(time.Minute)             // All positive balance should be used up.
 	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
+	if p.cap != 1 {
+		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
+	}
 	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
 	if pb.value != 0 {
 		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)


@@ -110,13 +110,15 @@ func (d *requestDistributor) registerTestPeer(p distPeer) {
 	d.peerLock.Unlock()
 }

-// distMaxWait is the maximum waiting time after which further necessary waiting
-// times are recalculated based on new feedback from the servers
-const distMaxWait = time.Millisecond * 50
-
-// waitForPeers is the time window in which a request does not fail even if it
-// has no suitable peers to send to at the moment
-const waitForPeers = time.Second * 3
+var (
+	// distMaxWait is the maximum waiting time after which further necessary waiting
+	// times are recalculated based on new feedback from the servers
+	distMaxWait = time.Millisecond * 50
+
+	// waitForPeers is the time window in which a request does not fail even if it
+	// has no suitable peers to send to at the moment
+	waitForPeers = time.Second * 3
+)

 // main event loop
 func (d *requestDistributor) loop() {
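Turning these two timeouts into package-level vars (rather than consts) is what lets the tests further down zero out waitForPeers. A minimal sketch of the pattern this enables; the test name and the save-and-restore wrapper are my own illustration, while the actual tests in this commit simply assign waitForPeers = 0:

	func TestWithoutPeerWait(t *testing.T) {
		old := waitForPeers
		waitForPeers = 0 // fail immediately when no suitable peer is available
		defer func() { waitForPeers = old }()
		// ... drive requests through the request distributor here ...
	}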


@@ -86,8 +86,8 @@ func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{})
 const (
 	testDistBufLimit       = 10000000
 	testDistMaxCost        = 1000000
-	testDistPeerCount      = 5
-	testDistReqCount       = 5000
+	testDistPeerCount      = 2
+	testDistReqCount       = 10
 	testDistMaxResendCount = 3
 )

@@ -128,6 +128,9 @@ func testRequestDistributor(t *testing.T, resend bool) {
 		go peers[i].worker(t, !resend, stop)
 		dist.registerTestPeer(peers[i])
 	}
+	// Disable the mechanism that waits a while for a request even when
+	// there is no suitable peer to send it to at the moment.
+	waitForPeers = 0

 	var wg sync.WaitGroup


@@ -193,6 +193,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 	if clientHead.Number.Uint64() != 4 {
 		t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
 	}
+	// Disable the mechanism that waits a while for a request even when
+	// there is no suitable peer to send it to at the moment.
+	waitForPeers = 0

 	test := func(expFail uint64) {
 		// Mark this as a helper to put the failures at the correct lines

@@ -202,7 +205,9 @@
 			bhash := rawdb.ReadCanonicalHash(server.db, i)
 			b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)

-			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+			// Set the timeout to 1 second here to give the Travis CI runners
+			// enough time to serve the request.
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 			b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
 			cancel()


@@ -89,7 +89,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
 	for {
 		_, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
 		if err != nil || hash == [32]byte{} {
-			time.Sleep(100 * time.Millisecond)
+			time.Sleep(10 * time.Millisecond)
 			continue
 		}
 		break


@@ -71,10 +71,10 @@ var (
 var (
 	// The block frequency for creating checkpoint(only used in test)
-	sectionSize = big.NewInt(512)
+	sectionSize = big.NewInt(128)
 	// The number of confirmations needed to generate a checkpoint(only used in test).
-	processConfirms = big.NewInt(4)
+	processConfirms = big.NewInt(1)
 	// The token bucket buffer limit for testing purpose.
 	testBufLimit = uint64(1000000)


@@ -79,21 +79,21 @@ var (
 	}

 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       4,
-		BloomSize:         64,
-		BloomConfirms:     4,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 4,
+		ChtSize:           128,
+		ChtConfirms:       1,
+		BloomSize:         16,
+		BloomConfirms:     1,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 1,
 	}
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       32,
-		BloomSize:         512,
-		BloomConfirms:     32,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 32,
+		ChtSize:           128,
+		ChtConfirms:       8,
+		BloomSize:         128,
+		BloomConfirms:     8,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 8,
 	}
 )
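For a rough sense of how much the indexer-config hunk above slims the test setup (my own back-of-the-envelope arithmetic, not stated in the commit): a CHT section only becomes usable once a full section plus its confirmations is in the chain, so the server-side test chain shrinks from about 512 + 4 = 516 blocks to 128 + 1 = 129, and the client-side wait drops from about 512 + 32 = 544 blocks to 128 + 8 = 136.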