fix(wallet): Made an interface for BlockRangeSequentialDAO to mock it in tests.

Made a configurable timeout interval for the Commander interface.
Added tests to verify that the loadBlocksAndTransfers command is stopped
correctly when the max errors limit is reached.
Ivan Belyakov 2023-12-11 14:29:10 +01:00 committed by IvanBelyakoff
parent 670954b71b
commit 81073b208e
4 changed files with 301 additions and 72 deletions

View File

@@ -9,7 +9,7 @@ import (
 type Command func(context.Context) error
 
 type Commander interface {
-	Command() Command
+	Command(inteval ...time.Duration) Command
 }
 
 // SingleShotCommand runs once.
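Note: because the parameter is variadic, existing Commander implementations and call sites keep compiling; callers that omit the argument get the implementation's default interval, while tests can pass a short one to make retries fast (the new tests below use 1 * time.Millisecond). A minimal sketch of a command under the new signature follows; pollCommand, its package placement and its 30-second default are assumptions for illustration, not part of this commit. The real change to loadBlocksAndTransfersCommand.Command is in the third file below, and it relies on FiniteCommand rerunning Run at the interval while Run keeps returning an error.

// Illustrative sketch only: a Commander implementation under the new
// variadic signature. pollCommand and its 30s default are assumed here.
package example

import (
	"context"
	"time"

	"github.com/status-im/status-go/services/wallet/async"
)

type pollCommand struct{}

// Run does one unit of work per invocation.
func (c *pollCommand) Run(ctx context.Context) error {
	return nil
}

// Command satisfies the updated interface: production callers omit the
// argument, tests pass e.g. 1*time.Millisecond to speed the loop up.
func (c *pollCommand) Command(interval ...time.Duration) async.Command {
	intvl := 30 * time.Second // assumed default for this sketch
	if len(interval) > 0 {
		intvl = interval[0]
	}
	return async.FiniteCommand{Interval: intvl, Runable: c.Run}.Run
}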

View File

@@ -9,6 +9,13 @@ import (
 	"github.com/status-im/status-go/services/wallet/bigint"
 )
 
+type BlockRangeDAOer interface {
+	getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error)
+	upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error)
+	updateTokenRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
+	upsertEthRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
+}
+
 type BlockRangeSequentialDAO struct {
 	db *sql.DB
 }
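Note: the interface methods are unexported, so any implementation, including a test double, has to live in the transfer package; the new tests at the end of this commit do exactly that by embedding *BlockRangeSequentialDAO and overriding getBlockRange. A purely in-memory stub under the same constraint would look roughly like the sketch below (stubBlockRangeDAO is hypothetical and not part of this commit; it assumes the transfer package and ignores chainID).

// Hypothetical in-memory BlockRangeDAOer (assumed to live in the transfer
// package, next to the interface). It only illustrates what the interface
// makes possible for tests; chainID is ignored for brevity.
type stubBlockRangeDAO struct {
	ranges map[common.Address]*ethTokensBlockRanges
}

func (s *stubBlockRangeDAO) getBlockRange(chainID uint64, address common.Address) (*ethTokensBlockRanges, error) {
	return s.ranges[address], nil
}

func (s *stubBlockRangeDAO) upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) error {
	s.ranges[account] = newBlockRange
	return nil
}

func (s *stubBlockRangeDAO) updateTokenRange(chainID uint64, account common.Address, newBlockRange *BlockRange) error {
	return nil // no-op in this sketch
}

func (s *stubBlockRangeDAO) upsertEthRange(chainID uint64, account common.Address, newBlockRange *BlockRange) error {
	return nil // no-op in this sketch
}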

View File

@@ -3,6 +3,7 @@ package transfer
 import (
 	"context"
 	"math/big"
+	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -19,14 +20,38 @@ import (
 	"github.com/status-im/status-go/transactions"
 )
 
-var trLoopCnt int = 0
-var fetchNewBlocksCnt int = 0
-
-func verifyOnce(cnt *int, msg string) {
-	if *cnt > 0 {
-		panic("verifyOnce, function: " + msg)
-	}
-	*cnt++
+func newErrorCounter(msg string) *errorCounter {
+	return &errorCounter{maxErrors: 3, err: nil, cnt: 0, msg: msg}
+}
+
+type errorCounter struct {
+	cnt       int
+	maxErrors int
+	err       error
+	msg       string
+}
+
+// Returns false in case of counter overflow
+func (ec *errorCounter) setError(err error) bool {
+	log.Debug("errorCounter setError", "msg", ec.msg, "err", err, "cnt", ec.cnt)
+	ec.cnt++
+
+	// do not overwrite the first error
+	if ec.err == nil {
+		ec.err = err
+	}
+
+	if ec.cnt >= ec.maxErrors {
+		log.Error("errorCounter overflow", "msg", ec.msg)
+		return false
+	}
+
+	return true
+}
+
+func (ec *errorCounter) Error() error {
+	return ec.err
 }
 
 type findNewBlocksCommand struct {
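Note: setError records a failure, keeps only the first error for later retrieval via Error(), and reports through its boolean return whether the budget of maxErrors (3 by default) is spent; the deferred block added to Run further down uses exactly that signal to decide between letting the command be rerun and cancelling it. A minimal retry loop wired to the same contract, for illustration only (runWithRetries is not part of this commit):

// Illustrative sketch: retry an operation until it succeeds or the
// errorCounter overflows, then surface the first recorded error.
// Assumed to live in the transfer package next to errorCounter.
func runWithRetries(op func() error) error {
	counter := newErrorCounter("runWithRetries example")
	for {
		err := op()
		if err == nil {
			return nil
		}
		if !counter.setError(err) {
			// maxErrors failures recorded: give up
			return counter.Error()
		}
	}
}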
@@ -86,7 +111,12 @@ func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, from
 		log.Debug("start findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum)
 
-		headers, startBlockNum, _ := c.findBlocksWithEthTransfers(parent, account, fromNum, headNum)
+		headers, startBlockNum, err := c.findBlocksWithEthTransfers(parent, account, fromNum, headNum)
+		if err != nil {
+			c.error = err
+			break
+		}
 
 		if len(headers) > 0 {
 			log.Debug("findNewBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", headNum,
 				"balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), headNum),
@@ -101,7 +131,7 @@ func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, from
 			c.blocksFound(headers)
 		}
 
-		err := c.markEthBlockRangeChecked(account, &BlockRange{startBlockNum, fromNum, headNum})
+		err = c.markEthBlockRangeChecked(account, &BlockRange{startBlockNum, fromNum, headNum})
 		if err != nil {
 			c.error = err
 			break
@@ -211,9 +241,7 @@ func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context
 	if err != nil {
 		log.Error("findNewBlocksCommand checkRange fastIndex", "err", err, "account", account,
 			"chain", c.chainClient.NetworkID())
-		c.error = err
-		// return err // In case c.noLimit is true, hystrix "max concurrency" may be reached and we will not be able to index ETH transfers
-		return nil, nil, nil
+		return nil, nil, err
 	}
 
 	log.Debug("findNewBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
 		"startBlock", startBlockNum, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)
@@ -246,7 +274,7 @@ type findBlocksCommand struct {
 	accounts      []common.Address
 	db            *Database
 	accountsDB    *accounts.Database
-	blockRangeDAO *BlockRangeSequentialDAO
+	blockRangeDAO BlockRangeDAOer
 	chainClient   chain.ClientInterface
 	balanceCacher balance.Cacher
 	feed          *event.Feed
@@ -565,7 +593,7 @@ func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to
 	return
 }
 
-func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO *BlockRangeSequentialDAO) (
+func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO BlockRangeDAOer) (
 	*ethTokensBlockRanges, error) {
 
 	blockRange, err := blockDAO.getBlockRange(chainID, account)
@@ -589,7 +617,7 @@ func areAllHistoryBlocksLoaded(blockInfo *BlockRange) bool {
 	return false
 }
 
-func areAllHistoryBlocksLoadedForAddress(blockRangeDAO *BlockRangeSequentialDAO, chainID uint64,
+func areAllHistoryBlocksLoadedForAddress(blockRangeDAO BlockRangeDAOer, chainID uint64,
 	address common.Address) (bool, error) {
 
 	blockRange, err := blockRangeDAO.getBlockRange(chainID, address)
@@ -673,38 +701,41 @@ func (c *findBlocksCommand) fastIndexErc20(ctx context.Context, fromBlockNumber
 	}
 }
 
-func loadTransfersLoop(ctx context.Context, blockDAO *BlockDAO, db *Database,
-	chainClient chain.ClientInterface, transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
-	tokenManager *token.Manager, feed *event.Feed, blocksLoadedCh <-chan []*DBHeader) {
-
-	log.Debug("loadTransfersLoop start", "chain", chainClient.NetworkID())
-
-	verifyOnce(&trLoopCnt, "loadTransfersLoop")
-
-	for {
-		select {
-		case <-ctx.Done():
-			log.Info("loadTransfersLoop done", "chain", chainClient.NetworkID(), "error", ctx.Err())
-			return
-		case dbHeaders := <-blocksLoadedCh:
-			log.Debug("loadTransfersOnDemand transfers received", "chain", chainClient.NetworkID(), "headers", len(dbHeaders))
-
-			blocksByAddress := map[common.Address][]*big.Int{}
-			// iterate over headers and group them by address
-			for _, dbHeader := range dbHeaders {
-				blocksByAddress[dbHeader.Address] = append(blocksByAddress[dbHeader.Address], dbHeader.Number)
-			}
-
-			go func() {
-				_ = loadTransfers(ctx, blockDAO, db, chainClient, noBlockLimit,
-					blocksByAddress, transactionManager, pendingTxManager, tokenManager, feed)
-			}()
+// Start transfers loop to load transfers for new blocks
+func (c *loadBlocksAndTransfersCommand) startTransfersLoop(ctx context.Context) {
+	go func() {
+		defer func() {
+			c.decStarted()
+		}()
+		c.incStarted()
+
+		log.Debug("loadTransfersLoop start", "chain", c.chainClient.NetworkID())
+
+		for {
+			select {
+			case <-ctx.Done():
+				log.Debug("startTransfersLoop done", "chain", c.chainClient.NetworkID(), "error", ctx.Err())
+				return
+			case dbHeaders := <-c.blocksLoadedCh:
+				log.Debug("loadTransfersOnDemand transfers received", "chain", c.chainClient.NetworkID(), "headers", len(dbHeaders))
+
+				blocksByAddress := map[common.Address][]*big.Int{}
+				// iterate over headers and group them by address
+				for _, dbHeader := range dbHeaders {
+					blocksByAddress[dbHeader.Address] = append(blocksByAddress[dbHeader.Address], dbHeader.Number)
+				}
+
+				go func() {
+					_ = loadTransfers(ctx, c.blockDAO, c.db, c.chainClient, noBlockLimit,
+						blocksByAddress, c.transactionManager, c.pendingTxManager, c.tokenManager, c.feed)
+				}()
+			}
 		}
-	}
+	}()
 }
 
 func newLoadBlocksAndTransfersCommand(accounts []common.Address, db *Database, accountsDB *accounts.Database,
-	blockDAO *BlockDAO, blockRangesSeqDAO *BlockRangeSequentialDAO, chainClient chain.ClientInterface, feed *event.Feed,
+	blockDAO *BlockDAO, blockRangesSeqDAO BlockRangeDAOer, chainClient chain.ClientInterface, feed *event.Feed,
 	transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
 	tokenManager *token.Manager, balanceCacher balance.Cacher, omitHistory bool) *loadBlocksAndTransfersCommand {
@@ -722,6 +753,7 @@ func newLoadBlocksAndTransfersCommand(accounts []common.Address, db *Database, a
 		tokenManager:   tokenManager,
 		blocksLoadedCh: make(chan []*DBHeader, 100),
 		omitHistory:    omitHistory,
+		errorCounter:   *newErrorCounter("loadBlocksAndTransfersCommand"),
 	}
 }
@@ -729,7 +761,7 @@ type loadBlocksAndTransfersCommand struct {
 	accounts      []common.Address
 	db            *Database
 	accountsDB    *accounts.Database
-	blockRangeDAO *BlockRangeSequentialDAO
+	blockRangeDAO BlockRangeDAOer
 	blockDAO      *BlockDAO
 	chainClient   chain.ClientInterface
 	feed          *event.Feed
@@ -743,11 +775,30 @@ type loadBlocksAndTransfersCommand struct {
 	// Not to be set by the caller
 	transfersLoaded map[common.Address]bool // For event RecentHistoryReady to be sent only once per account during app lifetime
-	started         bool
+	loops           int
+	errorCounter
+	mu              sync.Mutex
 }
 
-// func (c *loadBlocksAndTransfersCommand) Run(ctx context.Context) error {
-func (c *loadBlocksAndTransfersCommand) Run(ctx context.Context) error {
+func (c *loadBlocksAndTransfersCommand) incStarted() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.loops++
+}
+
+func (c *loadBlocksAndTransfersCommand) decStarted() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.loops--
+}
+
+func (c *loadBlocksAndTransfersCommand) isStarted() bool {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.loops > 0
+}
+
+func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) (err error) {
 	log.Debug("start load all transfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts)
 
 	// Finite processes (to be restarted on error, but stopped on success):
@@ -758,33 +809,40 @@ func (c *loadBlocksAndTransfersCommand) Run(ctx context.Context) error {
 	// fetching new blocks
 	// fetching transfers for new blocks
 
+	ctx, cancel := context.WithCancel(parent)
+	finiteGroup := async.NewAtomicGroup(ctx)
+	defer func() {
+		finiteGroup.Stop()
+		finiteGroup.Wait()
+
+		// if there was an error, and errors overflowed, stop the command
+		if err != nil && !c.setError(err) {
+			log.Error("loadBlocksAndTransfersCommand", "error", c.Error(), "err", err)
+			err = nil // stop the commands
+			cancel()  // stop inner loops
+		}
+	}()
+
 	fromNum := big.NewInt(0)
 	headNum, err := getHeadBlockNumber(ctx, c.chainClient)
 	if err != nil {
 		return err
 	}
 
-	group := async.NewAtomicGroup(ctx)
-	defer func() {
-		group.Stop()
-		group.Wait()
-	}()
-
 	// It will start loadTransfersCommand which will run until success when all transfers from DB are loaded
-	err = c.startFetchingTransfersForLoadedBlocks(group)
+	err = c.startFetchingTransfersForLoadedBlocks(finiteGroup)
 	if err != nil {
 		log.Error("loadBlocksAndTransfersCommand fetchTransfersForLoadedBlocks", "error", err)
 		return err
 	}
 
-	if !c.started {
-		c.started = true
+	if !c.isStarted() {
 		c.startTransfersLoop(ctx)
 		c.startFetchingNewBlocks(ctx, c.accounts, headNum, c.blocksLoadedCh)
 	}
 
 	// It will start findBlocksCommands which will run until success when all blocks are loaded
-	err = c.fetchHistoryBlocks(group, c.accounts, fromNum, headNum, c.blocksLoadedCh)
+	err = c.fetchHistoryBlocks(finiteGroup, c.accounts, fromNum, headNum, c.blocksLoadedCh)
 	if err != nil {
 		log.Error("loadBlocksAndTransfersCommand fetchHistoryBlocks", "error", err)
 		return err
@@ -792,27 +850,29 @@ func (c *loadBlocksAndTransfersCommand) Run(ctx context.Context) error {
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
-	case <-group.WaitAsync():
-		log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts)
-		return nil
+		log.Debug("loadBlocksAndTransfers command cancelled", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", ctx.Err())
+	case <-finiteGroup.WaitAsync():
+		err = finiteGroup.Error() // if there was an error, rerun the command
+		log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", err)
 	}
 
-	return nil
+	return err
 }
 
-func (c *loadBlocksAndTransfersCommand) Command() async.Command {
-	return async.InfiniteCommand{
-		Interval: 5 * time.Second,
+func (c *loadBlocksAndTransfersCommand) Command(interval ...time.Duration) async.Command {
+	// 30s - default interval for Infura's delay returned in error. That should increase chances
+	// for request to succeed with the next attempt for now until we have a proper retry mechanism
+	intvl := 30 * time.Second
+	if len(interval) > 0 {
+		intvl = interval[0]
+	}
+
+	return async.FiniteCommand{
+		Interval: intvl,
 		Runable:  c.Run,
 	}.Run
 }
 
-// Start transfers loop to load transfers for new blocks
-func (c *loadBlocksAndTransfersCommand) startTransfersLoop(ctx context.Context) {
-	go loadTransfersLoop(ctx, c.blockDAO, c.db, c.chainClient, c.transactionManager,
-		c.pendingTxManager, c.tokenManager, c.feed, c.blocksLoadedCh)
-}
-
 func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocks(group *async.AtomicGroup, accounts []common.Address, fromNum, toNum *big.Int, blocksLoadedCh chan []*DBHeader) (err error) {
 	for _, account := range accounts {
 		err = c.fetchHistoryBlocksForAccount(group, account, fromNum, toNum, c.blocksLoadedCh)
@@ -837,8 +897,7 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *asyn
 	blockRange, err := loadBlockRangeInfo(c.chainClient.NetworkID(), account, c.blockRangeDAO)
 	if err != nil {
 		log.Error("fetchHistoryBlocks loadBlockRangeInfo", "error", err)
-		// c.error = err
-		return err // Will keep spinning forever nomatter what
+		return err
 	}
 
 	ranges := [][]*big.Int{}
@@ -906,9 +965,12 @@
 func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(ctx context.Context, addresses []common.Address, fromNum *big.Int, blocksLoadedCh chan<- []*DBHeader) {
 	log.Debug("startFetchingNewBlocks start", "chainID", c.chainClient.NetworkID(), "accounts", addresses)
 
-	verifyOnce(&fetchNewBlocksCnt, "startFetchingNewBlocks")
-
 	go func() {
+		defer func() {
+			c.decStarted()
+		}()
+		c.incStarted()
+
 		newBlocksCmd := &findNewBlocksCommand{
 			findBlocksCommand: &findBlocksCommand{
 				accounts:      addresses,
@@ -931,9 +993,9 @@ func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(ctx context.Conte
 		// No need to wait for the group since it is infinite
 		<-ctx.Done()
-	}()
 
 	log.Debug("startFetchingNewBlocks end", "chainID", c.chainClient.NetworkID(), "accounts", addresses, "error", ctx.Err())
+	}()
 }
 
 func (c *loadBlocksAndTransfersCommand) getBlocksToLoad() (map[common.Address][]*big.Int, error) {

View File

@@ -10,6 +10,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/pkg/errors"
 	"github.com/stretchr/testify/mock"
 
 	"golang.org/x/exp/slices" // since 1.21, this is in the standard library
@@ -29,6 +30,7 @@ import (
 	"github.com/status-im/status-go/services/wallet/async"
 	"github.com/status-im/status-go/services/wallet/balance"
 	"github.com/status-im/status-go/t/helpers"
+	"github.com/status-im/status-go/t/utils"
 
 	"github.com/status-im/status-go/multiaccounts/accounts"
 	"github.com/status-im/status-go/params"
@@ -1337,3 +1339,161 @@ func TestFetchNewBlocksCommand(t *testing.T) {
 	// We must check all the logs for all accounts with a single iteration of eth_getLogs call
 	require.Equal(t, 3, tc.callsCounter["FilterLogs"], "calls to FilterLogs")
 }
+
+type TestClientWithError struct {
+	*TestClient
+}
+
+func (tc *TestClientWithError) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
+	tc.incCounter("HeaderByNumber")
+	if tc.traceAPICalls {
+		tc.t.Log("HeaderByNumber", number)
+	}
+
+	return nil, errors.New("Network error")
+}
+
+func TestLoadBlocksAndTransfersCommand_StopOnErrorsOverflow(t *testing.T) {
+	tc := &TestClientWithError{
+		&TestClient{
+			t:            t,
+			callsCounter: map[string]int{},
+		},
+	}
+
+	cmd := &loadBlocksAndTransfersCommand{
+		chainClient:  tc,
+		errorCounter: *newErrorCounter("testLoadBlocksAndTransfersCommand"),
+	}
+
+	ctx := context.Background()
+	group := async.NewGroup(ctx)
+	group.Add(cmd.Command(1 * time.Millisecond))
+
+	select {
+	case <-ctx.Done():
+		t.Log("Done")
+	case <-group.WaitAsync():
+		t.Log("Command finished", "error", cmd.Error())
+		require.Equal(t, cmd.maxErrors, tc.callsCounter["HeaderByNumber"])
+		_, expectedErr := tc.HeaderByNumber(ctx, nil)
+		require.Error(t, expectedErr, cmd.Error())
+	}
+}
+
+type BlockRangeSequentialDAOMockError struct {
+	*BlockRangeSequentialDAO
+}
+
+func (b *BlockRangeSequentialDAOMockError) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
+	return nil, errors.New("DB error")
+}
+
+func TestLoadBlocksAndTransfersCommand_StopOnErrorsOverflowWhenStarted(t *testing.T) {
+	appdb, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
+	require.NoError(t, err)
+
+	db, err := helpers.SetupTestMemorySQLDB(walletdatabase.DbInitializer{})
+	require.NoError(t, err)
+	wdb := NewDB(db)
+
+	tc := &TestClient{
+		t:            t,
+		callsCounter: map[string]int{},
+	}
+
+	accDB, err := accounts.NewDB(appdb)
+	require.NoError(t, err)
+
+	cmd := &loadBlocksAndTransfersCommand{
+		accounts:    []common.Address{common.HexToAddress("0x1234")},
+		chainClient: tc,
+		blockDAO:    &BlockDAO{db},
+		blockRangeDAO: &BlockRangeSequentialDAOMockError{
+			&BlockRangeSequentialDAO{
+				wdb.client,
+			},
+		},
+		accountsDB: accDB,
+	}
+
+	ctx := context.Background()
+	group := async.NewGroup(ctx)
+	group.Add(cmd.Command(1 * time.Millisecond))
+
+	select {
+	case <-ctx.Done():
+		t.Log("Done")
+	case <-group.WaitAsync():
+		t.Log("Command finished", "error", cmd.Error())
+		_, expectedErr := cmd.blockRangeDAO.getBlockRange(0, common.Address{})
+		require.Error(t, expectedErr, cmd.Error())
+		require.NoError(t, utils.Eventually(func() error {
+			if !cmd.isStarted() {
+				return nil
+			}
+			return errors.New("command is still running")
+		}, 100*time.Millisecond, 10*time.Millisecond))
+	}
+}
+
+type BlockRangeSequentialDAOMockSuccess struct {
+	*BlockRangeSequentialDAO
+}
+
+func (b *BlockRangeSequentialDAOMockSuccess) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
+	return newEthTokensBlockRanges(), nil
+}
+
+func TestLoadBlocksAndTransfersCommand_FiniteFinishedInfiniteRunning(t *testing.T) {
+	appdb, err := helpers.SetupTestMemorySQLDB(appdatabase.DbInitializer{})
+	require.NoError(t, err)
+
+	db, err := helpers.SetupTestMemorySQLDB(walletdatabase.DbInitializer{})
+	require.NoError(t, err)
+	wdb := NewDB(db)
+
+	tc := &TestClient{
+		t:            t,
+		callsCounter: map[string]int{},
+	}
+
+	accDB, err := accounts.NewDB(appdb)
+	require.NoError(t, err)
+
+	cmd := &loadBlocksAndTransfersCommand{
+		accounts:    []common.Address{common.HexToAddress("0x1234")},
+		chainClient: tc,
+		blockDAO:    &BlockDAO{db},
+		blockRangeDAO: &BlockRangeSequentialDAOMockSuccess{
+			&BlockRangeSequentialDAO{
+				wdb.client,
+			},
+		},
+		accountsDB: accDB,
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	group := async.NewGroup(ctx)
+	group.Add(cmd.Command(1 * time.Millisecond))
+
+	select {
+	case <-ctx.Done():
+		cancel() // linter is not happy if cancel is not called on all code paths
+		t.Log("Done")
+	case <-group.WaitAsync():
+		t.Log("Command finished", "error", cmd.Error())
+		require.NoError(t, cmd.Error())
+		require.True(t, cmd.isStarted())
+
+		cancel()
+
+		require.NoError(t, utils.Eventually(func() error {
+			if !cmd.isStarted() {
+				return nil
+			}
+			return errors.New("command is still running")
+		}, 100*time.Millisecond, 10*time.Millisecond))
+	}
+}