package transfer

import (
	"context"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"

	nodetypes "github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/multiaccounts/accounts"
	"github.com/status-im/status-go/rpc/chain"
	"github.com/status-im/status-go/services/wallet/async"
	"github.com/status-im/status-go/services/wallet/balance"
	"github.com/status-im/status-go/services/wallet/token"
	"github.com/status-im/status-go/services/wallet/walletevent"
	"github.com/status-im/status-go/transactions"
)
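
// findNewBlocksCommand periodically checks the chain head and picks up blocks with
// ETH and ERC20 transfers that appeared after the last checked block.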
type findNewBlocksCommand struct {
	*findBlocksCommand
}

func (c *findNewBlocksCommand) Command() async.Command {
	return async.InfiniteCommand{
		// TODO - make it configurable based on chain block mining time
		// NOTE(rasom): ^ it is unclear why each block has to be checked,
		// that is rather undesirable, as it causes a lot of RPC requests
		Interval: 2 * time.Minute,
		Runable:  c.Run,
	}.Run
}
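
// Run compares the last checked block with the current head and, if new blocks
// appeared, scans them for ETH and token transfers.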
func (c *findNewBlocksCommand) Run(parent context.Context) (err error) {
	headNum, err := getHeadBlockNumber(parent, c.chainClient)
	if err != nil {
		return err
	}

	// In case this is the first check, skip it, history fetching will do it
	if c.fromBlockNumber.Cmp(headNum) >= 0 {
		return nil
	}

	c.findAndSaveEthBlocks(parent, c.fromBlockNumber, headNum)
	c.findAndSaveTokenBlocks(parent, c.fromBlockNumber, headNum)

	c.fromBlockNumber = headNum

	return nil
}
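
// findAndSaveEthBlocks scans the range for ETH transfers separately for each account,
// saves the found headers and marks the checked ETH block range for every account.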
func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, fromNum, headNum *big.Int) {
	// Check ETH transfers for each account independently
	mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown()
	if err != nil {
		c.error = err
		return
	}

	for _, account := range c.accounts {
		if mnemonicCheckEnabled && mnemonicWasNotShown {
			acc, err := c.accountsDB.GetAccountByAddress(nodetypes.Address(account))
			if err != nil {
				c.error = err
				return
			}
			if acc.AddressWasNotShown {
				log.Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account)
				continue
			}
		}

		log.Debug("start findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum)

		headers, startBlockNum, _ := c.findBlocksWithEthTransfers(parent, account, fromNum, headNum)
		if len(headers) > 0 {
			log.Debug("findNewBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", headNum,
				"balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), headNum),
				"nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), headNum))

			err := c.db.SaveBlocks(c.chainClient.NetworkID(), headers)
			if err != nil {
				c.error = err
				break
			}

			c.blocksFound(headers)
		}

		err := c.markEthBlockRangeChecked(account, &BlockRange{startBlockNum, fromNum, headNum})
		if err != nil {
			c.error = err
			break
		}

		log.Debug("end findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum)
	}
}
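
// findAndSaveTokenBlocks scans the range for ERC20 transfers for all accounts at once,
// filters out transactions that are already scheduled for loading, saves the rest and
// marks the token block range as checked.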
func (c *findNewBlocksCommand) findAndSaveTokenBlocks(parent context.Context, fromNum, headNum *big.Int) {
	// Check token transfers for all accounts.
	// Each account's last checked block can be different, so we can get duplicated headers
	// and need to deduplicate them.
	const incomingOnly = false
	erc20Headers, err := c.fastIndexErc20(parent, fromNum, headNum, incomingOnly)
	if err != nil {
		log.Error("findNewBlocksCommand fastIndexErc20", "err", err, "account", c.accounts, "chain", c.chainClient.NetworkID())
		c.error = err
		return
	}

	if len(erc20Headers) > 0 {
		log.Debug("findNewBlocksCommand saving headers", "len", len(erc20Headers), "from", fromNum, "to", headNum)

		// get not yet loaded headers from the DB for all accounts and blocks
		preLoadedTransactions, err := c.db.GetTransactionsToLoad(c.chainClient.NetworkID(), common.Address{}, nil)
		if err != nil {
			c.error = err
			return
		}

		tokenBlocksFiltered := filterNewPreloadedTransactions(erc20Headers, preLoadedTransactions)

		err = c.db.SaveBlocks(c.chainClient.NetworkID(), tokenBlocksFiltered)
		if err != nil {
			c.error = err
			return
		}

		c.blocksFound(tokenBlocksFiltered)
	}

	err = c.markTokenBlockRangeChecked(c.accounts, fromNum, headNum)
	if err != nil {
		c.error = err
		return
	}
}
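
// markTokenBlockRangeChecked persists the last checked token block for every account.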
func (c *findNewBlocksCommand) markTokenBlockRangeChecked(accounts []common.Address, from, to *big.Int) error {
	log.Debug("markTokenBlockRangeChecked", "chain", c.chainClient.NetworkID(), "from", from.Uint64(), "to", to.Uint64())

	for _, account := range accounts {
		err := c.blockRangeDAO.updateTokenRange(c.chainClient.NetworkID(), account, &BlockRange{LastKnown: to})
		if err != nil {
			c.error = err
			log.Error("findNewBlocksCommand updateTokenRange", "error", err)
			return err
		}
	}

	return nil
}
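
// filterNewPreloadedTransactions drops headers whose preloaded transaction is already
// stored in the DB, keeping only the ones that still have to be loaded.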
func filterNewPreloadedTransactions(erc20Headers []*DBHeader, preLoadedTransfers []*PreloadedTransaction) []*DBHeader {
	var uniqueErc20Headers []*DBHeader
	for _, header := range erc20Headers {
		loaded := false
		for _, transfer := range preLoadedTransfers {
			if header.PreloadedTransactions[0].ID == transfer.ID {
				loaded = true
				break
			}
		}

		if !loaded {
			uniqueErc20Headers = append(uniqueErc20Headers, header)
		}
	}

	return uniqueErc20Headers
}
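
// findBlocksWithEthTransfers walks the requested interval in chunks of
// defaultNodeBlockChunkSize, moving backwards from the newest block, and collects
// headers of blocks with ETH transfers for the account.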
func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context, account common.Address, fromOrig, toOrig *big.Int) (headers []*DBHeader, startBlockNum *big.Int, err error) {
	log.Debug("start findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber)

	rangeSize := big.NewInt(int64(c.defaultNodeBlockChunkSize))

	from, to := new(big.Int).Set(fromOrig), new(big.Int).Set(toOrig)

	// Limit the range size to DefaultNodeBlockChunkSize
	if new(big.Int).Sub(to, from).Cmp(rangeSize) > 0 {
		from.Sub(to, rangeSize)
	}

	for {
		if from.Cmp(to) == 0 {
			log.Debug("findNewBlocksCommand empty range", "from", from, "to", to)
			break
		}

		fromBlock := &Block{Number: from}

		var newFromBlock *Block
		var ethHeaders []*DBHeader
		newFromBlock, ethHeaders, startBlockNum, err = c.fastIndex(parent, c.balanceCacher, fromBlock, to)
		if err != nil {
			log.Error("findNewBlocksCommand checkRange fastIndex", "err", err, "account", account,
				"chain", c.chainClient.NetworkID())
			c.error = err
			// return err // In case c.noLimit is true, hystrix "max concurrency" may be reached and we will not be able to index ETH transfers
			return nil, nil, nil
		}
		log.Debug("findNewBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
			"startBlock", startBlockNum, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)

		headers = append(headers, ethHeaders...)

		if startBlockNum != nil && startBlockNum.Cmp(from) >= 0 {
			log.Debug("Checked all ranges, stop execution", "startBlock", startBlockNum, "from", from, "to", to)
			break
		}

		nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, newFromBlock.Number, fromOrig)

		if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 {
			log.Debug("findNewBlocksCommand empty next range", "from", from, "to", to)
			break
		}

		from = nextFrom
		to = nextTo
	}

	log.Debug("end findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit)

	return headers, startBlockNum, nil
}
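
// findBlocksCommand scans the history of a single account for blocks containing ETH and
// ERC20 transfers, moving backwards from toBlockNumber in chunks until the start of the
// account history is found.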
// TODO NewFindBlocksCommand
type findBlocksCommand struct {
	accounts                  []common.Address
	db                        *Database
	accountsDB                *accounts.Database
	blockRangeDAO             *BlockRangeSequentialDAO
	chainClient               chain.ClientInterface
	balanceCacher             balance.Cacher
	feed                      *event.Feed
	noLimit                   bool
	transactionManager        *TransactionManager
	tokenManager              *token.Manager
	fromBlockNumber           *big.Int
	toBlockNumber             *big.Int
	blocksLoadedCh            chan<- []*DBHeader
	defaultNodeBlockChunkSize int

	// Not to be set by the caller
	resFromBlock           *Block
	startBlockNumber       *big.Int
	reachedETHHistoryStart bool
	error                  error
}

func (c *findBlocksCommand) Command() async.Command {
	return async.FiniteCommand{
		Interval: 5 * time.Second,
		Runable:  c.Run,
	}.Run
}

type ERC20BlockRange struct {
	from *big.Int
	to   *big.Int
}
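
// ERC20ScanByBalance bisects [fromBlock, toBlock] by comparing the account's token
// balance at the range boundaries: ranges where the balance changed are split in half
// until they fit the chain's batch size, and those small ranges are returned for
// detailed scanning.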
func (c *findBlocksCommand) ERC20ScanByBalance(parent context.Context, account common.Address, fromBlock, toBlock *big.Int, token common.Address) ([]ERC20BlockRange, error) {
	var err error
	batchSize := getErc20BatchSize(c.chainClient.NetworkID())
	ranges := [][]*big.Int{{fromBlock, toBlock}}
	foundRanges := []ERC20BlockRange{}
	cache := map[int64]*big.Int{}
	for {
		nextRanges := [][]*big.Int{}
		for _, blockRange := range ranges {
			from, to := blockRange[0], blockRange[1]
			fromBalance, ok := cache[from.Int64()]
			if !ok {
				fromBalance, err = c.tokenManager.GetTokenBalanceAt(parent, c.chainClient, account, token, from)
				if err != nil {
					return nil, err
				}

				if fromBalance == nil {
					fromBalance = big.NewInt(0)
				}
				cache[from.Int64()] = fromBalance
			}

			toBalance, ok := cache[to.Int64()]
			if !ok {
				toBalance, err = c.tokenManager.GetTokenBalanceAt(parent, c.chainClient, account, token, to)
				if err != nil {
					return nil, err
				}
				if toBalance == nil {
					toBalance = big.NewInt(0)
				}
				cache[to.Int64()] = toBalance
			}

			if fromBalance.Cmp(toBalance) != 0 {
				diff := new(big.Int).Sub(to, from)
				if diff.Cmp(batchSize) <= 0 {
					foundRanges = append(foundRanges, ERC20BlockRange{from, to})
					continue
				}

				halfOfDiff := new(big.Int).Div(diff, big.NewInt(2))
				mid := new(big.Int).Add(from, halfOfDiff)

				nextRanges = append(nextRanges, []*big.Int{from, mid})
				nextRanges = append(nextRanges, []*big.Int{mid, to})
			}
		}

		if len(nextRanges) == 0 {
			break
		}

		ranges = nextRanges
	}

	return foundRanges, nil
}
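
// checkERC20Tail looks for incoming ERC20 transfers older than the first found ETH
// transfer: for every token with a non-zero balance it bisects the history by balance
// and indexes the resulting ranges.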
func (c *findBlocksCommand) checkERC20Tail(parent context.Context, account common.Address) ([]*DBHeader, error) {
	log.Debug("checkERC20Tail", "account", account, "to block", c.startBlockNumber, "from", c.resFromBlock.Number)
	tokens, err := c.tokenManager.GetTokens(c.chainClient.NetworkID())
	if err != nil {
		return nil, err
	}
	addresses := make([]common.Address, len(tokens))
	for i, token := range tokens {
		addresses[i] = token.Address
	}

	from := new(big.Int).Sub(c.resFromBlock.Number, big.NewInt(1))

	clients := make(map[uint64]chain.ClientInterface, 1)
	clients[c.chainClient.NetworkID()] = c.chainClient
	atBlocks := make(map[uint64]*big.Int, 1)
	atBlocks[c.chainClient.NetworkID()] = from
	balances, err := c.tokenManager.GetBalancesAtByChain(parent, clients, []common.Address{account}, addresses, atBlocks)
	if err != nil {
		return nil, err
	}

	foundRanges := []ERC20BlockRange{}
	for token, balance := range balances[c.chainClient.NetworkID()][account] {
		bigintBalance := big.NewInt(balance.ToInt().Int64())
		if bigintBalance.Cmp(big.NewInt(0)) <= 0 {
			continue
		}
		result, err := c.ERC20ScanByBalance(parent, account, big.NewInt(0), from, token)
		if err != nil {
			return nil, err
		}

		foundRanges = append(foundRanges, result...)
	}

	uniqRanges := []ERC20BlockRange{}
	rangesMap := map[string]bool{}
	for _, rangeItem := range foundRanges {
		key := rangeItem.from.String() + "-" + rangeItem.to.String()
		if _, ok := rangesMap[key]; !ok {
			rangesMap[key] = true
			uniqRanges = append(uniqRanges, rangeItem)
		}
	}

	foundHeaders := []*DBHeader{}
	for _, rangeItem := range uniqRanges {
		headers, err := c.fastIndexErc20(parent, rangeItem.from, rangeItem.to, true)
		if err != nil {
			return nil, err
		}
		foundHeaders = append(foundHeaders, headers...)
	}

	return foundHeaders, nil
}
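
// mnemonicCheckEnabled gates skipping history fetching for accounts whose mnemonic has
// not been backed up and whose address has not been shared yet.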
var mnemonicCheckEnabled = false
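
// Run scans block ranges from toBlockNumber down to fromBlockNumber for the account,
// saving the found headers, and checks the remaining ERC20 tail once the start of the
// ETH history is reached.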
func (c *findBlocksCommand) Run(parent context.Context) (err error) {
	log.Debug("start findBlocksCommand", "accounts", c.accounts, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber)

	account := c.accounts[0] // For now this command supports only 1 account
	mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown()
	if err != nil {
		c.error = err
		return err
	}

	if mnemonicCheckEnabled && mnemonicWasNotShown {
		account, err := c.accountsDB.GetAccountByAddress(nodetypes.BytesToAddress(account.Bytes()))
		if err != nil {
			c.error = err
			return err
		}
		if account.AddressWasNotShown {
			log.Info("skip findBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account)
			return nil
		}
	}

	rangeSize := big.NewInt(int64(c.defaultNodeBlockChunkSize))
	from, to := new(big.Int).Set(c.fromBlockNumber), new(big.Int).Set(c.toBlockNumber)

	// Limit the range size to DefaultNodeBlockChunkSize
	if new(big.Int).Sub(to, from).Cmp(rangeSize) > 0 {
		from.Sub(to, rangeSize)
	}

	for {
		if from.Cmp(to) == 0 {
			log.Debug("findBlocksCommand empty range", "from", from, "to", to)
			break
		}

		var headers []*DBHeader
		if c.reachedETHHistoryStart {
			if c.fromBlockNumber.Cmp(zero) == 0 && c.startBlockNumber != nil && c.startBlockNumber.Cmp(zero) == 1 {
				headers, err = c.checkERC20Tail(parent, account)
				if err != nil {
					c.error = err
				}
			}
		} else {
			headers, _ = c.checkRange(parent, from, to)
		}

		if c.error != nil {
			log.Error("findBlocksCommand checkRange", "error", c.error, "account", account,
				"chain", c.chainClient.NetworkID(), "from", from, "to", to)
			break
		}

		if len(headers) > 0 {
			log.Debug("findBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", to,
				"balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), to),
				"nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), to))

			err = c.db.SaveBlocks(c.chainClient.NetworkID(), headers)
			if err != nil {
				c.error = err
				// return err
				break
			}

			c.blocksFound(headers)
		}

		if c.reachedETHHistoryStart {
			log.Debug("findBlocksCommand reached first ETH transfer and checked erc20 tail", "chain", c.chainClient.NetworkID(), "account", account)
			break
		}

		err = c.markEthBlockRangeChecked(account, &BlockRange{c.startBlockNumber, c.resFromBlock.Number, to})
		if err != nil {
			break
		}

		// if we have found the first ETH block and have not reached the start of ETH history yet
		if c.startBlockNumber != nil && c.fromBlockNumber.Cmp(from) == -1 {
			log.Debug("ERC20 tail should be checked", "initial from", c.fromBlockNumber, "actual from", from, "first ETH block", c.startBlockNumber)
			c.reachedETHHistoryStart = true
			continue
		}

		if c.startBlockNumber != nil && c.startBlockNumber.Cmp(from) >= 0 {
			log.Debug("Checked all ranges, stop execution", "startBlock", c.startBlockNumber, "from", from, "to", to)
			break
		}

		nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, c.resFromBlock.Number, c.fromBlockNumber)

		if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 {
			log.Debug("findBlocksCommand empty next range", "from", from, "to", to)
			break
		}

		from = nextFrom
		to = nextTo
	}

	log.Debug("end findBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit)

	return nil
}

func (c *findBlocksCommand) blocksFound(headers []*DBHeader) {
	c.blocksLoadedCh <- headers
}
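
// markEthBlockRangeChecked persists the checked ETH block range for the account.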
func (c *findBlocksCommand) markEthBlockRangeChecked(account common.Address, blockRange *BlockRange) error {
	log.Debug("upsert block range", "Start", blockRange.Start, "FirstKnown", blockRange.FirstKnown, "LastKnown", blockRange.LastKnown,
		"chain", c.chainClient.NetworkID(), "account", account)

	err := c.blockRangeDAO.upsertEthRange(c.chainClient.NetworkID(), account, blockRange)
	if err != nil {
		c.error = err
		log.Error("findBlocksCommand upsertRange", "error", err)
		return err
	}

	return nil
}
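
// checkRange indexes a single block range for ETH and ERC20 transfers, deduplicates the
// found headers per block hash and records where the next iteration should continue from.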
func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to *big.Int) (
	foundHeaders []*DBHeader, err error) {

	account := c.accounts[0]
	fromBlock := &Block{Number: from}

	newFromBlock, ethHeaders, startBlock, err := c.fastIndex(parent, c.balanceCacher, fromBlock, to)
	if err != nil {
		log.Error("findBlocksCommand checkRange fastIndex", "err", err, "account", account,
			"chain", c.chainClient.NetworkID())
		c.error = err
		// return err // In case c.noLimit is true, hystrix "max concurrency" may be reached and we will not be able to index ETH transfers
		return nil, nil
	}
	log.Debug("findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
		"startBlock", startBlock, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)

	// There could be incoming ERC20 transfers which don't change the balance
	// and nonce of ETH account, so we keep looking for them
	erc20Headers, err := c.fastIndexErc20(parent, newFromBlock.Number, to, false)
	if err != nil {
		log.Error("findBlocksCommand checkRange fastIndexErc20", "err", err, "account", account, "chain", c.chainClient.NetworkID())
		c.error = err
		// return err
		return nil, nil
	}

	allHeaders := append(ethHeaders, erc20Headers...)

	if len(allHeaders) > 0 {
		foundHeaders = uniqueHeaderPerBlockHash(allHeaders)
	}

	c.resFromBlock = newFromBlock
	c.startBlockNumber = startBlock

	log.Debug("end findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
		"c.startBlock", c.startBlockNumber, "newFromBlock", newFromBlock.Number,
		"toBlockNumber", to, "c.resFromBlock", c.resFromBlock.Number)

	return
}

func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO *BlockRangeSequentialDAO) (
	*ethTokensBlockRanges, error) {

	blockRange, err := blockDAO.getBlockRange(chainID, account)
	if err != nil {
		log.Error("failed to load block ranges from database", "chain", chainID, "account", account,
			"error", err)
		return nil, err
	}

	return blockRange, nil
}

// areAllHistoryBlocksLoaded reports whether all blocks have been loaded, meaning the
// start block (the beginning of the account history) has been found and all block
// headers have been saved to the DB.
func areAllHistoryBlocksLoaded(blockInfo *BlockRange) bool {
	if blockInfo != nil && blockInfo.FirstKnown != nil && blockInfo.Start != nil &&
		blockInfo.Start.Cmp(blockInfo.FirstKnown) >= 0 {
		return true
	}

	return false
}

func areAllHistoryBlocksLoadedForAddress(blockRangeDAO *BlockRangeSequentialDAO, chainID uint64,
	address common.Address) (bool, error) {

	blockRange, err := blockRangeDAO.getBlockRange(chainID, address)
	if err != nil {
		log.Error("findBlocksCommand getBlockRange", "error", err)
		return false, err
	}

	return areAllHistoryBlocksLoaded(blockRange.eth) && areAllHistoryBlocksLoaded(blockRange.tokens), nil
}

// fastIndex runs fast indexing for the account up to the canonical chain head minus the
// safety depth, starting from the last synced header.
func (c *findBlocksCommand) fastIndex(ctx context.Context, bCacher balance.Cacher,
	fromBlock *Block, toBlockNumber *big.Int) (resultingFrom *Block, headers []*DBHeader,
	startBlock *big.Int, err error) {

	account := c.accounts[0]
	log.Debug("fast index started", "chainID", c.chainClient.NetworkID(), "account", account,
		"from", fromBlock.Number, "to", toBlockNumber)

	start := time.Now()
	group := async.NewGroup(ctx)

	command := &ethHistoricalCommand{
		chainClient:   c.chainClient,
		balanceCacher: bCacher,
		address:       account,
		feed:          c.feed,
		from:          fromBlock,
		to:            toBlockNumber,
		noLimit:       c.noLimit,
		threadLimit:   SequentialThreadLimit,
	}
	group.Add(command.Command())

	select {
	case <-ctx.Done():
		err = ctx.Err()
		log.Debug("fast indexer ctx Done", "error", err)
		return
	case <-group.WaitAsync():
		if command.error != nil {
			err = command.error
			return
		}
		resultingFrom = &Block{Number: command.resultingFrom}
		headers = command.foundHeaders
		startBlock = command.startBlock
		log.Debug("fast indexer finished", "chainID", c.chainClient.NetworkID(), "account", account, "in", time.Since(start),
			"startBlock", command.startBlock, "resultingFrom", resultingFrom.Number, "headers", len(headers))
		return
	}
}

// fastIndexErc20 runs fast ERC20 indexing for the configured accounts up to the canonical
// chain head minus the safety depth, starting from the last synced header.
func (c *findBlocksCommand) fastIndexErc20(ctx context.Context, fromBlockNumber *big.Int,
	toBlockNumber *big.Int, incomingOnly bool) ([]*DBHeader, error) {

	start := time.Now()
	group := async.NewGroup(ctx)

	erc20 := &erc20HistoricalCommand{
		erc20:        NewERC20TransfersDownloader(c.chainClient, c.accounts, types.LatestSignerForChainID(c.chainClient.ToBigInt()), incomingOnly),
		chainClient:  c.chainClient,
		feed:         c.feed,
		from:         fromBlockNumber,
		to:           toBlockNumber,
		foundHeaders: []*DBHeader{},
	}
	group.Add(erc20.Command())

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-group.WaitAsync():
		headers := erc20.foundHeaders
		log.Debug("fast indexer Erc20 finished", "chainID", c.chainClient.NetworkID(),
			"in", time.Since(start), "headers", len(headers))
		return headers, nil
	}
}
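
// loadTransfersLoop listens on blocksLoadedCh and loads the actual transfers for every
// batch of newly found block headers until the context is cancelled.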
func loadTransfersLoop(ctx context.Context, blockDAO *BlockDAO, db *Database,
	chainClient chain.ClientInterface, transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
	tokenManager *token.Manager, feed *event.Feed, blocksLoadedCh <-chan []*DBHeader) {

	log.Debug("loadTransfersLoop start", "chain", chainClient.NetworkID())

	for {
		select {
		case <-ctx.Done():
			log.Info("loadTransfersLoop done", "chain", chainClient.NetworkID(), "error", ctx.Err())
			return
		case dbHeaders := <-blocksLoadedCh:
			log.Debug("loadTransfersOnDemand transfers received", "chain", chainClient.NetworkID(), "headers", len(dbHeaders))

			blocksByAddress := map[common.Address][]*big.Int{}
			// iterate over headers and group them by address
			for _, dbHeader := range dbHeaders {
				blocksByAddress[dbHeader.Address] = append(blocksByAddress[dbHeader.Address], dbHeader.Number)
			}

			go func() {
				_ = loadTransfers(ctx, blockDAO, db, chainClient, noBlockLimit,
					blocksByAddress, transactionManager, pendingTxManager, tokenManager, feed)
			}()
		}
	}
}

func newLoadBlocksAndTransfersCommand(accounts []common.Address, db *Database, accountsDB *accounts.Database,
	blockDAO *BlockDAO, blockRangesSeqDAO *BlockRangeSequentialDAO, chainClient chain.ClientInterface, feed *event.Feed,
	transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
	tokenManager *token.Manager, balanceCacher balance.Cacher, omitHistory bool) *loadBlocksAndTransfersCommand {

	return &loadBlocksAndTransfersCommand{
		accounts:           accounts,
		db:                 db,
		blockRangeDAO:      blockRangesSeqDAO,
		accountsDB:         accountsDB,
		blockDAO:           blockDAO,
		chainClient:        chainClient,
		feed:               feed,
		balanceCacher:      balanceCacher,
		transactionManager: transactionManager,
		pendingTxManager:   pendingTxManager,
		tokenManager:       tokenManager,
		blocksLoadedCh:     make(chan []*DBHeader, 100),
		omitHistory:        omitHistory,
	}
}
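
// loadBlocksAndTransfersCommand orchestrates per-chain history fetching: it loads
// transfers for already known blocks, scans history for every account and keeps watching
// for new blocks, loading transfers as the blocks arrive.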
type loadBlocksAndTransfersCommand struct {
	accounts      []common.Address
	db            *Database
	accountsDB    *accounts.Database
	blockRangeDAO *BlockRangeSequentialDAO
	blockDAO      *BlockDAO
	chainClient   chain.ClientInterface
	feed          *event.Feed
	balanceCacher balance.Cacher
	// nonArchivalRPCNode bool // TODO Make use of it
	transactionManager *TransactionManager
	pendingTxManager   *transactions.PendingTxTracker
	tokenManager       *token.Manager
	blocksLoadedCh     chan []*DBHeader
	omitHistory        bool

	// Not to be set by the caller
	transfersLoaded map[common.Address]bool // For event RecentHistoryReady to be sent only once per account during app lifetime
}
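
// Run schedules transfer loading for already saved blocks, starts the transfers loop,
// launches history fetching for every account and the new-blocks watcher, then waits for
// the group to finish or the context to be cancelled.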
func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) error {
	log.Debug("start load all transfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts)

	ctx := parent

	// This group is used to wait for all the async commands to finish, but
	// findNewBlocksCommand, which is infinite, never finishes and can only be stopped
	// by canceling the context, which does not happen here as we don't call group.Stop().
	group := async.NewGroup(ctx)

	// It will start loadTransfersCommand which will run until success when all transfers from DB are loaded
	err := c.fetchTransfersForLoadedBlocks(group)
	if err != nil {
		return err
	}

	// Start transfers loop to load transfers for new blocks
	c.startTransfersLoop(ctx)

	fromNum := big.NewInt(0)
	headNum, err := getHeadBlockNumber(ctx, c.chainClient)
	if err != nil {
		return err
	}
	// This will start findBlocksCommand which will run until success when all blocks are loaded.
	// Iterate over all accounts and load blocks for each account.
	for _, account := range c.accounts {
		err = c.fetchHistoryBlocks(parent, group, account, fromNum, headNum, c.blocksLoadedCh)
		if err != nil {
			group.Stop()
			group.Wait()
			return err
		}
	}

	c.startFetchingNewBlocks(group, c.accounts, headNum, c.blocksLoadedCh)

	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-group.WaitAsync():
		log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts)
		return nil
	}
}

func (c *loadBlocksAndTransfersCommand) Command() async.Command {
	return async.InfiniteCommand{
		Interval: 5 * time.Second,
		Runable:  c.Run,
	}.Run
}

func (c *loadBlocksAndTransfersCommand) startTransfersLoop(ctx context.Context) {
	go loadTransfersLoop(ctx, c.blockDAO, c.db, c.chainClient, c.transactionManager,
		c.pendingTxManager, c.tokenManager, c.feed, c.blocksLoadedCh)
}
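
// fetchHistoryBlocks schedules findBlocksCommand instances for the parts of the account
// history that have not been checked yet, based on the block ranges stored in the DB.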
func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocks(ctx context.Context, group *async.Group, account common.Address, fromNum, toNum *big.Int, blocksLoadedCh chan []*DBHeader) error {

	log.Debug("fetchHistoryBlocks start", "chainID", c.chainClient.NetworkID(), "account", account, "omit", c.omitHistory)

	if c.omitHistory {
		blockRange := &ethTokensBlockRanges{eth: &BlockRange{nil, big.NewInt(0), toNum}, tokens: &BlockRange{nil, big.NewInt(0), toNum}}
		err := c.blockRangeDAO.upsertRange(c.chainClient.NetworkID(), account, blockRange)
		if err != nil {
			log.Error("fetchHistoryBlocks upsertRange", "error", err)
		}
		return err
	}

	blockRange, err := loadBlockRangeInfo(c.chainClient.NetworkID(), account, c.blockRangeDAO)
	if err != nil {
		log.Error("findBlocksCommand loadBlockRangeInfo", "error", err)
		// c.error = err
		return err // Will keep spinning forever no matter what
	}

	ranges := [][]*big.Int{}

	// There are 2 history intervals:
	// 1) from 0 to FirstKnown
	// 2) from LastKnown to `toNum` (head)
	// If blockRange is nil, we need to load all blocks from `fromNum` to `toNum`.
	// As the current implementation checks ETH first and then tokens, token ranges may be
	// behind ETH ranges when block searching was interrupted, so we use the token ranges.
	if blockRange != nil && blockRange.tokens != nil {
		if blockRange.tokens.LastKnown != nil && toNum.Cmp(blockRange.tokens.LastKnown) > 0 {
			ranges = append(ranges, []*big.Int{blockRange.tokens.LastKnown, toNum})
		}

		if blockRange.tokens.FirstKnown != nil {
			if fromNum.Cmp(blockRange.tokens.FirstKnown) < 0 {
				ranges = append(ranges, []*big.Int{fromNum, blockRange.tokens.FirstKnown})
			} else {
				if !c.transfersLoaded[account] {
					transfersLoaded, err := c.areAllTransfersLoaded(account)
					if err != nil {
						return err
					}

					if transfersLoaded {
						if c.transfersLoaded == nil {
							c.transfersLoaded = make(map[common.Address]bool)
						}
						c.transfersLoaded[account] = true
						c.notifyHistoryReady(account)
					}
				}
			}
		}
	} else {
		ranges = append(ranges, []*big.Int{fromNum, toNum})
	}

	for _, rangeItem := range ranges {
		fbc := &findBlocksCommand{
			accounts:                  []common.Address{account},
			db:                        c.db,
			accountsDB:                c.accountsDB,
			blockRangeDAO:             c.blockRangeDAO,
			chainClient:               c.chainClient,
			balanceCacher:             c.balanceCacher,
			feed:                      c.feed,
			noLimit:                   false,
			fromBlockNumber:           rangeItem[0],
			toBlockNumber:             rangeItem[1],
			transactionManager:        c.transactionManager,
			tokenManager:              c.tokenManager,
			blocksLoadedCh:            blocksLoadedCh,
			defaultNodeBlockChunkSize: DefaultNodeBlockChunkSize,
		}
		group.Add(fbc.Command())
	}

	log.Debug("fetchHistoryBlocks end", "chainID", c.chainClient.NetworkID(), "account", account)

	return nil
}

func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(group *async.Group, addresses []common.Address, fromNum *big.Int, blocksLoadedCh chan<- []*DBHeader) {

	log.Debug("startFetchingNewBlocks", "chainID", c.chainClient.NetworkID(), "accounts", addresses, "db", c.accountsDB)

	newBlocksCmd := &findNewBlocksCommand{
		findBlocksCommand: &findBlocksCommand{
			accounts:                  addresses,
			db:                        c.db,
			accountsDB:                c.accountsDB,
			blockRangeDAO:             c.blockRangeDAO,
			chainClient:               c.chainClient,
			balanceCacher:             c.balanceCacher,
			feed:                      c.feed,
			noLimit:                   false,
			fromBlockNumber:           fromNum,
			transactionManager:        c.transactionManager,
			tokenManager:              c.tokenManager,
			blocksLoadedCh:            blocksLoadedCh,
			defaultNodeBlockChunkSize: DefaultNodeBlockChunkSize,
		},
	}
	group.Add(newBlocksCmd.Command())
}
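
// fetchTransfersForLoadedBlocks schedules a loadTransfersCommand for blocks that were
// saved earlier but whose transfers have not been loaded yet.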
func (c *loadBlocksAndTransfersCommand) fetchTransfersForLoadedBlocks(group *async.Group) error {

	log.Debug("fetchTransfers start", "chainID", c.chainClient.NetworkID(), "accounts", c.accounts)

	blocksMap := make(map[common.Address][]*big.Int)
	for _, account := range c.accounts {
		blocks, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, numberOfBlocksCheckedPerIteration)
		if err != nil {
			log.Error("loadBlocksAndTransfersCommand GetBlocksToLoadByAddress", "error", err)
			return err
		}

		if len(blocks) == 0 {
			log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID(), "account", account)
			continue
		}

		blocksMap[account] = blocks
	}

	if len(blocksMap) == 0 {
		log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID())
		return nil
	}

	txCommand := &loadTransfersCommand{
		accounts:           c.accounts,
		db:                 c.db,
		blockDAO:           c.blockDAO,
		chainClient:        c.chainClient,
		transactionManager: c.transactionManager,
		pendingTxManager:   c.pendingTxManager,
		tokenManager:       c.tokenManager,
		blocksByAddress:    blocksMap,
		feed:               c.feed,
	}

	group.Add(txCommand.Command())

	return nil
}

func (c *loadBlocksAndTransfersCommand) notifyHistoryReady(account common.Address) {
	if c.feed != nil {
		c.feed.Send(walletevent.Event{
			Type:     EventRecentHistoryReady,
			Accounts: []common.Address{account},
			ChainID:  c.chainClient.NetworkID(),
		})
	}
}
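
// areAllTransfersLoaded reports whether the whole block history of the account has been
// scanned and no saved blocks are still waiting for their transfers to be loaded.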
func (c *loadBlocksAndTransfersCommand) areAllTransfersLoaded(account common.Address) (bool, error) {
	allBlocksLoaded, err := areAllHistoryBlocksLoadedForAddress(c.blockRangeDAO, c.chainClient.NetworkID(), account)
	if err != nil {
		log.Error("loadBlockAndTransfersCommand allHistoryBlocksLoaded", "error", err)
		return false, err
	}

	if allBlocksLoaded {
		headers, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, 1)
		if err != nil {
			log.Error("loadBlocksAndTransfersCommand GetFirstSavedBlock", "error", err)
			return false, err
		}

		if len(headers) == 0 {
			return true, nil
		}
	}

	return false, nil
}

// TODO - make it a common method for every service that wants the head block number,
// one that caches the latest block and updates it on timeout
func getHeadBlockNumber(parent context.Context, chainClient chain.ClientInterface) (*big.Int, error) {
	ctx, cancel := context.WithTimeout(parent, 3*time.Second)
	head, err := chainClient.HeaderByNumber(ctx, nil)
	cancel()
	if err != nil {
		log.Error("getHeadBlockNumber", "error", err)
		return nil, err
	}

	return head.Number, err
}
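
// nextRange returns the previous chunk of at most maxRangeSize blocks ending at prevFrom,
// clamped so that it never goes below zeroBlockNumber.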
func nextRange(maxRangeSize int, prevFrom, zeroBlockNumber *big.Int) (*big.Int, *big.Int) {
	log.Debug("next range start", "from", prevFrom, "zeroBlockNumber", zeroBlockNumber)

	rangeSize := big.NewInt(int64(maxRangeSize))

	to := big.NewInt(0).Set(prevFrom)
	from := big.NewInt(0).Sub(to, rangeSize)
	if from.Cmp(zeroBlockNumber) < 0 {
		from = new(big.Int).Set(zeroBlockNumber)
	}

	log.Debug("next range end", "from", from, "to", to, "zeroBlockNumber", zeroBlockNumber)

	return from, to
}