2023-05-19 08:19:48 +00:00
|
|
|
package transfer
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"math/big"
|
2024-01-17 11:46:59 +00:00
|
|
|
"sync/atomic"
|
2023-05-19 08:19:48 +00:00
|
|
|
"time"
|
|
|
|
|
2024-01-19 15:57:04 +00:00
|
|
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
2023-05-19 08:19:48 +00:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
|
|
"github.com/ethereum/go-ethereum/event"
|
|
|
|
"github.com/ethereum/go-ethereum/log"
|
2024-01-19 15:57:04 +00:00
|
|
|
"github.com/status-im/status-go/contracts"
|
2023-12-01 11:30:42 +00:00
|
|
|
nodetypes "github.com/status-im/status-go/eth-node/types"
|
|
|
|
"github.com/status-im/status-go/multiaccounts/accounts"
|
2023-05-19 08:19:48 +00:00
|
|
|
"github.com/status-im/status-go/rpc/chain"
|
|
|
|
"github.com/status-im/status-go/services/wallet/async"
|
2023-09-04 05:34:09 +00:00
|
|
|
"github.com/status-im/status-go/services/wallet/balance"
|
2024-01-25 12:05:59 +00:00
|
|
|
"github.com/status-im/status-go/services/wallet/blockchainstate"
|
2023-06-02 20:08:45 +00:00
|
|
|
"github.com/status-im/status-go/services/wallet/token"
|
2023-05-19 08:19:48 +00:00
|
|
|
"github.com/status-im/status-go/services/wallet/walletevent"
|
2023-06-21 14:09:55 +00:00
|
|
|
"github.com/status-im/status-go/transactions"
|
2023-05-19 08:19:48 +00:00
|
|
|
)
|
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
// findBlocksRetryInterval is the default delay between retries of a
// findBlocksCommand run; kept as a var so it can be overridden (e.g. in tests).
var findBlocksRetryInterval = 5 * time.Second
|
2023-12-10 14:31:30 +00:00
|
|
|
|
2024-05-20 12:38:02 +00:00
|
|
|
const (
	// Tags distinguishing historical vs newly discovered transfer fetches
	// (consumed elsewhere in this package).
	transferHistoryTag    = "transfer_history"
	newTransferHistoryTag = "new_transfer_history"

	// Limits applied when fetching transfer history.
	transferHistoryLimit           = 10000
	transferHistoryLimitPerAccount = 5000
	transferHistoryLimitPeriod     = 24 * time.Hour
)
|
2024-05-20 11:21:21 +00:00
|
|
|
|
2024-02-08 11:54:04 +00:00
|
|
|
// nonceInfo caches an account's transaction nonce as observed at a specific
// block, so repeated nonce lookups can be skipped while the last known block
// has not advanced.
type nonceInfo struct {
	nonce       *int64   // nonce observed at blockNumber
	blockNumber *big.Int // block the nonce was fetched at
}
|
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
// findNewBlocksCommand periodically discovers new blocks with transfers for
// the watched accounts, starting from the last known head. It embeds
// findBlocksCommand for the actual block-scanning logic.
type findNewBlocksCommand struct {
	*findBlocksCommand
	contractMaker   *contracts.ContractMaker
	iteration       int // run counter, used to throttle the nonce and log checks below
	blockChainState *blockchainstate.BlockChainState
	lastNonces      map[common.Address]nonceInfo // per-account nonce cache (lazily initialized)
	// Every N iterations: nonce-based detection of transfers made outside the app.
	nonceCheckIntervalIterations int
	// Every N iterations: token transfer log scan.
	logsCheckIntervalIterations int
}
|
|
|
|
|
|
|
|
func (c *findNewBlocksCommand) Command() async.Command {
|
|
|
|
return async.InfiniteCommand{
|
2023-10-25 08:00:28 +00:00
|
|
|
Interval: 2 * time.Minute,
|
2023-06-14 10:00:56 +00:00
|
|
|
Runable: c.Run,
|
|
|
|
}.Run
|
|
|
|
}
|
|
|
|
|
2024-01-19 15:57:04 +00:00
|
|
|
// requestTimeout bounds a single balance-hash RPC call; kept as a var so it
// can be overridden (e.g. in tests).
var requestTimeout = 20 * time.Second
|
|
|
|
|
|
|
|
func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts []common.Address) (*big.Int, []common.Address, error) {
|
|
|
|
bc, err := c.contractMaker.NewBalanceChecker(c.chainClient.NetworkID())
|
|
|
|
if err != nil {
|
|
|
|
log.Error("findNewBlocksCommand error creating balance checker", "error", err, "chain", c.chainClient.NetworkID())
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
tokens, err := c.tokenManager.GetTokens(c.chainClient.NetworkID())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
tokenAddresses := []common.Address{}
|
|
|
|
nilAddress := common.Address{}
|
|
|
|
for _, token := range tokens {
|
|
|
|
if token.Address != nilAddress {
|
|
|
|
tokenAddresses = append(tokenAddresses, token.Address)
|
|
|
|
}
|
|
|
|
}
|
2024-03-18 08:12:50 +00:00
|
|
|
log.Debug("findNewBlocksCommand detectTransfers", "cnt", len(tokenAddresses))
|
2024-01-19 15:57:04 +00:00
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(parent, requestTimeout)
|
|
|
|
defer cancel()
|
|
|
|
blockNum, hashes, err := bc.BalancesHash(&bind.CallOpts{Context: ctx}, c.accounts, tokenAddresses)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("findNewBlocksCommand can't get balances hashes", "error", err)
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
addressesToCheck := []common.Address{}
|
|
|
|
for idx, account := range accounts {
|
2024-02-19 15:50:07 +00:00
|
|
|
blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
|
2024-01-19 15:57:04 +00:00
|
|
|
if err != nil {
|
2024-02-19 15:50:07 +00:00
|
|
|
log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID())
|
2024-01-19 15:57:04 +00:00
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
checkHash := common.BytesToHash(hashes[idx][:])
|
|
|
|
log.Debug("findNewBlocksCommand comparing hashes", "account", account, "network", c.chainClient.NetworkID(), "old hash", blockRange.balanceCheckHash, "new hash", checkHash.String())
|
|
|
|
if checkHash.String() != blockRange.balanceCheckHash {
|
|
|
|
addressesToCheck = append(addressesToCheck, account)
|
|
|
|
}
|
|
|
|
|
|
|
|
blockRange.balanceCheckHash = checkHash.String()
|
|
|
|
|
|
|
|
err = c.blockRangeDAO.upsertRange(c.chainClient.NetworkID(), account, blockRange)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("findNewBlocksCommand can't update balance check", "error", err, "account", account, "chain", c.chainClient.NetworkID())
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return blockNum, addressesToCheck, nil
|
|
|
|
}
|
|
|
|
|
2024-02-08 11:54:04 +00:00
|
|
|
// detectNonceChange compares each account's nonce at its last known block
// with its nonce at block `to`, returning the accounts whose nonce changed
// mapped to the block number to start scanning from (their last known
// block). Fetched nonces are cached in c.lastNonces, keyed by account, so
// the "old" nonce is only re-fetched when the last known block advances.
func (c *findNewBlocksCommand) detectNonceChange(parent context.Context, to *big.Int, accounts []common.Address) (map[common.Address]*big.Int, error) {
	addressesWithChange := map[common.Address]*big.Int{}
	for _, account := range accounts {
		var oldNonce *int64

		blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
		if err != nil {
			log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID())
			return nil, err
		}

		// Reuse the cached nonce only if it was taken at the same
		// last-known block; otherwise fetch it.
		lastNonceInfo, ok := c.lastNonces[account]
		if !ok || lastNonceInfo.blockNumber.Cmp(blockRange.eth.LastKnown) != 0 {
			log.Debug("Fetching old nonce", "at", blockRange.eth.LastKnown, "acc", account)
			if blockRange.eth.LastKnown == nil {
				blockRange.eth.LastKnown = big.NewInt(0)
				oldNonce = new(int64) // At 0 block nonce is 0
			} else {
				oldNonce, err = c.balanceCacher.NonceAt(parent, c.chainClient, account, blockRange.eth.LastKnown)
				if err != nil {
					log.Error("findNewBlocksCommand can't get nonce", "error", err, "account", account, "chain", c.chainClient.NetworkID())
					return nil, err
				}
			}
		} else {
			oldNonce = lastNonceInfo.nonce
		}

		newNonce, err := c.balanceCacher.NonceAt(parent, c.chainClient, account, to)
		if err != nil {
			log.Error("findNewBlocksCommand can't get nonce", "error", err, "account", account, "chain", c.chainClient.NetworkID())
			return nil, err
		}

		log.Debug("Comparing nonces", "oldNonce", *oldNonce, "newNonce", *newNonce, "to", to, "acc", account)

		if *newNonce != *oldNonce {
			addressesWithChange[account] = blockRange.eth.LastKnown
		}

		// Lazily create the cache map on first use.
		if c.lastNonces == nil {
			c.lastNonces = map[common.Address]nonceInfo{}
		}

		// Remember the nonce observed at `to` for the next run.
		c.lastNonces[account] = nonceInfo{
			nonce:       newNonce,
			blockNumber: to,
		}
	}

	return addressesWithChange, nil
}
|
|
|
|
|
2024-01-19 15:57:04 +00:00
|
|
|
// Default cadences (in Run iterations) for the expensive fallback checks:
// nonce-based detection of transfers made outside the app, and token
// transfer log scans. Vars so they can be overridden (e.g. in tests).
var nonceCheckIntervalIterations = 30
var logsCheckIntervalIterations = 5
|
|
|
|
|
|
|
|
// Run executes one iteration of the new-blocks discovery loop:
//  1. filter the watched accounts (skip accounts whose address was never
//     shared while the mnemonic is still not backed up),
//  2. detect balance-hash changes and, if any, scan ETH blocks for all
//     checked accounts,
//  3. otherwise, every nonceCheckIntervalIterations, fall back to
//     nonce-based detection for accounts that may receive transfers
//     initiated outside the app (watch-only / imported),
//  4. every logsCheckIntervalIterations (or whenever changes were
//     detected), scan token transfer logs,
//  5. advance the checked block range to the new head.
func (c *findNewBlocksCommand) Run(parent context.Context) error {
	mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown()
	if err != nil {
		return err
	}

	accountsToCheck := []common.Address{}
	// accounts which might have outgoing transfers initiated outside
	// the application, e.g. watch only or restored from mnemonic phrase
	accountsWithOutsideTransfers := []common.Address{}

	for _, account := range c.accounts {
		acc, err := c.accountsDB.GetAccountByAddress(nodetypes.Address(account))
		if err != nil {
			return err
		}
		if mnemonicWasNotShown {
			if acc.AddressWasNotShown {
				log.Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account)
				continue
			}
		}
		if !mnemonicWasNotShown || acc.Type != accounts.AccountTypeGenerated {
			accountsWithOutsideTransfers = append(accountsWithOutsideTransfers, account)
		}

		accountsToCheck = append(accountsToCheck, account)
	}

	if len(accountsToCheck) == 0 {
		return nil
	}

	headNum, accountsWithDetectedChanges, err := c.detectTransfers(parent, accountsToCheck)
	if err != nil {
		log.Error("findNewBlocksCommand error on transfer detection", "error", err, "chain", c.chainClient.NetworkID())
		return err
	}

	c.blockChainState.SetLastBlockNumber(c.chainClient.NetworkID(), headNum.Uint64())

	if len(accountsWithDetectedChanges) != 0 {
		log.Debug("findNewBlocksCommand detected accounts with changes, proceeding", "accounts", accountsWithDetectedChanges, "from", c.fromBlockNumber)
		err = c.findAndSaveEthBlocks(parent, c.fromBlockNumber, headNum, accountsToCheck)
		if err != nil {
			return err
		}
	} else if c.iteration%c.nonceCheckIntervalIterations == 0 && len(accountsWithOutsideTransfers) > 0 {
		log.Debug("findNewBlocksCommand nonce check", "accounts", accountsWithOutsideTransfers)
		accountsWithNonceChanges, err := c.detectNonceChange(parent, headNum, accountsWithOutsideTransfers)
		if err != nil {
			return err
		}

		if len(accountsWithNonceChanges) > 0 {
			log.Debug("findNewBlocksCommand detected nonce diff", "accounts", accountsWithNonceChanges)
			for account, from := range accountsWithNonceChanges {
				err = c.findAndSaveEthBlocks(parent, from, headNum, []common.Address{account})
				if err != nil {
					return err
				}
			}
		}

		// Accounts without a nonce change are simply marked as checked
		// up to the new head.
		for _, account := range accountsToCheck {
			if _, ok := accountsWithNonceChanges[account]; ok {
				continue
			}
			err := c.markEthBlockRangeChecked(account, &BlockRange{nil, c.fromBlockNumber, headNum})
			if err != nil {
				return err
			}
		}
	}

	if len(accountsWithDetectedChanges) != 0 || c.iteration%c.logsCheckIntervalIterations == 0 {
		// Token logs are tracked with their own watermark, which may lag
		// behind fromBlockNumber when log checks are skipped.
		from := c.fromBlockNumber
		if c.logsCheckLastKnownBlock != nil {
			from = c.logsCheckLastKnownBlock
		}
		err = c.findAndSaveTokenBlocks(parent, from, headNum)
		if err != nil {
			return err
		}
		c.logsCheckLastKnownBlock = headNum
	}
	c.fromBlockNumber = headNum
	c.iteration++

	return nil
}
|
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
// findAndSaveEthBlocks scans [fromNum, headNum] for blocks containing ETH
// transfers for each given account, saves found headers to the DB, notifies
// listeners, and marks the range checked per account.
func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, fromNum, headNum *big.Int, accounts []common.Address) error {
	// Check ETH transfers for each account independently
	mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown()
	if err != nil {
		return err
	}

	for _, account := range accounts {
		// Privacy guard: skip addresses that were never shared while the
		// mnemonic is still not backed up.
		if mnemonicWasNotShown {
			acc, err := c.accountsDB.GetAccountByAddress(nodetypes.Address(account))
			if err != nil {
				return err
			}
			if acc.AddressWasNotShown {
				log.Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account)
				continue
			}
		}

		log.Debug("start findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum)

		headers, startBlockNum, err := c.findBlocksWithEthTransfers(parent, account, fromNum, headNum)
		if err != nil {
			return err
		}

		if len(headers) > 0 {
			log.Debug("findNewBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", headNum,
				"balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), headNum),
				"nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), headNum))

			err := c.db.SaveBlocks(c.chainClient.NetworkID(), headers)
			if err != nil {
				return err
			}

			c.blocksFound(headers)
		}

		// Record the checked range even when nothing was found, so the
		// next run resumes from headNum.
		err = c.markEthBlockRangeChecked(account, &BlockRange{startBlockNum, fromNum, headNum})
		if err != nil {
			return err
		}

		log.Debug("end findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum)
	}

	return nil
}
|
2023-06-14 10:00:56 +00:00
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
// findAndSaveTokenBlocks scans ERC20 transfer logs for all watched accounts
// over [fromNum, headNum], filters out headers whose transactions are already
// queued for loading, saves the remainder, notifies listeners, and marks the
// token range checked for every account.
func (c *findNewBlocksCommand) findAndSaveTokenBlocks(parent context.Context, fromNum, headNum *big.Int) error {
	// Check token transfers for all accounts.
	// Each account's last checked block can be different, so we can get duplicated headers,
	// so we need to deduplicate them
	const incomingOnly = false
	erc20Headers, err := c.fastIndexErc20(parent, fromNum, headNum, incomingOnly)
	if err != nil {
		log.Error("findNewBlocksCommand fastIndexErc20", "err", err, "account", c.accounts, "chain", c.chainClient.NetworkID())
		return err
	}

	if len(erc20Headers) > 0 {
		log.Debug("findNewBlocksCommand saving headers", "len", len(erc20Headers), "from", fromNum, "to", headNum)

		// get not loaded headers from DB for all accs and blocks
		preLoadedTransactions, err := c.db.GetTransactionsToLoad(c.chainClient.NetworkID(), common.Address{}, nil)
		if err != nil {
			return err
		}

		tokenBlocksFiltered := filterNewPreloadedTransactions(erc20Headers, preLoadedTransactions)

		err = c.db.SaveBlocks(c.chainClient.NetworkID(), tokenBlocksFiltered)
		if err != nil {
			return err
		}

		c.blocksFound(tokenBlocksFiltered)
	}

	return c.markTokenBlockRangeChecked(c.accounts, fromNum, headNum)
}
|
2023-06-14 10:00:56 +00:00
|
|
|
|
2024-02-19 15:50:07 +00:00
|
|
|
func (c *findBlocksCommand) markTokenBlockRangeChecked(accounts []common.Address, from, to *big.Int) error {
|
2023-11-27 10:08:17 +00:00
|
|
|
log.Debug("markTokenBlockRangeChecked", "chain", c.chainClient.NetworkID(), "from", from.Uint64(), "to", to.Uint64())
|
|
|
|
|
|
|
|
for _, account := range accounts {
|
2024-02-19 15:50:07 +00:00
|
|
|
err := c.blockRangeDAO.updateTokenRange(c.chainClient.NetworkID(), account, &BlockRange{FirstKnown: from, LastKnown: to})
|
2023-11-27 10:08:17 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error("findNewBlocksCommand upsertTokenRange", "error", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2023-11-02 17:24:23 +00:00
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func filterNewPreloadedTransactions(erc20Headers []*DBHeader, preLoadedTransfers []*PreloadedTransaction) []*DBHeader {
|
|
|
|
var uniqueErc20Headers []*DBHeader
|
|
|
|
for _, header := range erc20Headers {
|
|
|
|
loaded := false
|
|
|
|
for _, transfer := range preLoadedTransfers {
|
|
|
|
if header.PreloadedTransactions[0].ID == transfer.ID {
|
|
|
|
loaded = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !loaded {
|
|
|
|
uniqueErc20Headers = append(uniqueErc20Headers, header)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return uniqueErc20Headers
|
|
|
|
}
|
|
|
|
|
|
|
|
// findBlocksWithEthTransfers walks [fromOrig, toOrig] backwards in chunks of
// at most defaultNodeBlockChunkSize blocks, using fastIndex to find blocks
// with ETH transfers for the account. It stops when the first-transfer block
// (startBlockNum) falls inside the current chunk, when the range is empty, or
// when the next chunk would not advance.
func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context, account common.Address, fromOrig, toOrig *big.Int) (headers []*DBHeader, startBlockNum *big.Int, err error) {
	log.Debug("start findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber)

	rangeSize := big.NewInt(int64(c.defaultNodeBlockChunkSize))

	// Work on copies so the caller's big.Ints are not mutated.
	from, to := new(big.Int).Set(fromOrig), new(big.Int).Set(toOrig)

	// Limit the range size to DefaultNodeBlockChunkSize
	if new(big.Int).Sub(to, from).Cmp(rangeSize) > 0 {
		from.Sub(to, rangeSize)
	}

	for {
		if from.Cmp(to) == 0 {
			log.Debug("findNewBlocksCommand empty range", "from", from, "to", to)
			break
		}

		fromBlock := &Block{Number: from}

		var newFromBlock *Block
		var ethHeaders []*DBHeader
		newFromBlock, ethHeaders, startBlockNum, err = c.fastIndex(parent, account, c.balanceCacher, fromBlock, to)
		if err != nil {
			log.Error("findNewBlocksCommand checkRange fastIndex", "err", err, "account", account,
				"chain", c.chainClient.NetworkID())
			return nil, nil, err
		}
		log.Debug("findNewBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
			"startBlock", startBlockNum, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)

		headers = append(headers, ethHeaders...)

		// First-transfer block already covered: everything earlier is empty.
		if startBlockNum != nil && startBlockNum.Cmp(from) >= 0 {
			log.Debug("Checked all ranges, stop execution", "startBlock", startBlockNum, "from", from, "to", to)
			break
		}

		nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, newFromBlock.Number, fromOrig)

		// No progress means the scan is done.
		if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 {
			log.Debug("findNewBlocksCommand empty next range", "from", from, "to", to)
			break
		}

		from = nextFrom
		to = nextTo
	}

	log.Debug("end findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit)

	return headers, startBlockNum, nil
}
|
|
|
|
|
2023-05-19 08:19:48 +00:00
|
|
|
// TODO NewFindBlocksCommand
|
|
|
|
type findBlocksCommand struct {
|
2023-11-27 10:08:17 +00:00
|
|
|
accounts []common.Address
|
2023-09-20 08:41:23 +00:00
|
|
|
db *Database
|
2023-12-01 11:30:42 +00:00
|
|
|
accountsDB *accounts.Database
|
2023-12-11 13:29:10 +00:00
|
|
|
blockRangeDAO BlockRangeDAOer
|
2023-09-20 08:41:23 +00:00
|
|
|
chainClient chain.ClientInterface
|
|
|
|
balanceCacher balance.Cacher
|
|
|
|
feed *event.Feed
|
|
|
|
noLimit bool
|
|
|
|
tokenManager *token.Manager
|
|
|
|
fromBlockNumber *big.Int
|
2024-02-19 10:50:03 +00:00
|
|
|
logsCheckLastKnownBlock *big.Int
|
2023-09-20 08:41:23 +00:00
|
|
|
toBlockNumber *big.Int
|
|
|
|
blocksLoadedCh chan<- []*DBHeader
|
|
|
|
defaultNodeBlockChunkSize int
|
2023-05-26 08:27:48 +00:00
|
|
|
|
|
|
|
// Not to be set by the caller
|
2023-09-20 08:41:23 +00:00
|
|
|
resFromBlock *Block
|
|
|
|
startBlockNumber *big.Int
|
|
|
|
reachedETHHistoryStart bool
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
|
2024-01-19 12:43:25 +00:00
|
|
|
func (c *findBlocksCommand) Runner(interval ...time.Duration) async.Runner {
|
|
|
|
intvl := findBlocksRetryInterval
|
|
|
|
if len(interval) > 0 {
|
|
|
|
intvl = interval[0]
|
|
|
|
}
|
2024-01-17 11:46:59 +00:00
|
|
|
return async.FiniteCommandWithErrorCounter{
|
|
|
|
FiniteCommand: async.FiniteCommand{
|
2024-01-19 12:43:25 +00:00
|
|
|
Interval: intvl,
|
2024-01-17 11:46:59 +00:00
|
|
|
Runable: c.Run,
|
|
|
|
},
|
2024-01-19 12:43:25 +00:00
|
|
|
ErrorCounter: async.NewErrorCounter(3, "findBlocksCommand"),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *findBlocksCommand) Command(interval ...time.Duration) async.Command {
|
|
|
|
return c.Runner(interval...).Run
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
|
2023-10-05 12:35:16 +00:00
|
|
|
// ERC20BlockRange is an inclusive [from, to] block interval in which an
// ERC20 balance change was detected.
type ERC20BlockRange struct {
	from *big.Int
	to   *big.Int
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
// ERC20ScanByBalance locates block ranges in which the account's balance of
// the given token changed, by recursively bisecting [fromBlock, toBlock]: a
// range whose endpoint balances differ is split in half until it is no larger
// than the chain's ERC20 log batch size. Endpoint balances are cached by
// block number so shared boundaries are fetched only once.
func (c *findBlocksCommand) ERC20ScanByBalance(parent context.Context, account common.Address, fromBlock, toBlock *big.Int, token common.Address) ([]ERC20BlockRange, error) {
	var err error
	batchSize := getErc20BatchSize(c.chainClient.NetworkID())
	ranges := [][]*big.Int{{fromBlock, toBlock}}
	foundRanges := []ERC20BlockRange{}
	cache := map[int64]*big.Int{}
	for {
		nextRanges := [][]*big.Int{}
		for _, blockRange := range ranges {
			from, to := blockRange[0], blockRange[1]
			fromBalance, ok := cache[from.Int64()]
			if !ok {
				fromBalance, err = c.tokenManager.GetTokenBalanceAt(parent, c.chainClient, account, token, from)
				if err != nil {
					return nil, err
				}

				// Treat a missing balance as zero.
				if fromBalance == nil {
					fromBalance = big.NewInt(0)
				}
				cache[from.Int64()] = fromBalance
			}

			toBalance, ok := cache[to.Int64()]
			if !ok {
				toBalance, err = c.tokenManager.GetTokenBalanceAt(parent, c.chainClient, account, token, to)
				if err != nil {
					return nil, err
				}
				if toBalance == nil {
					toBalance = big.NewInt(0)
				}
				cache[to.Int64()] = toBalance
			}

			// Balance changed somewhere inside this range.
			if fromBalance.Cmp(toBalance) != 0 {
				diff := new(big.Int).Sub(to, from)
				// Small enough to fetch logs directly — record it.
				if diff.Cmp(batchSize) <= 0 {
					foundRanges = append(foundRanges, ERC20BlockRange{from, to})
					continue
				}

				halfOfDiff := new(big.Int).Div(diff, big.NewInt(2))
				mid := new(big.Int).Add(from, halfOfDiff)

				nextRanges = append(nextRanges, []*big.Int{from, mid})
				nextRanges = append(nextRanges, []*big.Int{mid, to})
			}
		}

		if len(nextRanges) == 0 {
			break
		}

		ranges = nextRanges
	}

	return foundRanges, nil
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func (c *findBlocksCommand) checkERC20Tail(parent context.Context, account common.Address) ([]*DBHeader, error) {
|
2024-02-19 15:50:07 +00:00
|
|
|
log.Debug("checkERC20Tail", "account", account, "to block", c.startBlockNumber, "from", c.resFromBlock.Number)
|
2023-10-17 15:05:05 +00:00
|
|
|
tokens, err := c.tokenManager.GetTokens(c.chainClient.NetworkID())
|
2023-09-20 08:41:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
addresses := make([]common.Address, len(tokens))
|
|
|
|
for i, token := range tokens {
|
|
|
|
addresses[i] = token.Address
|
|
|
|
}
|
|
|
|
|
|
|
|
from := new(big.Int).Sub(c.resFromBlock.Number, big.NewInt(1))
|
|
|
|
|
|
|
|
clients := make(map[uint64]chain.ClientInterface, 1)
|
|
|
|
clients[c.chainClient.NetworkID()] = c.chainClient
|
|
|
|
atBlocks := make(map[uint64]*big.Int, 1)
|
|
|
|
atBlocks[c.chainClient.NetworkID()] = from
|
2023-11-27 10:08:17 +00:00
|
|
|
balances, err := c.tokenManager.GetBalancesAtByChain(parent, clients, []common.Address{account}, addresses, atBlocks)
|
2023-09-20 08:41:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-10-05 12:35:16 +00:00
|
|
|
foundRanges := []ERC20BlockRange{}
|
2023-11-27 10:08:17 +00:00
|
|
|
for token, balance := range balances[c.chainClient.NetworkID()][account] {
|
2023-09-20 08:41:23 +00:00
|
|
|
bigintBalance := big.NewInt(balance.ToInt().Int64())
|
|
|
|
if bigintBalance.Cmp(big.NewInt(0)) <= 0 {
|
|
|
|
continue
|
|
|
|
}
|
2023-11-27 10:08:17 +00:00
|
|
|
result, err := c.ERC20ScanByBalance(parent, account, big.NewInt(0), from, token)
|
2023-09-20 08:41:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-10-05 12:35:16 +00:00
|
|
|
foundRanges = append(foundRanges, result...)
|
|
|
|
}
|
|
|
|
|
|
|
|
uniqRanges := []ERC20BlockRange{}
|
|
|
|
rangesMap := map[string]bool{}
|
|
|
|
for _, rangeItem := range foundRanges {
|
|
|
|
key := rangeItem.from.String() + "-" + rangeItem.to.String()
|
|
|
|
if _, ok := rangesMap[key]; !ok {
|
|
|
|
rangesMap[key] = true
|
|
|
|
uniqRanges = append(uniqRanges, rangeItem)
|
|
|
|
}
|
2023-09-20 08:41:23 +00:00
|
|
|
}
|
|
|
|
|
2023-10-05 12:35:16 +00:00
|
|
|
foundHeaders := []*DBHeader{}
|
|
|
|
for _, rangeItem := range uniqRanges {
|
|
|
|
headers, err := c.fastIndexErc20(parent, rangeItem.from, rangeItem.to, true)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
foundHeaders = append(foundHeaders, headers...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return foundHeaders, nil
|
2023-09-20 08:41:23 +00:00
|
|
|
}
|
|
|
|
|
2023-05-19 08:19:48 +00:00
|
|
|
// Run scans [c.fromBlockNumber, c.toBlockNumber] backwards in chunks for
// blocks with transfers of the first (and currently only supported) account.
// Found headers are saved and announced; the checked ETH and token ranges are
// persisted after each chunk so progress survives restarts. Once the first
// ETH transfer is found and the scan has passed it, a final ERC20 "tail"
// check covers earlier history, then the loop terminates.
func (c *findBlocksCommand) Run(parent context.Context) (err error) {
	log.Debug("start findBlocksCommand", "accounts", c.accounts, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber)

	account := c.accounts[0] // For now this command supports only 1 account
	mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown()
	if err != nil {
		return err
	}

	// Privacy guard: do not scan addresses that were never shared while the
	// mnemonic is still not backed up.
	if mnemonicWasNotShown {
		account, err := c.accountsDB.GetAccountByAddress(nodetypes.BytesToAddress(account.Bytes()))
		if err != nil {
			return err
		}
		if account.AddressWasNotShown {
			log.Info("skip findBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account)
			return nil
		}
	}

	rangeSize := big.NewInt(int64(c.defaultNodeBlockChunkSize))
	// Work on copies so the configured bounds are not mutated.
	from, to := new(big.Int).Set(c.fromBlockNumber), new(big.Int).Set(c.toBlockNumber)

	// Limit the range size to DefaultNodeBlockChunkSize
	if new(big.Int).Sub(to, from).Cmp(rangeSize) > 0 {
		from.Sub(to, rangeSize)
	}

	for {
		if from.Cmp(to) == 0 {
			log.Debug("findBlocksCommand empty range", "from", from, "to", to)
			break
		}

		var headers []*DBHeader
		if c.reachedETHHistoryStart {
			// ETH history is exhausted; only the ERC20 tail (blocks before
			// the first ETH transfer) may still hold incoming transfers.
			if c.fromBlockNumber.Cmp(zero) == 0 && c.startBlockNumber != nil && c.startBlockNumber.Cmp(zero) == 1 {
				headers, err = c.checkERC20Tail(parent, account)
				if err != nil {
					log.Error("findBlocksCommand checkERC20Tail", "err", err, "account", account, "chain", c.chainClient.NetworkID())
					break
				}
			}
		} else {
			headers, err = c.checkRange(parent, from, to)
			if err != nil {
				break
			}
		}

		if len(headers) > 0 {
			log.Debug("findBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", to,
				"balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), to),
				"nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), to))

			err = c.db.SaveBlocks(c.chainClient.NetworkID(), headers)
			if err != nil {
				break
			}

			c.blocksFound(headers)
		}

		if c.reachedETHHistoryStart {
			// Tail covered [0, to]; record it and finish.
			err = c.markTokenBlockRangeChecked([]common.Address{account}, big.NewInt(0), to)
			if err != nil {
				break
			}
			log.Debug("findBlocksCommand reached first ETH transfer and checked erc20 tail", "chain", c.chainClient.NetworkID(), "account", account)
			break
		}

		err = c.markEthBlockRangeChecked(account, &BlockRange{c.startBlockNumber, c.resFromBlock.Number, to})
		if err != nil {
			break
		}

		err = c.markTokenBlockRangeChecked([]common.Address{account}, c.resFromBlock.Number, to)
		if err != nil {
			break
		}

		// if we have found first ETH block and we have not reached the start of ETH history yet
		if c.startBlockNumber != nil && c.fromBlockNumber.Cmp(from) == -1 {
			log.Debug("ERC20 tail should be checked", "initial from", c.fromBlockNumber, "actual from", from, "first ETH block", c.startBlockNumber)
			c.reachedETHHistoryStart = true
			continue
		}

		if c.startBlockNumber != nil && c.startBlockNumber.Cmp(from) >= 0 {
			log.Debug("Checked all ranges, stop execution", "startBlock", c.startBlockNumber, "from", from, "to", to)
			break
		}

		nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, c.resFromBlock.Number, c.fromBlockNumber)

		// No progress means the scan is done.
		if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 {
			log.Debug("findBlocksCommand empty next range", "from", from, "to", to)
			break
		}

		from = nextFrom
		to = nextTo
	}

	log.Debug("end findBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "err", err)

	return err
}
|
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
// blocksFound hands freshly discovered block headers to the transfers loop
// via blocksLoadedCh. The send blocks until a receiver is ready.
func (c *findBlocksCommand) blocksFound(headers []*DBHeader) {
	c.blocksLoadedCh <- headers
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func (c *findBlocksCommand) markEthBlockRangeChecked(account common.Address, blockRange *BlockRange) error {
|
2023-05-26 08:27:48 +00:00
|
|
|
log.Debug("upsert block range", "Start", blockRange.Start, "FirstKnown", blockRange.FirstKnown, "LastKnown", blockRange.LastKnown,
|
2023-11-27 10:08:17 +00:00
|
|
|
"chain", c.chainClient.NetworkID(), "account", account)
|
2023-05-19 08:19:48 +00:00
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
err := c.blockRangeDAO.upsertEthRange(c.chainClient.NetworkID(), account, blockRange)
|
2023-05-19 08:19:48 +00:00
|
|
|
if err != nil {
|
2023-05-26 08:27:48 +00:00
|
|
|
log.Error("findBlocksCommand upsertRange", "error", err)
|
|
|
|
return err
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
|
2023-05-26 08:27:48 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkRange scans the block interval [from, to] for activity of the
// command's single account. It first runs the ETH fast indexer (balance/nonce
// bisection), which may narrow the lower bound, then scans the narrowed range
// for ERC20 transfer logs. Results are deduplicated per block hash, and the
// command state (c.resFromBlock, c.startBlockNumber) is updated as a side
// effect for the caller's loop to consume.
func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to *big.Int) (
	foundHeaders []*DBHeader, err error) {

	// History scans are created with exactly one account; see fetchHistoryBlocksForAccount.
	account := c.accounts[0]
	fromBlock := &Block{Number: from}

	// ETH transfers: bisection over balance/nonce; startBlock is the detected
	// beginning of the account's ETH history (nil if not reached yet).
	newFromBlock, ethHeaders, startBlock, err := c.fastIndex(parent, account, c.balanceCacher, fromBlock, to)
	if err != nil {
		log.Error("findBlocksCommand checkRange fastIndex", "err", err, "account", account,
			"chain", c.chainClient.NetworkID())
		return nil, err
	}
	log.Debug("findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
		"startBlock", startBlock, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)

	// There could be incoming ERC20 transfers which don't change the balance
	// and nonce of ETH account, so we keep looking for them
	erc20Headers, err := c.fastIndexErc20(parent, newFromBlock.Number, to, false)
	if err != nil {
		log.Error("findBlocksCommand checkRange fastIndexErc20", "err", err, "account", account, "chain", c.chainClient.NetworkID())
		return nil, err
	}

	allHeaders := append(ethHeaders, erc20Headers...)

	if len(allHeaders) > 0 {
		// A block can carry both ETH and ERC20 activity; keep one header per hash.
		foundHeaders = uniqueHeaderPerBlockHash(allHeaders)
	}

	// Publish results on the command so the enclosing loop can advance.
	c.resFromBlock = newFromBlock
	c.startBlockNumber = startBlock

	log.Debug("end findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account,
		"c.startBlock", c.startBlockNumber, "newFromBlock", newFromBlock.Number,
		"toBlockNumber", to, "c.resFromBlock", c.resFromBlock.Number)

	return
}
|
|
|
|
|
2023-12-11 13:29:10 +00:00
|
|
|
func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO BlockRangeDAOer) (
|
2023-11-27 10:08:17 +00:00
|
|
|
*ethTokensBlockRanges, error) {
|
2023-05-26 08:27:48 +00:00
|
|
|
|
2024-02-19 15:50:07 +00:00
|
|
|
blockRange, _, err := blockDAO.getBlockRange(chainID, account)
|
2023-05-19 08:19:48 +00:00
|
|
|
if err != nil {
|
2023-05-26 08:27:48 +00:00
|
|
|
log.Error("failed to load block ranges from database", "chain", chainID, "account", account,
|
|
|
|
"error", err)
|
2023-05-19 08:19:48 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-05-26 08:27:48 +00:00
|
|
|
return blockRange, nil
|
|
|
|
}
|
2023-05-19 08:19:48 +00:00
|
|
|
|
2023-06-01 13:09:50 +00:00
|
|
|
// Returns if all blocks are loaded, which means that start block (beginning of account history)
|
|
|
|
// has been found and all block headers saved to the DB
|
2023-05-26 08:27:48 +00:00
|
|
|
func areAllHistoryBlocksLoaded(blockInfo *BlockRange) bool {
|
2024-02-19 15:50:07 +00:00
|
|
|
if blockInfo != nil && blockInfo.FirstKnown != nil &&
|
|
|
|
((blockInfo.Start != nil && blockInfo.Start.Cmp(blockInfo.FirstKnown) >= 0) ||
|
|
|
|
blockInfo.FirstKnown.Cmp(zero) == 0) {
|
2023-05-26 08:27:48 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
|
2023-12-11 13:29:10 +00:00
|
|
|
func areAllHistoryBlocksLoadedForAddress(blockRangeDAO BlockRangeDAOer, chainID uint64,
|
2023-06-01 13:09:50 +00:00
|
|
|
address common.Address) (bool, error) {
|
|
|
|
|
2024-02-19 15:50:07 +00:00
|
|
|
blockRange, _, err := blockRangeDAO.getBlockRange(chainID, address)
|
2023-06-01 13:09:50 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error("findBlocksCommand getBlockRange", "error", err)
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
return areAllHistoryBlocksLoaded(blockRange.eth) && areAllHistoryBlocksLoaded(blockRange.tokens), nil
|
2023-06-01 13:09:50 +00:00
|
|
|
}
|
|
|
|
|
2023-05-19 08:19:48 +00:00
|
|
|
// fastIndex runs ETH fast indexing (ethHistoricalCommand) for a single
// account over [fromBlock, toBlockNumber], up to canonical chain head minus
// safety depth. It returns the possibly-narrowed lower bound of the scanned
// range, the headers of blocks containing ETH transfers, and the detected
// start of the account's history (nil if not reached). Cancellation of ctx
// aborts the scan and returns ctx.Err().
func (c *findBlocksCommand) fastIndex(ctx context.Context, account common.Address, bCacher balance.Cacher,
	fromBlock *Block, toBlockNumber *big.Int) (resultingFrom *Block, headers []*DBHeader,
	startBlock *big.Int, err error) {

	log.Debug("fast index started", "chainID", c.chainClient.NetworkID(), "account", account,
		"from", fromBlock.Number, "to", toBlockNumber)

	start := time.Now()
	group := async.NewGroup(ctx)

	command := &ethHistoricalCommand{
		chainClient:   c.chainClient,
		balanceCacher: bCacher,
		address:       account,
		feed:          c.feed,
		from:          fromBlock,
		to:            toBlockNumber,
		noLimit:       c.noLimit,
		threadLimit:   SequentialThreadLimit,
	}
	group.Add(command.Command())

	select {
	case <-ctx.Done():
		err = ctx.Err()
		log.Debug("fast indexer ctx Done", "error", err)
		return
	case <-group.WaitAsync():
		// The command stores its failure on itself; surface it to the caller.
		if command.error != nil {
			err = command.error
			return
		}
		resultingFrom = &Block{Number: command.resultingFrom}
		headers = command.foundHeaders
		startBlock = command.startBlock
		log.Debug("fast indexer finished", "chainID", c.chainClient.NetworkID(), "account", account, "in", time.Since(start),
			"startBlock", command.startBlock, "resultingFrom", resultingFrom.Number, "headers", len(headers))
		return
	}
}
|
|
|
|
|
|
|
|
// fastIndexErc20 scans [fromBlockNumber, toBlockNumber] for ERC20 transfer
// logs involving the command's accounts using an erc20HistoricalCommand.
// When incomingOnly is true, only transfers addressed to the accounts are
// searched. Returns headers of the blocks that contain matching transfers;
// ctx cancellation aborts the scan with ctx.Err().
func (c *findBlocksCommand) fastIndexErc20(ctx context.Context, fromBlockNumber *big.Int,
	toBlockNumber *big.Int, incomingOnly bool) ([]*DBHeader, error) {

	start := time.Now()
	group := async.NewGroup(ctx)

	erc20 := &erc20HistoricalCommand{
		erc20:        NewERC20TransfersDownloader(c.chainClient, c.accounts, types.LatestSignerForChainID(c.chainClient.ToBigInt()), incomingOnly),
		chainClient:  c.chainClient,
		feed:         c.feed,
		from:         fromBlockNumber,
		to:           toBlockNumber,
		foundHeaders: []*DBHeader{},
	}
	group.Add(erc20.Command())

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-group.WaitAsync():
		headers := erc20.foundHeaders
		log.Debug("fast indexer Erc20 finished", "chainID", c.chainClient.NetworkID(),
			"in", time.Since(start), "headers", len(headers))
		return headers, nil
	}
}
|
|
|
|
|
2023-12-11 13:29:10 +00:00
|
|
|
// startTransfersLoop starts a goroutine that loads transfers for newly
// discovered blocks. It consumes header batches from c.blocksLoadedCh, groups
// them by account address, and spawns a loadTransfers call per batch. The
// loop runs until ctx is cancelled; the loop counter (incLoops/decLoops)
// tracks its lifetime so Run can tell whether it is already active.
func (c *loadBlocksAndTransfersCommand) startTransfersLoop(ctx context.Context) {
	c.incLoops()
	go func() {
		defer func() {
			c.decLoops()
		}()

		log.Debug("loadTransfersLoop start", "chain", c.chainClient.NetworkID())

		for {
			select {
			case <-ctx.Done():
				log.Debug("startTransfersLoop done", "chain", c.chainClient.NetworkID(), "error", ctx.Err())
				return
			case dbHeaders := <-c.blocksLoadedCh:
				log.Debug("loadTransfersOnDemand transfers received", "chain", c.chainClient.NetworkID(), "headers", len(dbHeaders))

				blocksByAddress := map[common.Address][]*big.Int{}
				// iterate over headers and group them by address
				for _, dbHeader := range dbHeaders {
					blocksByAddress[dbHeader.Address] = append(blocksByAddress[dbHeader.Address], dbHeader.Number)
				}

				// Load transfers asynchronously so the loop keeps draining the
				// channel; the error is intentionally ignored (best-effort,
				// unfetched blocks remain in the DB for the next pass).
				go func() {
					_ = loadTransfers(ctx, c.blockDAO, c.db, c.chainClient, noBlockLimit,
						blocksByAddress, c.transactionManager, c.pendingTxManager, c.tokenManager, c.feed)
				}()
			}
		}
	}()
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func newLoadBlocksAndTransfersCommand(accounts []common.Address, db *Database, accountsDB *accounts.Database,
|
2023-12-11 13:29:10 +00:00
|
|
|
blockDAO *BlockDAO, blockRangesSeqDAO BlockRangeDAOer, chainClient chain.ClientInterface, feed *event.Feed,
|
2023-08-01 18:50:30 +00:00
|
|
|
transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker,
|
2024-01-25 12:05:59 +00:00
|
|
|
tokenManager *token.Manager, balanceCacher balance.Cacher, omitHistory bool,
|
|
|
|
blockChainState *blockchainstate.BlockChainState) *loadBlocksAndTransfersCommand {
|
2023-06-01 13:09:50 +00:00
|
|
|
|
|
|
|
return &loadBlocksAndTransfersCommand{
|
2023-11-27 10:08:17 +00:00
|
|
|
accounts: accounts,
|
2023-06-01 13:09:50 +00:00
|
|
|
db: db,
|
2023-11-28 14:23:03 +00:00
|
|
|
blockRangeDAO: blockRangesSeqDAO,
|
2023-12-01 11:30:42 +00:00
|
|
|
accountsDB: accountsDB,
|
2023-06-01 13:09:50 +00:00
|
|
|
blockDAO: blockDAO,
|
|
|
|
chainClient: chainClient,
|
|
|
|
feed: feed,
|
2023-09-04 05:34:09 +00:00
|
|
|
balanceCacher: balanceCacher,
|
2023-06-01 13:09:50 +00:00
|
|
|
transactionManager: transactionManager,
|
2023-06-21 14:09:55 +00:00
|
|
|
pendingTxManager: pendingTxManager,
|
2023-06-02 20:08:45 +00:00
|
|
|
tokenManager: tokenManager,
|
2023-06-28 13:48:37 +00:00
|
|
|
blocksLoadedCh: make(chan []*DBHeader, 100),
|
2023-10-18 10:02:35 +00:00
|
|
|
omitHistory: omitHistory,
|
2024-01-19 15:57:04 +00:00
|
|
|
contractMaker: tokenManager.ContractMaker,
|
2024-01-25 12:05:59 +00:00
|
|
|
blockChainState: blockChainState,
|
2023-06-01 13:09:50 +00:00
|
|
|
}
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// loadBlocksAndTransfersCommand orchestrates block discovery and transfer
// loading for a set of accounts on a single chain: history scanning,
// new-block polling, and transfer fetching for the blocks found.
type loadBlocksAndTransfersCommand struct {
	accounts      []common.Address          // addresses being tracked
	db            *Database                 // wallet transfers database
	accountsDB    *accounts.Database        // multiaccount settings database
	blockRangeDAO BlockRangeDAOer           // persisted checked block ranges
	blockDAO      *BlockDAO                 // persisted block headers
	chainClient   chain.ClientInterface     // RPC client for this chain
	feed          *event.Feed               // wallet event feed (may be nil)
	balanceCacher balance.Cacher            // balance/nonce cache for fast indexing
	// nonArchivalRPCNode bool // TODO Make use of it
	transactionManager *TransactionManager
	pendingTxManager   *transactions.PendingTxTracker
	tokenManager       *token.Manager
	blocksLoadedCh     chan []*DBHeader // found headers, consumed by the transfers loop
	omitHistory        bool             // when true, skip history and mark ranges as checked up to head
	contractMaker      *contracts.ContractMaker
	blockChainState    *blockchainstate.BlockChainState

	// Not to be set by the caller
	transfersLoaded map[common.Address]bool // For event RecentHistoryReady to be sent only once per account during app lifetime
	loops           atomic.Int32            // number of live background loops (transfers loop + new-blocks loop)
}
|
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
// incLoops atomically increments the count of running background loops.
func (c *loadBlocksAndTransfersCommand) incLoops() {
	c.loops.Add(1)
}
|
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
// decLoops atomically decrements the count of running background loops.
func (c *loadBlocksAndTransfersCommand) decLoops() {
	c.loops.Add(-1)
}
|
|
|
|
|
|
|
|
// isStarted reports whether any background loop (transfers loop or
// new-blocks loop) is currently running.
func (c *loadBlocksAndTransfersCommand) isStarted() bool {
	return c.loops.Load() > 0
}
|
|
|
|
|
2023-12-11 13:29:10 +00:00
|
|
|
// Run executes one iteration of the block/transfer loading pipeline for this
// chain and returns the first error from the finite sub-commands (a non-nil
// error causes the wrapping Runner to rerun the command after its interval).
func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) (err error) {
	log.Debug("start load all transfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts)

	// Finite processes (to be restarted on error, but stopped on success or context cancel):
	// fetching transfers for loaded blocks
	// fetching history blocks

	// Infinite processes (to be restarted on error), but stopped on context cancel:
	// fetching new blocks
	// fetching transfers for new blocks

	ctx := parent
	finiteGroup := async.NewAtomicGroup(ctx)
	finiteGroup.SetName("finiteGroup")
	defer func() {
		finiteGroup.Stop()
		finiteGroup.Wait()
	}()

	blockRanges, err := c.blockRangeDAO.getBlockRanges(c.chainClient.NetworkID(), c.accounts)
	if err != nil {
		return err
	}

	// firstScan: at least one account has no recorded token range yet.
	// Otherwise headNum becomes the smallest tokens.LastKnown across accounts,
	// so new-block polling starts where the least-advanced account left off.
	firstScan := false
	var headNum *big.Int
	for _, address := range c.accounts {
		blockRange, ok := blockRanges[address]
		if !ok || blockRange.tokens.LastKnown == nil {
			firstScan = true
			break
		}

		if headNum == nil || blockRange.tokens.LastKnown.Cmp(headNum) < 0 {
			headNum = blockRange.tokens.LastKnown
		}
	}

	fromNum := big.NewInt(0)
	if firstScan {
		// No stored ranges to resume from: anchor at the current chain head.
		headNum, err = getHeadBlockNumber(ctx, c.chainClient)
		if err != nil {
			return err
		}
	}

	// It will start loadTransfersCommand which will run until all transfers from DB are loaded or any one failed to load
	err = c.startFetchingTransfersForLoadedBlocks(finiteGroup)
	if err != nil {
		log.Error("loadBlocksAndTransfersCommand fetchTransfersForLoadedBlocks", "error", err)
		return err
	}

	// Infinite loops are started only once per command lifetime; on reruns of
	// Run (after an error) they are already active.
	if !c.isStarted() {
		c.startTransfersLoop(ctx)
		c.startFetchingNewBlocks(ctx, c.accounts, headNum, c.blocksLoadedCh)
	}

	// It will start findBlocksCommands which will run until success when all blocks are loaded
	err = c.fetchHistoryBlocks(finiteGroup, c.accounts, fromNum, headNum, c.blocksLoadedCh)
	if err != nil {
		log.Error("loadBlocksAndTransfersCommand fetchHistoryBlocks", "error", err)
		return err
	}

	select {
	case <-ctx.Done():
		log.Debug("loadBlocksAndTransfers command cancelled", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", ctx.Err())
	case <-finiteGroup.WaitAsync():
		err = finiteGroup.Error() // if there was an error, rerun the command
		log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", err, "group", finiteGroup.Name())
	}

	return err
}
|
|
|
|
|
2024-01-17 11:46:59 +00:00
|
|
|
func (c *loadBlocksAndTransfersCommand) Runner(interval ...time.Duration) async.Runner {
|
2023-12-11 13:29:10 +00:00
|
|
|
// 30s - default interval for Infura's delay returned in error. That should increase chances
|
|
|
|
// for request to succeed with the next attempt for now until we have a proper retry mechanism
|
|
|
|
intvl := 30 * time.Second
|
|
|
|
if len(interval) > 0 {
|
|
|
|
intvl = interval[0]
|
|
|
|
}
|
|
|
|
|
2024-01-19 12:43:25 +00:00
|
|
|
return async.FiniteCommand{
|
|
|
|
Interval: intvl,
|
|
|
|
Runable: c.Run,
|
2024-01-17 11:46:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Command returns the runnable entry point of the command, wrapped with the
// retry behavior configured by Runner.
func (c *loadBlocksAndTransfersCommand) Command(interval ...time.Duration) async.Command {
	return c.Runner(interval...).Run
}
|
|
|
|
|
2023-12-10 14:31:30 +00:00
|
|
|
func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocks(group *async.AtomicGroup, accounts []common.Address, fromNum, toNum *big.Int, blocksLoadedCh chan []*DBHeader) (err error) {
|
|
|
|
for _, account := range accounts {
|
|
|
|
err = c.fetchHistoryBlocksForAccount(group, account, fromNum, toNum, c.blocksLoadedCh)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *async.AtomicGroup, account common.Address, fromNum, toNum *big.Int, blocksLoadedCh chan []*DBHeader) error {
|
2023-11-27 10:08:17 +00:00
|
|
|
|
|
|
|
log.Debug("fetchHistoryBlocks start", "chainID", c.chainClient.NetworkID(), "account", account, "omit", c.omitHistory)
|
2023-06-14 10:00:56 +00:00
|
|
|
|
2023-10-18 10:02:35 +00:00
|
|
|
if c.omitHistory {
|
2023-11-27 10:08:17 +00:00
|
|
|
blockRange := ðTokensBlockRanges{eth: &BlockRange{nil, big.NewInt(0), toNum}, tokens: &BlockRange{nil, big.NewInt(0), toNum}}
|
|
|
|
err := c.blockRangeDAO.upsertRange(c.chainClient.NetworkID(), account, blockRange)
|
|
|
|
log.Error("fetchHistoryBlocks upsertRange", "error", err)
|
2023-10-18 10:02:35 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
blockRange, err := loadBlockRangeInfo(c.chainClient.NetworkID(), account, c.blockRangeDAO)
|
2023-06-14 10:00:56 +00:00
|
|
|
if err != nil {
|
2023-12-10 14:31:30 +00:00
|
|
|
log.Error("fetchHistoryBlocks loadBlockRangeInfo", "error", err)
|
2023-12-11 13:29:10 +00:00
|
|
|
return err
|
2023-06-14 10:00:56 +00:00
|
|
|
}
|
2023-05-26 08:27:48 +00:00
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
ranges := [][]*big.Int{}
|
|
|
|
// There are 2 history intervals:
|
|
|
|
// 1) from 0 to FirstKnown
|
|
|
|
// 2) from LastKnown to `toNum`` (head)
|
|
|
|
// If we blockRange is nil, we need to load all blocks from `fromNum` to `toNum`
|
|
|
|
// As current implementation checks ETH first then tokens, tokens ranges maybe behind ETH ranges in
|
|
|
|
// cases when block searching was interrupted, so we use tokens ranges
|
2024-02-19 15:50:07 +00:00
|
|
|
if blockRange.tokens.LastKnown != nil || blockRange.tokens.FirstKnown != nil {
|
2023-11-27 10:08:17 +00:00
|
|
|
if blockRange.tokens.LastKnown != nil && toNum.Cmp(blockRange.tokens.LastKnown) > 0 {
|
|
|
|
ranges = append(ranges, []*big.Int{blockRange.tokens.LastKnown, toNum})
|
|
|
|
}
|
2023-11-02 17:24:23 +00:00
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
if blockRange.tokens.FirstKnown != nil {
|
|
|
|
if fromNum.Cmp(blockRange.tokens.FirstKnown) < 0 {
|
|
|
|
ranges = append(ranges, []*big.Int{fromNum, blockRange.tokens.FirstKnown})
|
|
|
|
} else {
|
|
|
|
if !c.transfersLoaded[account] {
|
|
|
|
transfersLoaded, err := c.areAllTransfersLoaded(account)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if transfersLoaded {
|
|
|
|
if c.transfersLoaded == nil {
|
|
|
|
c.transfersLoaded = make(map[common.Address]bool)
|
|
|
|
}
|
|
|
|
c.transfersLoaded[account] = true
|
|
|
|
c.notifyHistoryReady(account)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ranges = append(ranges, []*big.Int{fromNum, toNum})
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, rangeItem := range ranges {
|
2024-02-19 15:50:07 +00:00
|
|
|
log.Debug("range item", "r", rangeItem, "n", c.chainClient.NetworkID(), "a", account)
|
2024-05-20 11:21:21 +00:00
|
|
|
|
2024-05-21 17:40:37 +00:00
|
|
|
// Each account has its own limit and a global limit for all accounts
|
|
|
|
accountTag := transferHistoryTag + "_" + account.String()
|
|
|
|
chainClient := chain.ClientWithTag(c.chainClient, accountTag, transferHistoryTag)
|
2024-05-22 07:23:44 +00:00
|
|
|
storage := chain.NewLimitsDBStorage(c.db.client)
|
2024-05-21 17:40:37 +00:00
|
|
|
limiter := chain.NewRequestLimiter(storage)
|
2024-05-22 10:16:06 +00:00
|
|
|
err := limiter.SetLimit(accountTag, transferHistoryLimitPerAccount, transferHistoryLimitPeriod)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("fetchHistoryBlocksForAccount SetLimit", "error", err, "accountTag", accountTag)
|
|
|
|
}
|
|
|
|
err = limiter.SetLimit(transferHistoryTag, transferHistoryLimit, transferHistoryLimitPeriod)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("fetchHistoryBlocksForAccount SetLimit", "error", err, "groupTag", transferHistoryTag)
|
|
|
|
}
|
2024-05-20 12:38:02 +00:00
|
|
|
chainClient.SetLimiter(limiter)
|
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
fbc := &findBlocksCommand{
|
2023-11-27 10:08:17 +00:00
|
|
|
accounts: []common.Address{account},
|
2023-09-20 08:41:23 +00:00
|
|
|
db: c.db,
|
2023-12-01 11:30:42 +00:00
|
|
|
accountsDB: c.accountsDB,
|
2023-09-20 08:41:23 +00:00
|
|
|
blockRangeDAO: c.blockRangeDAO,
|
2024-05-20 11:21:21 +00:00
|
|
|
chainClient: chainClient,
|
2023-09-20 08:41:23 +00:00
|
|
|
balanceCacher: c.balanceCacher,
|
|
|
|
feed: c.feed,
|
|
|
|
noLimit: false,
|
2023-11-27 10:08:17 +00:00
|
|
|
fromBlockNumber: rangeItem[0],
|
|
|
|
toBlockNumber: rangeItem[1],
|
2023-09-20 08:41:23 +00:00
|
|
|
tokenManager: c.tokenManager,
|
|
|
|
blocksLoadedCh: blocksLoadedCh,
|
|
|
|
defaultNodeBlockChunkSize: DefaultNodeBlockChunkSize,
|
2023-06-14 10:00:56 +00:00
|
|
|
}
|
|
|
|
group.Add(fbc.Command())
|
2023-05-26 08:27:48 +00:00
|
|
|
}
|
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-12-10 14:31:30 +00:00
|
|
|
// startFetchingNewBlocks starts a goroutine running a findNewBlocksCommand
// that polls for new blocks of the given addresses starting at fromNum and
// pushes found headers into blocksLoadedCh. The command is infinite: the
// goroutine simply waits for ctx cancellation instead of waiting on the
// group. The loop counter tracks the goroutine's lifetime.
func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(ctx context.Context, addresses []common.Address, fromNum *big.Int, blocksLoadedCh chan<- []*DBHeader) {
	log.Debug("startFetchingNewBlocks start", "chainID", c.chainClient.NetworkID(), "accounts", addresses)

	c.incLoops()
	go func() {
		defer func() {
			c.decLoops()
		}()

		newBlocksCmd := &findNewBlocksCommand{
			findBlocksCommand: &findBlocksCommand{
				accounts:                  addresses,
				db:                        c.db,
				accountsDB:                c.accountsDB,
				blockRangeDAO:             c.blockRangeDAO,
				chainClient:               c.chainClient,
				balanceCacher:             c.balanceCacher,
				feed:                      c.feed,
				noLimit:                   false,
				fromBlockNumber:           fromNum,
				tokenManager:              c.tokenManager,
				blocksLoadedCh:            blocksLoadedCh,
				defaultNodeBlockChunkSize: DefaultNodeBlockChunkSize,
			},
			contractMaker:                c.contractMaker,
			blockChainState:              c.blockChainState,
			nonceCheckIntervalIterations: nonceCheckIntervalIterations,
			logsCheckIntervalIterations:  logsCheckIntervalIterations,
		}
		group := async.NewGroup(ctx)
		group.Add(newBlocksCmd.Command())

		// No need to wait for the group since it is infinite
		<-ctx.Done()

		log.Debug("startFetchingNewBlocks end", "chainID", c.chainClient.NetworkID(), "accounts", addresses, "error", ctx.Err())
	}()
}
|
2023-06-14 10:00:56 +00:00
|
|
|
|
2023-12-10 14:31:30 +00:00
|
|
|
func (c *loadBlocksAndTransfersCommand) getBlocksToLoad() (map[common.Address][]*big.Int, error) {
|
2023-11-27 10:08:17 +00:00
|
|
|
blocksMap := make(map[common.Address][]*big.Int)
|
|
|
|
for _, account := range c.accounts {
|
|
|
|
blocks, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, numberOfBlocksCheckedPerIteration)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("loadBlocksAndTransfersCommand GetBlocksToLoadByAddress", "error", err)
|
2023-12-10 14:31:30 +00:00
|
|
|
return nil, err
|
2023-11-27 10:08:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(blocks) == 0 {
|
|
|
|
log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID(), "account", account)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
blocksMap[account] = blocks
|
2023-06-14 10:00:56 +00:00
|
|
|
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
if len(blocksMap) == 0 {
|
|
|
|
log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID())
|
2023-11-02 17:24:23 +00:00
|
|
|
}
|
|
|
|
|
2023-12-10 14:31:30 +00:00
|
|
|
return blocksMap, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// startFetchingTransfersForLoadedBlocks schedules a loadTransfersCommand for
// all blocks already stored in the DB whose transfers are missing. The
// command is added to the caller's atomic group from a goroutine; the caller
// waits on the group for completion. Returns an error only if reading the
// pending blocks from the DB fails.
func (c *loadBlocksAndTransfersCommand) startFetchingTransfersForLoadedBlocks(group *async.AtomicGroup) error {

	log.Debug("fetchTransfers start", "chainID", c.chainClient.NetworkID(), "accounts", c.accounts)

	blocksMap, err := c.getBlocksToLoad()
	if err != nil {
		return err
	}

	go func() {
		txCommand := &loadTransfersCommand{
			accounts:           c.accounts,
			db:                 c.db,
			blockDAO:           c.blockDAO,
			chainClient:        c.chainClient,
			transactionManager: c.transactionManager,
			pendingTxManager:   c.pendingTxManager,
			tokenManager:       c.tokenManager,
			blocksByAddress:    blocksMap,
			feed:               c.feed,
		}

		group.Add(txCommand.Command())
		log.Debug("fetchTransfers end", "chainID", c.chainClient.NetworkID(), "accounts", c.accounts)
	}()

	return nil
}
|
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func (c *loadBlocksAndTransfersCommand) notifyHistoryReady(account common.Address) {
|
2023-06-01 13:09:50 +00:00
|
|
|
if c.feed != nil {
|
|
|
|
c.feed.Send(walletevent.Event{
|
|
|
|
Type: EventRecentHistoryReady,
|
2023-11-27 10:08:17 +00:00
|
|
|
Accounts: []common.Address{account},
|
2023-09-19 11:17:36 +00:00
|
|
|
ChainID: c.chainClient.NetworkID(),
|
2023-06-01 13:09:50 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2023-05-19 08:19:48 +00:00
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
func (c *loadBlocksAndTransfersCommand) areAllTransfersLoaded(account common.Address) (bool, error) {
|
|
|
|
allBlocksLoaded, err := areAllHistoryBlocksLoadedForAddress(c.blockRangeDAO, c.chainClient.NetworkID(), account)
|
2023-06-01 13:09:50 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error("loadBlockAndTransfersCommand allHistoryBlocksLoaded", "error", err)
|
|
|
|
return false, err
|
|
|
|
}
|
2023-05-19 08:19:48 +00:00
|
|
|
|
2023-06-01 13:09:50 +00:00
|
|
|
if allBlocksLoaded {
|
2023-11-27 10:08:17 +00:00
|
|
|
headers, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, 1)
|
2023-06-01 13:09:50 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Error("loadBlocksAndTransfersCommand GetFirstSavedBlock", "error", err)
|
|
|
|
return false, err
|
|
|
|
}
|
2023-05-19 08:19:48 +00:00
|
|
|
|
2023-11-27 10:08:17 +00:00
|
|
|
if len(headers) == 0 {
|
2023-06-01 13:09:50 +00:00
|
|
|
return true, nil
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-01 13:09:50 +00:00
|
|
|
return false, nil
|
2023-05-19 08:19:48 +00:00
|
|
|
}
|
2023-05-26 08:27:48 +00:00
|
|
|
|
2023-06-14 10:00:56 +00:00
|
|
|
// TODO - make it a common method for every service that wants head block number, that will cache the latest block
|
|
|
|
// and updates it on timeout
|
2023-09-19 11:17:36 +00:00
|
|
|
func getHeadBlockNumber(parent context.Context, chainClient chain.ClientInterface) (*big.Int, error) {
|
2023-05-26 08:27:48 +00:00
|
|
|
ctx, cancel := context.WithTimeout(parent, 3*time.Second)
|
|
|
|
head, err := chainClient.HeaderByNumber(ctx, nil)
|
|
|
|
cancel()
|
|
|
|
if err != nil {
|
2023-11-27 10:08:17 +00:00
|
|
|
log.Error("getHeadBlockNumber", "error", err)
|
2023-05-26 08:27:48 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return head.Number, err
|
|
|
|
}
|
|
|
|
|
2023-09-20 08:41:23 +00:00
|
|
|
func nextRange(maxRangeSize int, prevFrom, zeroBlockNumber *big.Int) (*big.Int, *big.Int) {
|
|
|
|
log.Debug("next range start", "from", prevFrom, "zeroBlockNumber", zeroBlockNumber)
|
2023-05-26 08:27:48 +00:00
|
|
|
|
2023-09-20 08:41:23 +00:00
|
|
|
rangeSize := big.NewInt(int64(maxRangeSize))
|
2023-05-26 08:27:48 +00:00
|
|
|
|
2023-09-20 08:41:23 +00:00
|
|
|
to := big.NewInt(0).Set(prevFrom)
|
|
|
|
from := big.NewInt(0).Sub(to, rangeSize)
|
|
|
|
if from.Cmp(zeroBlockNumber) < 0 {
|
2023-05-26 08:27:48 +00:00
|
|
|
from = new(big.Int).Set(zeroBlockNumber)
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Debug("next range end", "from", from, "to", to, "zeroBlockNumber", zeroBlockNumber)
|
|
|
|
|
|
|
|
return from, to
|
|
|
|
}
|