[#4690] Nilable block range values for proper ranges management (#4691)

Roman Volosovskyi 2024-02-19 16:50:07 +01:00 committed by GitHub
parent a866b8025e
commit cc708ce0ce
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 362 additions and 267 deletions

View File

@@ -58,3 +58,36 @@ func (i *SQLBigIntBytes) Value() (driver.Value, error) {
 	}
 	return (*big.Int)(i).Bytes(), nil
 }
+
+type NilableSQLBigInt struct {
+	big.Int
+	isNil bool
+}
+
+func (i *NilableSQLBigInt) IsNil() bool {
+	return i.isNil
+}
+
+func (i *NilableSQLBigInt) SetNil() {
+	i.isNil = true
+}
+
+// Scan implements interface.
+func (i *NilableSQLBigInt) Scan(value interface{}) error {
+	if value == nil {
+		i.SetNil()
+		return nil
+	}
+	val, ok := value.(int64)
+	if !ok {
+		return errors.New("not an integer")
+	}
+
+	i.SetInt64(val)
+	return nil
+}
+
+// Not implemented, used only for scanning
+func (i *NilableSQLBigInt) Value() (driver.Value, error) {
+	return nil, errors.New("NilableSQLBigInt.Value is not implemented")
+}
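The NilableSQLBigInt added above lets a scan distinguish a SQL NULL from a legitimate zero, which is what makes the nilable Start/FirstKnown/LastKnown fields below workable. A minimal usage sketch, assuming the package lives at github.com/status-im/status-go/services/wallet/bigint; the table and column names here are hypothetical, purely for illustration:

	package example

	import (
		"database/sql"
		"math/big"

		"github.com/status-im/status-go/services/wallet/bigint"
	)

	// readFirstKnown maps a nullable INTEGER column to a *big.Int that stays
	// nil when the column is NULL, rather than defaulting to zero.
	func readFirstKnown(db *sql.DB, id int64) (*big.Int, error) {
		first := &bigint.NilableSQLBigInt{}
		// "blocks" and "blk_first" are illustrative names, not from the commit.
		if err := db.QueryRow(`SELECT blk_first FROM blocks WHERE id = ?`, id).Scan(first); err != nil {
			return nil, err
		}
		if first.IsNil() {
			return nil, nil // NULL column: no value recorded yet
		}
		return big.NewInt(first.Int64()), nil
	}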

View File

@@ -10,7 +10,7 @@ import (
 )

 type BlockRangeDAOer interface {
-	getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error)
+	getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, exists bool, err error)
 	upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error)
 	updateTokenRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
 	upsertEthRange(chainID uint64, account common.Address, newBlockRange *BlockRange) (err error)
@@ -27,7 +27,7 @@ type BlockRange struct {
 }

 func NewBlockRange() *BlockRange {
-	return &BlockRange{Start: &big.Int{}, FirstKnown: &big.Int{}, LastKnown: &big.Int{}}
+	return &BlockRange{Start: nil, FirstKnown: nil, LastKnown: nil}
 }

 type ethTokensBlockRanges struct {
@@ -40,7 +40,7 @@ func newEthTokensBlockRanges() *ethTokensBlockRanges {
 	return &ethTokensBlockRanges{eth: NewBlockRange(), tokens: NewBlockRange()}
 }

-func (b *BlockRangeSequentialDAO) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
+func (b *BlockRangeSequentialDAO) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, exists bool, err error) {
 	query := `SELECT blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash FROM blocks_ranges_sequential
 	WHERE address = ?
 	AND network_id = ?`
@@ -51,25 +51,45 @@ func (b *BlockRangeSequentialDAO) getBlockRange(chainID uint64, address common.Address) {
 	}
 	defer rows.Close()

-	blockRange = &ethTokensBlockRanges{}
+	blockRange = newEthTokensBlockRanges()
 	if rows.Next() {
-		blockRange = newEthTokensBlockRanges()
-		err = rows.Scan((*bigint.SQLBigInt)(blockRange.eth.Start),
-			(*bigint.SQLBigInt)(blockRange.eth.FirstKnown),
-			(*bigint.SQLBigInt)(blockRange.eth.LastKnown),
-			(*bigint.SQLBigInt)(blockRange.tokens.Start),
-			(*bigint.SQLBigInt)(blockRange.tokens.FirstKnown),
-			(*bigint.SQLBigInt)(blockRange.tokens.LastKnown),
-			&blockRange.balanceCheckHash,
-		)
+		exists = true
+		efk := &bigint.NilableSQLBigInt{}
+		elk := &bigint.NilableSQLBigInt{}
+		es := &bigint.NilableSQLBigInt{}
+		tfk := &bigint.NilableSQLBigInt{}
+		tlk := &bigint.NilableSQLBigInt{}
+		ts := &bigint.NilableSQLBigInt{}
+		err = rows.Scan(es, efk, elk, ts, tfk, tlk, &blockRange.balanceCheckHash)
+		if !es.IsNil() {
+			blockRange.eth.Start = big.NewInt(es.Int64())
+		}
+		if !efk.IsNil() {
+			blockRange.eth.FirstKnown = big.NewInt(efk.Int64())
+		}
+		if !elk.IsNil() {
+			blockRange.eth.LastKnown = big.NewInt(elk.Int64())
+		}
+		if !ts.IsNil() {
+			blockRange.tokens.Start = big.NewInt(ts.Int64())
+		}
+		if !tfk.IsNil() {
+			blockRange.tokens.FirstKnown = big.NewInt(tfk.Int64())
+		}
+		if !tlk.IsNil() {
+			blockRange.tokens.LastKnown = big.NewInt(tlk.Int64())
+		}
 		if err != nil {
-			return nil, err
+			return nil, exists, err
 		}
-		return blockRange, nil
+		return blockRange, exists, nil
 	}

-	return blockRange, nil
+	return blockRange, exists, nil
 }

 func (b *BlockRangeSequentialDAO) deleteRange(account common.Address) error {
@@ -85,7 +105,7 @@ func (b *BlockRangeSequentialDAO) deleteRange(account common.Address) error {
 }

 func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error) {
-	ethTokensBlockRange, err := b.getBlockRange(chainID, account)
+	ethTokensBlockRange, exists, err := b.getBlockRange(chainID, account)
 	if err != nil {
 		return err
 	}
@@ -93,18 +113,38 @@ func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error) {
 	ethBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange.eth)
 	tokensBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange.tokens)

-	log.Debug("update eth and tokens blocks range", "account", account, "chainID", chainID,
-		"eth.start", ethBlockRange.Start, "eth.first", ethBlockRange.FirstKnown, "eth.last", ethBlockRange.LastKnown,
-		"tokens.start", tokensBlockRange.Start, "tokens.first", ethBlockRange.FirstKnown, "eth.last", ethBlockRange.LastKnown, "hash", newBlockRange.balanceCheckHash)
+	log.Debug("upsert eth and tokens blocks range",
+		"account", account, "chainID", chainID,
+		"eth.start", ethBlockRange.Start,
+		"eth.first", ethBlockRange.FirstKnown,
+		"eth.last", ethBlockRange.LastKnown,
+		"tokens.first", tokensBlockRange.FirstKnown,
+		"tokens.last", tokensBlockRange.LastKnown,
+		"hash", newBlockRange.balanceCheckHash)

-	upsert, err := b.db.Prepare(`REPLACE INTO blocks_ranges_sequential
-		(network_id, address, blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`)
+	var query *sql.Stmt
+	if exists {
+		query, err = b.db.Prepare(`UPDATE blocks_ranges_sequential SET
+			blk_start = ?,
+			blk_first = ?,
+			blk_last = ?,
+			token_blk_start = ?,
+			token_blk_first = ?,
+			token_blk_last = ?,
+			balance_check_hash = ?
+			WHERE network_id = ? AND address = ?`)
+	} else {
+		query, err = b.db.Prepare(`INSERT INTO blocks_ranges_sequential
+			(blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash, network_id, address) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`)
+	}
 	if err != nil {
 		return err
 	}

-	_, err = upsert.Exec(chainID, account, (*bigint.SQLBigInt)(ethBlockRange.Start), (*bigint.SQLBigInt)(ethBlockRange.FirstKnown), (*bigint.SQLBigInt)(ethBlockRange.LastKnown),
-		(*bigint.SQLBigInt)(tokensBlockRange.Start), (*bigint.SQLBigInt)(tokensBlockRange.FirstKnown), (*bigint.SQLBigInt)(tokensBlockRange.LastKnown), newBlockRange.balanceCheckHash)
+	_, err = query.Exec((*bigint.SQLBigInt)(ethBlockRange.Start), (*bigint.SQLBigInt)(ethBlockRange.FirstKnown), (*bigint.SQLBigInt)(ethBlockRange.LastKnown),
+		(*bigint.SQLBigInt)(tokensBlockRange.Start), (*bigint.SQLBigInt)(tokensBlockRange.FirstKnown), (*bigint.SQLBigInt)(tokensBlockRange.LastKnown), newBlockRange.balanceCheckHash, chainID, account)

 	return err
 }
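The switch from a single REPLACE INTO to an exists-driven UPDATE/INSERT pair is what lets NULLs survive: in SQLite, REPLACE deletes the conflicting row and inserts a fresh one, so any column omitted from the statement falls back to its default. A reduced sketch of the same pattern under that assumption, with hypothetical table and column names:

	package example

	import "database/sql"

	// upsertValue updates the row when it already exists, otherwise inserts it.
	// A nil *int64 is bound as SQL NULL in either branch, so "no value" is
	// stored faithfully instead of being coerced to zero.
	func upsertValue(db *sql.DB, key string, val *int64, exists bool) error {
		var err error
		if exists {
			_, err = db.Exec(`UPDATE kv SET val = ? WHERE key = ?`, val, key)
		} else {
			_, err = db.Exec(`INSERT INTO kv (key, val) VALUES (?, ?)`, key, val)
		}
		return err
	}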
@@ -112,28 +152,36 @@ func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Address, newBlockRange *ethTokensBlockRanges) (err error) {
 func (b *BlockRangeSequentialDAO) upsertEthRange(chainID uint64, account common.Address,
 	newBlockRange *BlockRange) (err error) {

-	ethTokensBlockRange, err := b.getBlockRange(chainID, account)
+	ethTokensBlockRange, exists, err := b.getBlockRange(chainID, account)
 	if err != nil {
 		return err
 	}

 	blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange)

-	log.Debug("update eth blocks range", "account", account, "chainID", chainID,
-		"start", blockRange.Start, "first", blockRange.FirstKnown, "last", blockRange.LastKnown, "old hash", ethTokensBlockRange.balanceCheckHash)
+	log.Debug("upsert eth blocks range", "account", account, "chainID", chainID,
+		"start", blockRange.Start,
+		"first", blockRange.FirstKnown,
+		"last", blockRange.LastKnown,
+		"old hash", ethTokensBlockRange.balanceCheckHash)

-	upsert, err := b.db.Prepare(`REPLACE INTO blocks_ranges_sequential
-		(network_id, address, blk_start, blk_first, blk_last, token_blk_start, token_blk_first, token_blk_last, balance_check_hash) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`)
+	var query *sql.Stmt
+	if exists {
+		query, err = b.db.Prepare(`UPDATE blocks_ranges_sequential SET
+			blk_start = ?,
+			blk_first = ?,
+			blk_last = ?
+			WHERE network_id = ? AND address = ?`)
+	} else {
+		query, err = b.db.Prepare(`INSERT INTO blocks_ranges_sequential
+			(blk_start, blk_first, blk_last, network_id, address) VALUES (?, ?, ?, ?, ?)`)
+	}
 	if err != nil {
 		return err
 	}

-	if ethTokensBlockRange.tokens == nil {
-		ethTokensBlockRange.tokens = NewBlockRange()
-	}
-
-	_, err = upsert.Exec(chainID, account, (*bigint.SQLBigInt)(blockRange.Start), (*bigint.SQLBigInt)(blockRange.FirstKnown), (*bigint.SQLBigInt)(blockRange.LastKnown),
-		(*bigint.SQLBigInt)(ethTokensBlockRange.tokens.Start), (*bigint.SQLBigInt)(ethTokensBlockRange.tokens.FirstKnown), (*bigint.SQLBigInt)(ethTokensBlockRange.tokens.LastKnown), ethTokensBlockRange.balanceCheckHash)
+	_, err = query.Exec((*bigint.SQLBigInt)(blockRange.Start), (*bigint.SQLBigInt)(blockRange.FirstKnown), (*bigint.SQLBigInt)(blockRange.LastKnown), chainID, account)

 	return err
 }
@@ -141,15 +189,16 @@ func (b *BlockRangeSequentialDAO) upsertEthRange(chainID uint64, account common.Address,
 func (b *BlockRangeSequentialDAO) updateTokenRange(chainID uint64, account common.Address,
 	newBlockRange *BlockRange) (err error) {

-	ethTokensBlockRange, err := b.getBlockRange(chainID, account)
+	ethTokensBlockRange, _, err := b.getBlockRange(chainID, account)
 	if err != nil {
 		return err
 	}

 	blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange)

-	log.Debug("update tokens blocks range", "account", account, "chainID", chainID,
-		"start", blockRange.Start, "first", blockRange.FirstKnown, "last", blockRange.LastKnown, "old hash", ethTokensBlockRange.balanceCheckHash)
+	log.Debug("update tokens blocks range",
+		"first", blockRange.FirstKnown,
+		"last", blockRange.LastKnown)

 	update, err := b.db.Prepare(`UPDATE blocks_ranges_sequential SET token_blk_start = ?, token_blk_first = ?, token_blk_last = ? WHERE network_id = ? AND address = ?`)
 	if err != nil {
@@ -163,8 +212,6 @@ func (b *BlockRangeSequentialDAO) updateTokenRange(chainID uint64, account common.Address,
 }

 func prepareUpdatedBlockRange(blockRange, newBlockRange *BlockRange) *BlockRange {
-	// Update existing range
-	if blockRange != nil {
 	if newBlockRange != nil {
 		// Overwrite start block if there was not any or if the new one is older, because it can be refined only
 		// to a greater value, because no history can be before some block that is considered
@@ -186,9 +233,6 @@ func prepareUpdatedBlockRange(blockRange, newBlockRange *BlockRange) *BlockRange {
 			blockRange.LastKnown = newBlockRange.LastKnown
 		}
 	}
-	} else {
-		blockRange = newBlockRange
-	}

 	return blockRange
 }
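With NewBlockRange() now returning nil fields and getBlockRange() always allocating both sub-ranges, the contract shifts from "the struct may be nil" to "the struct is always there, its fields may be nil", which is why the nil guards removed above (and in commands.go further down) are safe to drop. A caller-side sketch under that assumed contract; hasEthHistory is a hypothetical helper, not part of the commit:

	func hasEthHistory(dao BlockRangeDAOer, chainID uint64, account common.Address) (bool, error) {
		br, exists, err := dao.getBlockRange(chainID, account)
		if err != nil {
			return false, err
		}
		// br, br.eth and br.tokens are always non-nil now; only the
		// *big.Int fields signal "not known yet" by being nil.
		return exists && br.eth.FirstKnown != nil, nil
	}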

View File

@@ -64,7 +64,7 @@ func TestBlockRangeSequentialDAO_updateTokenRange(t *testing.T) {
 				t.Errorf("BlockRangeSequentialDAO.updateTokenRange() error = %v, wantErr %v", err, tt.wantErr)
 			}

-			ethTokensBlockRanges, err := b.getBlockRange(tt.args.chainID, tt.args.account)
+			ethTokensBlockRanges, _, err := b.getBlockRange(tt.args.chainID, tt.args.account)
 			require.NoError(t, err)
 			require.NotNil(t, ethTokensBlockRanges.tokens)
 			require.Equal(t, tt.args.newBlockRange.LastKnown, ethTokensBlockRanges.tokens.LastKnown)
@@ -118,7 +118,7 @@ func TestBlockRangeSequentialDAO_updateEthRange(t *testing.T) {
 				t.Errorf("BlockRangeSequentialDAO.upsertEthRange() insert error = %v, wantErr %v", err, tt.wantErr)
 			}

-			ethTokensBlockRanges, err := b.getBlockRange(tt.args.chainID, tt.args.account)
+			ethTokensBlockRanges, _, err := b.getBlockRange(tt.args.chainID, tt.args.account)
 			require.NoError(t, err)
 			require.NotNil(t, ethTokensBlockRanges.eth)
 			require.Equal(t, dummyBlockRange.Start, ethTokensBlockRanges.eth.Start)
@@ -130,7 +130,7 @@ func TestBlockRangeSequentialDAO_updateEthRange(t *testing.T) {
 				t.Errorf("BlockRangeSequentialDAO.upsertEthRange() update error = %v, wantErr %v", err, tt.wantErr)
 			}

-			ethTokensBlockRanges, err = b.getBlockRange(tt.args.chainID, tt.args.account)
+			ethTokensBlockRanges, _, err = b.getBlockRange(tt.args.chainID, tt.args.account)
 			require.NoError(t, err)
 			require.NotNil(t, ethTokensBlockRanges.eth)
 			require.Equal(t, tt.args.newBlockRange.Start, ethTokensBlockRanges.eth.Start)

View File

@@ -79,22 +79,12 @@ func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts
 	addressesToCheck := []common.Address{}
 	for idx, account := range accounts {
-		blockRange, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
+		blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
 		if err != nil {
-			log.Error("findNewBlocksCommand can't block range", "error", err, "account", account, "chain", c.chainClient.NetworkID())
+			log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID())
 			return nil, nil, err
 		}

-		if blockRange.eth == nil {
-			blockRange.eth = NewBlockRange()
-			blockRange.tokens = NewBlockRange()
-		}
-
-		if blockRange.eth.FirstKnown == nil {
-			blockRange.eth.FirstKnown = blockNum
-		}
-
-		if blockRange.eth.LastKnown == nil {
-			blockRange.eth.LastKnown = blockNum
-		}
-
 		checkHash := common.BytesToHash(hashes[idx][:])
 		log.Debug("findNewBlocksCommand comparing hashes", "account", account, "network", c.chainClient.NetworkID(), "old hash", blockRange.balanceCheckHash, "new hash", checkHash.String())
 		if checkHash.String() != blockRange.balanceCheckHash {
@@ -118,7 +108,7 @@ func (c *findNewBlocksCommand) detectNonceChange(parent context.Context, to *big
 	for _, account := range accounts {
 		var oldNonce *int64

-		blockRange, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
+		blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account)
 		if err != nil {
 			log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID())
 			return nil, err
@@ -207,7 +197,7 @@ func (c *findNewBlocksCommand) Run(parent context.Context) error {
 	c.blockChainState.SetLastBlockNumber(c.chainClient.NetworkID(), headNum.Uint64())

 	if len(accountsWithDetectedChanges) != 0 {
-		log.Debug("findNewBlocksCommand detected accounts with changes, proceeding", "accounts", accountsWithDetectedChanges)
+		log.Debug("findNewBlocksCommand detected accounts with changes, proceeding", "accounts", accountsWithDetectedChanges, "from", c.fromBlockNumber)
 		err = c.findAndSaveEthBlocks(parent, c.fromBlockNumber, headNum, accountsToCheck)
 		if err != nil {
 			return err
@@ -340,11 +330,11 @@ func (c *findNewBlocksCommand) findAndSaveTokenBlocks(parent context.Context, fr
 	return c.markTokenBlockRangeChecked(c.accounts, fromNum, headNum)
 }

-func (c *findNewBlocksCommand) markTokenBlockRangeChecked(accounts []common.Address, from, to *big.Int) error {
+func (c *findBlocksCommand) markTokenBlockRangeChecked(accounts []common.Address, from, to *big.Int) error {
 	log.Debug("markTokenBlockRangeChecked", "chain", c.chainClient.NetworkID(), "from", from.Uint64(), "to", to.Uint64())

 	for _, account := range accounts {
-		err := c.blockRangeDAO.updateTokenRange(c.chainClient.NetworkID(), account, &BlockRange{LastKnown: to})
+		err := c.blockRangeDAO.updateTokenRange(c.chainClient.NetworkID(), account, &BlockRange{FirstKnown: from, LastKnown: to})
 		if err != nil {
 			log.Error("findNewBlocksCommand upsertTokenRange", "error", err)
 			return err
@@ -535,7 +525,7 @@ func (c *findBlocksCommand) ERC20ScanByBalance(parent context.Context, account c
 }

 func (c *findBlocksCommand) checkERC20Tail(parent context.Context, account common.Address) ([]*DBHeader, error) {
-	log.Info("checkERC20Tail", "account", account, "to block", c.startBlockNumber, "from", c.resFromBlock.Number)
+	log.Debug("checkERC20Tail", "account", account, "to block", c.startBlockNumber, "from", c.resFromBlock.Number)
 	tokens, err := c.tokenManager.GetTokens(c.chainClient.NetworkID())
 	if err != nil {
 		return nil, err
@@ -656,6 +646,10 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) {
 		}

 		if c.reachedETHHistoryStart {
+			err = c.markTokenBlockRangeChecked([]common.Address{account}, big.NewInt(0), to)
+			if err != nil {
+				break
+			}
 			log.Debug("findBlocksCommand reached first ETH transfer and checked erc20 tail", "chain", c.chainClient.NetworkID(), "account", account)
 			break
 		}
@@ -665,6 +659,11 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) {
 			break
 		}

+		err = c.markTokenBlockRangeChecked([]common.Address{account}, c.resFromBlock.Number, to)
+		if err != nil {
+			break
+		}
+
 		// if we have found first ETH block and we have not reached the start of ETH history yet
 		if c.startBlockNumber != nil && c.fromBlockNumber.Cmp(from) == -1 {
 			log.Debug("ERC20 tail should be checked", "initial from", c.fromBlockNumber, "actual from", from, "first ETH block", c.startBlockNumber)
@@ -752,7 +751,7 @@ func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to
 func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO BlockRangeDAOer) (
 	*ethTokensBlockRanges, error) {

-	blockRange, err := blockDAO.getBlockRange(chainID, account)
+	blockRange, _, err := blockDAO.getBlockRange(chainID, account)
 	if err != nil {
 		log.Error("failed to load block ranges from database", "chain", chainID, "account", account,
 			"error", err)
@@ -765,8 +764,9 @@ func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO BlockRangeDAOer) (
 // Returns if all blocks are loaded, which means that start block (beginning of account history)
 // has been found and all block headers saved to the DB
 func areAllHistoryBlocksLoaded(blockInfo *BlockRange) bool {
-	if blockInfo != nil && blockInfo.FirstKnown != nil && blockInfo.Start != nil &&
-		blockInfo.Start.Cmp(blockInfo.FirstKnown) >= 0 {
+	if blockInfo != nil && blockInfo.FirstKnown != nil &&
+		((blockInfo.Start != nil && blockInfo.Start.Cmp(blockInfo.FirstKnown) >= 0) ||
+			blockInfo.FirstKnown.Cmp(zero) == 0) {
 		return true
 	}
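The widened predicate also reports "all loaded" when FirstKnown has reached block 0, even if Start was never recorded. A table-driven sketch of the expected behaviour, assuming zero is the package-level big.NewInt(0) the code already references; the case names are illustrative:

	func TestAreAllHistoryBlocksLoaded(t *testing.T) {
		cases := []struct {
			name string
			in   *BlockRange
			want bool
		}{
			{"nil block range", nil, false},
			{"first known missing", &BlockRange{Start: big.NewInt(10)}, false},
			{"start at or past first known", &BlockRange{Start: big.NewInt(10), FirstKnown: big.NewInt(5)}, true},
			{"first known reached genesis, start unset", &BlockRange{FirstKnown: big.NewInt(0)}, true},
		}
		for _, tc := range cases {
			require.Equal(t, tc.want, areAllHistoryBlocksLoaded(tc.in), tc.name)
		}
	}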
@@ -776,7 +776,7 @@ func areAllHistoryBlocksLoaded(blockInfo *BlockRange) bool {
 func areAllHistoryBlocksLoadedForAddress(blockRangeDAO BlockRangeDAOer, chainID uint64,
 	address common.Address) (bool, error) {

-	blockRange, err := blockRangeDAO.getBlockRange(chainID, address)
+	blockRange, _, err := blockRangeDAO.getBlockRange(chainID, address)
 	if err != nil {
 		log.Error("findBlocksCommand getBlockRange", "error", err)
 		return false, err
@@ -1052,14 +1052,13 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *asyn
 	}

 	ranges := [][]*big.Int{}
 	// There are 2 history intervals:
 	// 1) from 0 to FirstKnown
 	// 2) from LastKnown to `toNum` (head)
 	// If the blockRange is nil, we need to load all blocks from `fromNum` to `toNum`
 	// As the current implementation checks ETH first and then tokens, token ranges may be behind ETH ranges
 	// in cases when block searching was interrupted, so we use token ranges
-	if blockRange != nil && blockRange.tokens != nil {
+	if blockRange.tokens.LastKnown != nil || blockRange.tokens.FirstKnown != nil {
 		if blockRange.tokens.LastKnown != nil && toNum.Cmp(blockRange.tokens.LastKnown) > 0 {
 			ranges = append(ranges, []*big.Int{blockRange.tokens.LastKnown, toNum})
 		}
@@ -1089,6 +1088,7 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *asyn
 	}

 	for _, rangeItem := range ranges {
+		log.Debug("range item", "r", rangeItem, "n", c.chainClient.NetworkID(), "a", account)
 		fbc := &findBlocksCommand{
 			accounts: []common.Address{account},
 			db:       c.db,

View File

@@ -1079,10 +1079,11 @@ func TestFindBlocksCommand(t *testing.T) {
 		})
 		accDB, err := accounts.NewDB(appdb)
 		require.NoError(t, err)
+		blockRangeDAO := &BlockRangeSequentialDAO{wdb.client}
 		fbc := &findBlocksCommand{
 			accounts:      []common.Address{accountAddress},
 			db:            wdb,
-			blockRangeDAO: &BlockRangeSequentialDAO{wdb.client},
+			blockRangeDAO: blockRangeDAO,
 			accountsDB:    accDB,
 			chainClient:   tc,
 			balanceCacher: balance.NewCacherWithTTL(5 * time.Minute),
@@ -1126,6 +1127,14 @@ func TestFindBlocksCommand(t *testing.T) {
 			sort.Slice(numbers, func(i, j int) bool { return numbers[i] < numbers[j] })
 			require.Equal(t, testCase.expectedBlocksFound, len(foundBlocks), testCase.label, "found blocks", numbers)
+
+			blRange, _, err := blockRangeDAO.getBlockRange(tc.NetworkID(), accountAddress)
+			require.NoError(t, err)
+			require.NotNil(t, blRange.eth.FirstKnown)
+			require.NotNil(t, blRange.tokens.FirstKnown)
+			if testCase.fromBlock == 0 {
+				require.Equal(t, 0, blRange.tokens.FirstKnown.Cmp(zero))
+			}
 		}
 	}
 }
@@ -1621,16 +1630,16 @@ type BlockRangeSequentialDAOMockError struct {
 	*BlockRangeSequentialDAO
 }

-func (b *BlockRangeSequentialDAOMockError) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
-	return nil, errors.New("DB error")
+func (b *BlockRangeSequentialDAOMockError) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, exists bool, err error) {
+	return nil, true, errors.New("DB error")
 }

 type BlockRangeSequentialDAOMockSuccess struct {
 	*BlockRangeSequentialDAO
 }

-func (b *BlockRangeSequentialDAOMockSuccess) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, err error) {
-	return newEthTokensBlockRanges(), nil
+func (b *BlockRangeSequentialDAOMockSuccess) getBlockRange(chainID uint64, address common.Address) (blockRange *ethTokensBlockRanges, exists bool, err error) {
+	return newEthTokensBlockRanges(), true, nil
 }

 func TestLoadBlocksAndTransfersCommand_FiniteFinishedInfiniteRunning(t *testing.T) {

View File

@@ -76,7 +76,7 @@ func TestController_watchAccountsChanges(t *testing.T) {
 	err = blockRangesDAO.upsertRange(chainID, address, newEthTokensBlockRanges())
 	require.NoError(t, err)

-	ranges, err := blockRangesDAO.getBlockRange(chainID, address)
+	ranges, _, err := blockRangesDAO.getBlockRange(chainID, address)
 	require.NoError(t, err)
 	require.NotNil(t, ranges)
@@ -113,11 +113,14 @@ func TestController_watchAccountsChanges(t *testing.T) {
 	require.NoError(t, err)
 	require.Nil(t, block)

-	ranges, err = blockRangesDAO.getBlockRange(chainID, address)
+	ranges, _, err = blockRangesDAO.getBlockRange(chainID, address)
 	require.NoError(t, err)
-	require.Nil(t, ranges.eth)
-	require.Nil(t, ranges.tokens)
+	require.Nil(t, ranges.eth.FirstKnown)
+	require.Nil(t, ranges.eth.LastKnown)
+	require.Nil(t, ranges.eth.Start)
+	require.Nil(t, ranges.tokens.FirstKnown)
+	require.Nil(t, ranges.tokens.LastKnown)
+	require.Nil(t, ranges.tokens.Start)
 }

 func TestController_cleanupAccountLeftovers(t *testing.T) {

View File

@ -1,37 +1,37 @@
// Code generated by go-bindata. DO NOT EDIT. // Code generated for package migrations by go-bindata DO NOT EDIT. (@generated)
// sources: // sources:
// 1691753758_initial.up.sql (5.738kB) // 1691753758_initial.up.sql
// 1692701329_add_collectibles_and_collections_data_cache.up.sql (1.808kB) // 1692701329_add_collectibles_and_collections_data_cache.up.sql
// 1692701339_add_scope_to_pending.up.sql (576B) // 1692701339_add_scope_to_pending.up.sql
// 1694540071_add_collectibles_ownership_update_timestamp.up.sql (349B) // 1694540071_add_collectibles_ownership_update_timestamp.up.sql
// 1694692748_add_raw_balance_to_token_balances.up.sql (165B) // 1694692748_add_raw_balance_to_token_balances.up.sql
// 1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql (275B) // 1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql
// 1695932536_balance_history_v2.up.sql (653B) // 1695932536_balance_history_v2.up.sql
// 1696853635_input_data.up.sql (23.14kB) // 1696853635_input_data.up.sql
// 1698117918_add_community_id_to_tokens.up.sql (61B) // 1698117918_add_community_id_to_tokens.up.sql
// 1698257443_add_community_metadata_to_wallet_db.up.sql (323B) // 1698257443_add_community_metadata_to_wallet_db.up.sql
// 1699987075_add_timestamp_and_state_to_community_data_cache.up.sql (865B) // 1699987075_add_timestamp_and_state_to_community_data_cache.up.sql
// 1700414564_add_wallet_connect_pairings_table.up.sql (439B) // 1700414564_add_wallet_connect_pairings_table.up.sql
// 1701101493_add_token_blocks_range.up.sql (469B) // 1701101493_add_token_blocks_range.up.sql
// 1702467441_wallet_connect_sessions_instead_of_pairings.up.sql (356B) // 1702467441_wallet_connect_sessions_instead_of_pairings.up.sql
// 1702577524_add_community_collections_and_collectibles_images_cache.up.sql (210B) // 1702577524_add_community_collections_and_collectibles_images_cache.up.sql
// 1702867707_add_balance_to_collectibles_ownership_cache.up.sql (289B) // 1702867707_add_balance_to_collectibles_ownership_cache.up.sql
// 1703686612_add_color_to_saved_addresses.up.sql (114B) // 1703686612_add_color_to_saved_addresses.up.sql
// 1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql (894B) // 1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql
// 1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql (73B) // 1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql
// 1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql (84B) // 1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql
// 1706531789_remove_gasfee-only-eth-transfers.up.sql (627B) // 1706531789_remove_gasfee-only-eth-transfers.up.sql
// 1707160323_add_contract_type_table.up.sql (282B) // 1707160323_add_contract_type_table.up.sql
// doc.go (74B) // 1708089811_add_nullable_fiesl_blocks_ranges.up.sql
// doc.go
package migrations package migrations
import ( import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"crypto/sha256"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -41,7 +41,7 @@ import (
 func bindataRead(data []byte, name string) ([]byte, error) {
 	gz, err := gzip.NewReader(bytes.NewBuffer(data))
 	if err != nil {
-		return nil, fmt.Errorf("read %q: %w", name, err)
+		return nil, fmt.Errorf("Read %q: %v", name, err)
 	}

 	var buf bytes.Buffer
@@ -49,7 +49,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 	clErr := gz.Close()

 	if err != nil {
-		return nil, fmt.Errorf("read %q: %w", name, err)
+		return nil, fmt.Errorf("Read %q: %v", name, err)
 	}
 	if clErr != nil {
 		return nil, err
@@ -61,7 +61,6 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 type asset struct {
 	bytes []byte
 	info  os.FileInfo
-	digest [sha256.Size]byte
 }

 type bindataFileInfo struct {
@@ -71,21 +70,32 @@ type bindataFileInfo struct {
 	modTime time.Time
 }

+// Name return file name
 func (fi bindataFileInfo) Name() string {
 	return fi.name
 }
+
+// Size return file size
 func (fi bindataFileInfo) Size() int64 {
 	return fi.size
 }
+
+// Mode return file mode
 func (fi bindataFileInfo) Mode() os.FileMode {
 	return fi.mode
 }
+
+// Mode return file modify time
 func (fi bindataFileInfo) ModTime() time.Time {
 	return fi.modTime
 }
+
+// IsDir return file whether a directory
 func (fi bindataFileInfo) IsDir() bool {
-	return false
+	return fi.mode&os.ModeDir != 0
 }
+
+// Sys return file is sys mode
 func (fi bindataFileInfo) Sys() interface{} {
 	return nil
 }
@@ -105,8 +115,8 @@ func _1691753758_initialUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}}
+	info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -125,8 +135,8 @@ func _1692701329_add_collectibles_and_collections_data_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}}
+	info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -145,8 +155,8 @@ func _1692701339_add_scope_to_pendingUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x36, 0x8a, 0x5e, 0xe2, 0x63, 0x15, 0x37, 0xba, 0x55, 0x18, 0xf3, 0xcc, 0xe0, 0x5, 0x84, 0xe1, 0x5b, 0xe8, 0x1, 0x32, 0x6b, 0x9f, 0x7d, 0x9f, 0xd9, 0x23, 0x6c, 0xa9, 0xb5, 0xdc, 0xf4, 0x93}}
+	info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -165,8 +175,8 @@ func _1694540071_add_collectibles_ownership_update_timestampUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x45, 0xc7, 0xce, 0x79, 0x63, 0xbc, 0x6f, 0x83, 0x5f, 0xe2, 0x3, 0x56, 0xcc, 0x5, 0x2f, 0x85, 0xda, 0x7e, 0xea, 0xf5, 0xd2, 0xac, 0x19, 0xd4, 0xd8, 0x5e, 0xdd, 0xed, 0xe2, 0xa9, 0x97}}
+	info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -185,8 +195,8 @@ func _1694692748_add_raw_balance_to_token_balancesUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0xe0, 0x5b, 0x42, 0xf0, 0x96, 0xa5, 0xf5, 0xed, 0xc0, 0x97, 0x88, 0xb0, 0x6d, 0xfe, 0x7d, 0x97, 0x2e, 0x17, 0xd2, 0x16, 0xbc, 0x2a, 0xf2, 0xcc, 0x67, 0x9e, 0xc5, 0x47, 0xf6, 0x69, 0x1}}
+	info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -205,8 +215,8 @@ func _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfa, 0x2, 0xa, 0x7f, 0x4b, 0xd1, 0x3, 0xd0, 0x3, 0x29, 0x84, 0x31, 0xed, 0x49, 0x4f, 0xb1, 0x2d, 0xd7, 0x80, 0x41, 0x5b, 0xfa, 0x6, 0xae, 0xb4, 0xf6, 0x6b, 0x49, 0xee, 0x57, 0x33, 0x76}}
+	info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -225,8 +235,8 @@ func _1695932536_balance_history_v2UpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xf4, 0x14, 0x91, 0xf6, 0x5f, 0xc4, 0x9b, 0xb7, 0x83, 0x32, 0x72, 0xbe, 0x82, 0x42, 0x39, 0xa4, 0x3b, 0xc9, 0x78, 0x3d, 0xca, 0xd4, 0xbf, 0xfc, 0x7a, 0x33, 0x1e, 0xcd, 0x9e, 0xe4, 0x85}}
+	info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -245,8 +255,8 @@ func _1696853635_input_dataUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x30, 0x33, 0x33, 0x55, 0xc5, 0x57, 0x2b, 0xaf, 0xef, 0x3d, 0x8d, 0x2a, 0xaa, 0x5c, 0x32, 0xd1, 0xf4, 0xd, 0x4a, 0xd0, 0x33, 0x4a, 0xe8, 0xf6, 0x8, 0x6b, 0x65, 0xcc, 0xba, 0xed, 0x42}}
+	info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -265,8 +275,8 @@ func _1698117918_add_community_id_to_tokensUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb3, 0x82, 0xdb, 0xde, 0x3, 0x3, 0xc, 0x67, 0xf3, 0x54, 0xc4, 0xad, 0xd6, 0xce, 0x56, 0xfb, 0xc1, 0x87, 0xd7, 0xda, 0xab, 0xec, 0x1, 0xe1, 0x7d, 0xb3, 0x63, 0xd6, 0xe5, 0x5d, 0x1c, 0x15}}
+	info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -285,8 +295,8 @@ func _1698257443_add_community_metadata_to_wallet_dbUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xd3, 0x4, 0x25, 0xfa, 0x23, 0x1, 0x48, 0x83, 0x26, 0x20, 0xf2, 0x3d, 0xbc, 0xc1, 0xa7, 0x7c, 0x27, 0x7c, 0x1d, 0x63, 0x3, 0xa, 0xd0, 0xce, 0x47, 0x86, 0xdc, 0xa1, 0x3c, 0x2, 0x1c}}
+	info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -305,8 +315,8 @@ func _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql", size: 865, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc3, 0xee, 0x37, 0xf9, 0x7f, 0x9e, 0xfe, 0x93, 0x66, 0x2b, 0xd, 0x57, 0xf4, 0x89, 0x6c, 0x51, 0xfd, 0x14, 0xe9, 0xcd, 0xab, 0x65, 0xe7, 0xa7, 0x83, 0x7e, 0xe0, 0x5c, 0x14, 0x49, 0xf3, 0xe5}}
+	info := bindataFileInfo{name: "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql", size: 865, mode: os.FileMode(420), modTime: time.Unix(1700258852, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -325,8 +335,8 @@ func _1700414564_add_wallet_connect_pairings_tableUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1700414564_add_wallet_connect_pairings_table.up.sql", size: 439, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x77, 0x5e, 0x19, 0x62, 0x3c, 0x3a, 0x81, 0x16, 0xa0, 0x95, 0x35, 0x62, 0xab, 0x5e, 0x2b, 0xea, 0x11, 0x71, 0x11, 0xd0, 0x9, 0xab, 0x9c, 0xab, 0xf2, 0xdd, 0x5f, 0x88, 0x83, 0x9a, 0x93}}
+	info := bindataFileInfo{name: "1700414564_add_wallet_connect_pairings_table.up.sql", size: 439, mode: os.FileMode(420), modTime: time.Unix(1701084281, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -345,8 +355,8 @@ func _1701101493_add_token_blocks_rangeUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1701101493_add_token_blocks_range.up.sql", size: 469, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x37, 0xfb, 0x1a, 0x6c, 0x8c, 0xa8, 0x1e, 0xa2, 0xa5, 0x1f, 0x90, 0x73, 0x3e, 0x31, 0x5f, 0x48, 0x1e, 0x9a, 0x37, 0x27, 0x1c, 0xc, 0x67, 0x1, 0xcd, 0xec, 0x85, 0x4c, 0x1c, 0x26, 0x52}}
+	info := bindataFileInfo{name: "1701101493_add_token_blocks_range.up.sql", size: 469, mode: os.FileMode(420), modTime: time.Unix(1701895190, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -365,8 +375,8 @@ func _1702467441_wallet_connect_sessions_instead_of_pairingsUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1702467441_wallet_connect_sessions_instead_of_pairings.up.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x5f, 0x0, 0x60, 0x6, 0x28, 0x76, 0x61, 0x39, 0xdc, 0xa1, 0x84, 0x80, 0x46, 0x8a, 0xe4, 0x42, 0xb5, 0x1f, 0x18, 0x14, 0x23, 0x46, 0xb9, 0x51, 0xf, 0x62, 0xac, 0xc, 0x7, 0x98, 0xe}}
+	info := bindataFileInfo{name: "1702467441_wallet_connect_sessions_instead_of_pairings.up.sql", size: 356, mode: os.FileMode(420), modTime: time.Unix(1703074936, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -385,8 +395,8 @@ func _1702577524_add_community_collections_and_collectibles_images_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1702577524_add_community_collections_and_collectibles_images_cache.up.sql", size: 210, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8e, 0x1b, 0x32, 0x2c, 0xfa, 0x11, 0x5e, 0x5e, 0x5d, 0xef, 0x92, 0xa0, 0x29, 0x52, 0xbf, 0x6e, 0xe3, 0x30, 0xe4, 0xdf, 0xdc, 0x5, 0xbe, 0xd1, 0xf8, 0x3e, 0xd9, 0x9b, 0xd6, 0x9b, 0x95, 0x96}}
+	info := bindataFileInfo{name: "1702577524_add_community_collections_and_collectibles_images_cache.up.sql", size: 210, mode: os.FileMode(420), modTime: time.Unix(1703074936, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -405,8 +415,8 @@ func _1702867707_add_balance_to_collectibles_ownership_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1702867707_add_balance_to_collectibles_ownership_cache.up.sql", size: 289, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0x63, 0x30, 0x11, 0x22, 0xb9, 0xee, 0xae, 0xb8, 0xc4, 0xe6, 0xd3, 0x7, 0xc, 0xe6, 0xa3, 0x72, 0x8c, 0x6, 0x9d, 0x6c, 0x97, 0x8f, 0xb2, 0xd0, 0x37, 0x69, 0x69, 0x6, 0x7f, 0x67, 0x94}}
+	info := bindataFileInfo{name: "1702867707_add_balance_to_collectibles_ownership_cache.up.sql", size: 289, mode: os.FileMode(420), modTime: time.Unix(1703074936, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -425,8 +435,8 @@ func _1703686612_add_color_to_saved_addressesUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1703686612_add_color_to_saved_addresses.up.sql", size: 114, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb7, 0x6e, 0x8d, 0xc0, 0x49, 0xc, 0xb, 0x66, 0xa0, 0x77, 0x32, 0x76, 0xa8, 0xd0, 0x40, 0xce, 0x67, 0xa, 0x9e, 0x23, 0x36, 0xe, 0xc3, 0xd3, 0x9d, 0xe2, 0xde, 0x60, 0x19, 0xba, 0x44, 0xf1}}
+	info := bindataFileInfo{name: "1703686612_add_color_to_saved_addresses.up.sql", size: 114, mode: os.FileMode(420), modTime: time.Unix(1704191044, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -445,8 +455,8 @@ func _1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql", size: 894, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x70, 0xd3, 0xcf, 0x90, 0xb2, 0xa, 0x23, 0x41, 0x8a, 0xa5, 0x90, 0x7b, 0x34, 0xec, 0x3b, 0x3f, 0xa9, 0xb1, 0x95, 0xf3, 0x2a, 0xdf, 0xbb, 0x53, 0x57, 0x27, 0x2b, 0x12, 0x84, 0xf4, 0x83, 0xda}}
+	info := bindataFileInfo{name: "1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql", size: 894, mode: os.FileMode(420), modTime: time.Unix(1704963397, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -465,8 +475,8 @@ func _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql", size: 73, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa2, 0xf0, 0x71, 0xb5, 0xaf, 0x22, 0xac, 0x77, 0xdb, 0x6f, 0x62, 0x27, 0x12, 0x46, 0x60, 0x3, 0x59, 0x43, 0x6f, 0x1, 0xdc, 0xe8, 0x6e, 0x89, 0xa5, 0x77, 0x37, 0x36, 0xd9, 0x4e, 0x6d, 0x9b}}
+	info := bindataFileInfo{name: "1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql", size: 73, mode: os.FileMode(420), modTime: time.Unix(1705491656, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -485,8 +495,8 @@ func _1705664490_add_balance_check_fields_blocks_ranges_sequentialUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3c, 0x49, 0x9, 0xd9, 0x83, 0x3d, 0xd8, 0xb7, 0x4f, 0x8f, 0xc2, 0xa2, 0xd1, 0xb, 0x57, 0x1f, 0x5, 0x39, 0x55, 0xfb, 0x6e, 0x32, 0x9e, 0x19, 0x3d, 0x3c, 0x77, 0xa2, 0xc4, 0xf4, 0x53, 0x35}}
+	info := bindataFileInfo{name: "1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql", size: 84, mode: os.FileMode(420), modTime: time.Unix(1705915118, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -505,8 +515,8 @@ func _1706531789_remove_gasfeeOnlyEthTransfersUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1706531789_remove_gasfee-only-eth-transfers.up.sql", size: 627, mode: os.FileMode(0644), modTime: time.Unix(1707751421, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x8c, 0x37, 0xd3, 0x2a, 0xf, 0x1a, 0x8, 0xe9, 0xa7, 0x1f, 0x5a, 0x10, 0xa7, 0x4c, 0x36, 0x4f, 0xc6, 0xdc, 0xeb, 0x79, 0x90, 0xe7, 0xcd, 0x4a, 0xef, 0xb7, 0x3, 0x6e, 0x88, 0x6f, 0x79}}
+	info := bindataFileInfo{name: "1706531789_remove_gasfee-only-eth-transfers.up.sql", size: 627, mode: os.FileMode(420), modTime: time.Unix(1706782735, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -525,8 +535,28 @@ func _1707160323_add_contract_type_tableUpSql() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1707160323_add_contract_type_table.up.sql", size: 282, mode: os.FileMode(0644), modTime: time.Unix(1707751421, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x45, 0x99, 0xd9, 0x86, 0x72, 0x37, 0x97, 0xfc, 0xa7, 0xbb, 0xe3, 0x86, 0xf1, 0xfd, 0x77, 0x31, 0x91, 0xcb, 0xa4, 0x2c, 0x5b, 0xaa, 0x4b, 0xa2, 0x7f, 0x8a, 0x2c, 0x7a, 0xda, 0x20, 0x7a, 0x97}}
+	info := bindataFileInfo{name: "1707160323_add_contract_type_table.up.sql", size: 282, mode: os.FileMode(420), modTime: time.Unix(1707319100, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var __1708089811_add_nullable_fiesl_blocks_rangesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\xd0\x41\x6b\xc3\x20\x18\xc6\xf1\xbb\x9f\xe2\xa1\xa7\x16\xfa\x0d\x7a\xb2\x8d\x6b\x65\x99\x29\xce\xac\xeb\x49\x4c\xe2\x96\xa0\x18\xa6\x96\x7d\xfd\xb1\x43\x08\x3b\x2c\x39\xbf\xbf\x97\x3f\x3c\x85\xac\xae\x50\xf4\x58\x32\x34\x7e\x6c\x5d\xd2\xd1\x84\x4f\x9b\x74\xb2\x5f\x0f\x1b\xf2\x60\xfc\x81\x90\x93\x64\x54\xb1\x15\x87\x2d\x01\x82\xcd\xdf\x63\x74\x7a\xe8\x50\x8b\x57\x7e\x16\xac\xc0\x91\x9f\xb9\x50\x10\x95\x82\xa8\xcb\x72\x4f\x00\xd3\x75\xd1\xa6\x84\x37\x2a\x4f\x17\x2a\xff\xdc\x1a\xef\x74\xca\x26\xe6\xe9\xb1\x60\x4f\xb4\x2e\x15\xc2\xc3\xfb\x09\x7c\x0c\x31\x2d\x02\x6f\xfe\xbf\xe7\xd1\xd9\xa0\x57\x3b\x33\x5b\xac\xcd\x6c\xa9\xd9\x18\x6f\x42\x6b\x75\xdb\xdb\xd6\xe9\xde\xa4\x1e\x8a\xbd\xcf\x6e\xb3\xf9\x55\x57\xc9\x5f\xa8\xbc\xe3\x99\xdd\xb1\x9d\xb7\xdc\x4f\x83\xed\xc8\x0e\x37\xae\x2e\x55\xad\x20\xab\x1b\x2f\x0e\xe4\x27\x00\x00\xff\xff\xd3\xb5\x4d\x6e\xc2\x01\x00\x00")
+
+func _1708089811_add_nullable_fiesl_blocks_rangesUpSqlBytes() ([]byte, error) {
+	return bindataRead(
+		__1708089811_add_nullable_fiesl_blocks_rangesUpSql,
+		"1708089811_add_nullable_fiesl_blocks_ranges.up.sql",
+	)
+}
+
+func _1708089811_add_nullable_fiesl_blocks_rangesUpSql() (*asset, error) {
+	bytes, err := _1708089811_add_nullable_fiesl_blocks_rangesUpSqlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "1708089811_add_nullable_fiesl_blocks_ranges.up.sql", size: 450, mode: os.FileMode(420), modTime: time.Unix(1708089781, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
@@ -545,8 +575,8 @@ func docGo() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1707305568, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
+	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(420), modTime: time.Unix(1698751811, 0)}
+	a := &asset{bytes: bytes, info: info}
 	return a, nil
 }
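The new migration is embedded as gzip-compressed bytes; bindataRead, shown earlier in this file, is what inflates them at runtime. An equivalent standalone decoding sketch, useful when inspecting what an embedded asset contains (the helper name is illustrative):

	package example

	import (
		"bytes"
		"compress/gzip"
		"io/ioutil"
	)

	// decodeAsset inflates a go-bindata blob back into its original text,
	// mirroring what bindataRead does for each embedded migration.
	func decodeAsset(blob []byte) (string, error) {
		gz, err := gzip.NewReader(bytes.NewReader(blob))
		if err != nil {
			return "", err
		}
		defer gz.Close()
		out, err := ioutil.ReadAll(gz)
		if err != nil {
			return "", err
		}
		return string(out), nil
	}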
@ -554,8 +584,8 @@ func docGo() (*asset, error) {
// It returns an error if the asset could not be found or // It returns an error if the asset could not be found or
// could not be loaded. // could not be loaded.
func Asset(name string) ([]byte, error) { func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1) cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok { if f, ok := _bindata[cannonicalName]; ok {
a, err := f() a, err := f()
if err != nil { if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
@ -565,12 +595,6 @@ func Asset(name string) ([]byte, error) {
return nil, fmt.Errorf("Asset %s not found", name) return nil, fmt.Errorf("Asset %s not found", name)
} }
// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}
// MustAsset is like Asset but panics when Asset would return an error. // MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables. // It simplifies safe initialization of global variables.
func MustAsset(name string) []byte { func MustAsset(name string) []byte {
@ -582,18 +606,12 @@ func MustAsset(name string) []byte {
return a return a
} }
// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}
// AssetInfo loads and returns the asset info for the given name. // AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or // It returns an error if the asset could not be found or
// could not be loaded. // could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) { func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1) cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok { if f, ok := _bindata[cannonicalName]; ok {
a, err := f() a, err := f()
if err != nil { if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
@ -603,33 +621,6 @@ func AssetInfo(name string) (os.FileInfo, error) {
return nil, fmt.Errorf("AssetInfo %s not found", name) return nil, fmt.Errorf("AssetInfo %s not found", name)
} }
// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}
// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}
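The digest helpers removed in this hunk (AssetDigest, Digests) came from a digest-enabled go-bindata build; a sketch of the kind of integrity check they allowed, assuming an expected map captured from an earlier build — hypothetical usage, only valid against the digest-enabled side of this diff:

package main

import (
	"crypto/sha256"
	"fmt"
)

// verifyAssets compares every embedded asset's checksum against a
// known-good map. Hypothetical helper: Digests only exists in
// digest-enabled go-bindata output.
func verifyAssets(expected map[string][sha256.Size]byte) error {
	sums, err := Digests()
	if err != nil {
		return err
	}
	for name, sum := range sums {
		if expected[name] != sum {
			return fmt.Errorf("asset %s drifted from the recorded digest", name)
		}
	}
	return nil
}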
// AssetNames returns the names of the assets. // AssetNames returns the names of the assets.
func AssetNames() []string { func AssetNames() []string {
names := make([]string, 0, len(_bindata)) names := make([]string, 0, len(_bindata))
@@ -663,32 +654,28 @@ var _bindata = map[string]func() (*asset, error){
"1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql": _1705664490_add_balance_check_fields_blocks_ranges_sequentialUpSql, "1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql": _1705664490_add_balance_check_fields_blocks_ranges_sequentialUpSql,
"1706531789_remove_gasfee-only-eth-transfers.up.sql": _1706531789_remove_gasfeeOnlyEthTransfersUpSql, "1706531789_remove_gasfee-only-eth-transfers.up.sql": _1706531789_remove_gasfeeOnlyEthTransfersUpSql,
"1707160323_add_contract_type_table.up.sql": _1707160323_add_contract_type_tableUpSql, "1707160323_add_contract_type_table.up.sql": _1707160323_add_contract_type_tableUpSql,
"1708089811_add_nullable_fiesl_blocks_ranges.up.sql": _1708089811_add_nullable_fiesl_blocks_rangesUpSql,
"doc.go": docGo, "doc.go": docGo,
} }
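Because Go randomizes map iteration, a migration runner would sort the names before applying them; a hedged sketch using the generated AssetNames (same-package assumption), relying on the timestamp prefixes above to make lexical order chronological:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// AssetNames returns the _bindata keys in no particular order;
	// sorting restores the intended migration sequence.
	names := AssetNames()
	sort.Strings(names)
	for _, name := range names {
		if strings.HasSuffix(name, ".up.sql") {
			fmt.Println("would apply:", name)
		}
	}
}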
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain // AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata. // directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the // For example if you run go-bindata on data/... and data contains the
// following hierarchy: // following hierarchy:
//
// data/ // data/
// foo.txt // foo.txt
// img/ // img/
// a.png // a.png
// b.png // b.png
// // then AssetDir("data") would return []string{"foo.txt", "img"}
// then AssetDir("data") would return []string{"foo.txt", "img"}, // AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}, // AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}. // AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) { func AssetDir(name string) ([]string, error) {
node := _bintree node := _bintree
if len(name) != 0 { if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1) cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/") pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList { for _, p := range pathList {
node = node.Children[p] node = node.Children[p]
if node == nil { if node == nil {
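A usage sketch for AssetDir; since every asset in this file sits at the root of the tree, AssetDir("") returns the full listing (same-package assumption):

package main

import (
	"fmt"
	"log"
)

func main() {
	// Empty name means the root node: here, the migration files
	// plus doc.go. A nested path such as "data/img" would list
	// that subtree instead.
	entries, err := AssetDir("")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e)
	}
}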
@@ -712,32 +699,33 @@ type bintree struct {
} }
var _bintree = &bintree{nil, map[string]*bintree{ var _bintree = &bintree{nil, map[string]*bintree{
"1691753758_initial.up.sql": {_1691753758_initialUpSql, map[string]*bintree{}}, "1691753758_initial.up.sql": &bintree{_1691753758_initialUpSql, map[string]*bintree{}},
"1692701329_add_collectibles_and_collections_data_cache.up.sql": {_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}}, "1692701329_add_collectibles_and_collections_data_cache.up.sql": &bintree{_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1692701339_add_scope_to_pending.up.sql": {_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}}, "1692701339_add_scope_to_pending.up.sql": &bintree{_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}},
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": {_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}}, "1694540071_add_collectibles_ownership_update_timestamp.up.sql": &bintree{_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}},
"1694692748_add_raw_balance_to_token_balances.up.sql": {_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}}, "1694692748_add_raw_balance_to_token_balances.up.sql": &bintree{_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}},
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": {_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}}, "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": &bintree{_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1695932536_balance_history_v2.up.sql": {_1695932536_balance_history_v2UpSql, map[string]*bintree{}}, "1695932536_balance_history_v2.up.sql": &bintree{_1695932536_balance_history_v2UpSql, map[string]*bintree{}},
"1696853635_input_data.up.sql": {_1696853635_input_dataUpSql, map[string]*bintree{}}, "1696853635_input_data.up.sql": &bintree{_1696853635_input_dataUpSql, map[string]*bintree{}},
"1698117918_add_community_id_to_tokens.up.sql": {_1698117918_add_community_id_to_tokensUpSql, map[string]*bintree{}}, "1698117918_add_community_id_to_tokens.up.sql": &bintree{_1698117918_add_community_id_to_tokensUpSql, map[string]*bintree{}},
"1698257443_add_community_metadata_to_wallet_db.up.sql": {_1698257443_add_community_metadata_to_wallet_dbUpSql, map[string]*bintree{}}, "1698257443_add_community_metadata_to_wallet_db.up.sql": &bintree{_1698257443_add_community_metadata_to_wallet_dbUpSql, map[string]*bintree{}},
"1699987075_add_timestamp_and_state_to_community_data_cache.up.sql": {_1699987075_add_timestamp_and_state_to_community_data_cacheUpSql, map[string]*bintree{}}, "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql": &bintree{_1699987075_add_timestamp_and_state_to_community_data_cacheUpSql, map[string]*bintree{}},
"1700414564_add_wallet_connect_pairings_table.up.sql": {_1700414564_add_wallet_connect_pairings_tableUpSql, map[string]*bintree{}}, "1700414564_add_wallet_connect_pairings_table.up.sql": &bintree{_1700414564_add_wallet_connect_pairings_tableUpSql, map[string]*bintree{}},
"1701101493_add_token_blocks_range.up.sql": {_1701101493_add_token_blocks_rangeUpSql, map[string]*bintree{}}, "1701101493_add_token_blocks_range.up.sql": &bintree{_1701101493_add_token_blocks_rangeUpSql, map[string]*bintree{}},
"1702467441_wallet_connect_sessions_instead_of_pairings.up.sql": {_1702467441_wallet_connect_sessions_instead_of_pairingsUpSql, map[string]*bintree{}}, "1702467441_wallet_connect_sessions_instead_of_pairings.up.sql": &bintree{_1702467441_wallet_connect_sessions_instead_of_pairingsUpSql, map[string]*bintree{}},
"1702577524_add_community_collections_and_collectibles_images_cache.up.sql": {_1702577524_add_community_collections_and_collectibles_images_cacheUpSql, map[string]*bintree{}}, "1702577524_add_community_collections_and_collectibles_images_cache.up.sql": &bintree{_1702577524_add_community_collections_and_collectibles_images_cacheUpSql, map[string]*bintree{}},
"1702867707_add_balance_to_collectibles_ownership_cache.up.sql": {_1702867707_add_balance_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}}, "1702867707_add_balance_to_collectibles_ownership_cache.up.sql": &bintree{_1702867707_add_balance_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}},
"1703686612_add_color_to_saved_addresses.up.sql": {_1703686612_add_color_to_saved_addressesUpSql, map[string]*bintree{}}, "1703686612_add_color_to_saved_addresses.up.sql": &bintree{_1703686612_add_color_to_saved_addressesUpSql, map[string]*bintree{}},
"1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql": {_1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSql, map[string]*bintree{}}, "1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql": &bintree{_1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSql, map[string]*bintree{}},
"1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql": {_1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}}, "1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql": &bintree{_1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}},
"1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql": {_1705664490_add_balance_check_fields_blocks_ranges_sequentialUpSql, map[string]*bintree{}}, "1705664490_add_balance_check_fields_blocks_ranges_sequential.up.sql": &bintree{_1705664490_add_balance_check_fields_blocks_ranges_sequentialUpSql, map[string]*bintree{}},
"1706531789_remove_gasfee-only-eth-transfers.up.sql": {_1706531789_remove_gasfeeOnlyEthTransfersUpSql, map[string]*bintree{}}, "1706531789_remove_gasfee-only-eth-transfers.up.sql": &bintree{_1706531789_remove_gasfeeOnlyEthTransfersUpSql, map[string]*bintree{}},
"1707160323_add_contract_type_table.up.sql": {_1707160323_add_contract_type_tableUpSql, map[string]*bintree{}}, "1707160323_add_contract_type_table.up.sql": &bintree{_1707160323_add_contract_type_tableUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}}, "1708089811_add_nullable_fiesl_blocks_ranges.up.sql": &bintree{_1708089811_add_nullable_fiesl_blocks_rangesUpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}},
}} }}
// RestoreAsset restores an asset under the given directory. // RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error { func RestoreAsset(dir, name string) error {
data, err := Asset(name) data, err := Asset(name)
if err != nil { if err != nil {
@@ -751,14 +739,18 @@ func RestoreAsset(dir, name string) error {
if err != nil { if err != nil {
return err return err
} }
err = os.WriteFile(_filePath(dir, name), data, info.Mode()) err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil { if err != nil {
return err return err
} }
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
} }
// RestoreAssets restores an asset under the given directory recursively. // RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error { func RestoreAssets(dir, name string) error {
children, err := AssetDir(name) children, err := AssetDir(name)
// File // File
@@ -776,6 +768,6 @@ func RestoreAssets(dir, name string) error {
} }
func _filePath(dir, name string) string { func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1) cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
} }
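To round off the restore helpers, a sketch that dumps the whole embedded tree into a temporary directory via RestoreAssets (same-package assumption; os.MkdirTemp requires Go 1.16+):

package main

import (
	"log"
	"os"
)

func main() {
	// Materialize every embedded asset on disk, preserving the
	// recorded file modes and modification times.
	tmp, err := os.MkdirTemp("", "bindata-assets")
	if err != nil {
		log.Fatal(err)
	}
	if err := RestoreAssets(tmp, ""); err != nil {
		log.Fatal(err)
	}
	log.Println("assets restored under", tmp)
}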

View File

@@ -0,0 +1,14 @@
DROP TABLE blocks_ranges_sequential;
CREATE TABLE blocks_ranges_sequential (
network_id UNSIGNED BIGINT NOT NULL,
address VARCHAR NOT NULL,
blk_start BIGINT DEFAULT null,
blk_first BIGINT DEFAULT null,
blk_last BIGINT DEFAULT null,
token_blk_start BIGINT DEFAULT null,
token_blk_first BIGINT DEFAULT null,
token_blk_last BIGINT DEFAULT null,
balance_check_hash TEXT DEFAULT "",
PRIMARY KEY (network_id, address)
) WITHOUT ROWID;
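The recreated BIGINT columns default to NULL so that "no known bound" is distinguishable from a genuine block number of 0, which is what makes the nilable block range values of this change possible. A minimal read-back sketch with database/sql follows; firstKnownBlock is a hypothetical helper, not code from this commit:

package main

import "database/sql"

// firstKnownBlock reads the nullable lower bound back out. The
// returned bool is false when blk_first is NULL, i.e. no range has
// been discovered yet for this account on this chain.
func firstKnownBlock(db *sql.DB, chainID uint64, address string) (int64, bool, error) {
	var v sql.NullInt64
	err := db.QueryRow(
		`SELECT blk_first FROM blocks_ranges_sequential
		 WHERE network_id = ? AND address = ?`,
		chainID, address,
	).Scan(&v)
	if err != nil {
		return 0, false, err
	}
	return v.Int64, v.Valid, nil
}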