feat: link owned collectibles with latest matching transfer

Part of #12942
This commit is contained in:
Dario Gabriel Lipicar 2024-01-08 02:21:50 -03:00 committed by dlipicar
parent 70b2cab096
commit ee6621b066
14 changed files with 948 additions and 207 deletions

View File

@ -2,6 +2,7 @@ package collectibles
import (
"context"
"encoding/json"
"errors"
"math/big"
"sync/atomic"
@ -14,6 +15,7 @@ import (
"github.com/status-im/status-go/services/wallet/bigint"
walletCommon "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/services/wallet/transfer"
"github.com/status-im/status-go/services/wallet/walletevent"
)
@ -31,7 +33,22 @@ type OwnedCollectibles struct {
ids []thirdparty.CollectibleUniqueID
}
type OwnedCollectiblesCb func(OwnedCollectibles)
type OwnedCollectiblesChangeType = int
const (
OwnedCollectiblesChangeTypeAdded OwnedCollectiblesChangeType = iota + 1
OwnedCollectiblesChangeTypeUpdated
OwnedCollectiblesChangeTypeRemoved
)
type OwnedCollectiblesChange struct {
ownedCollectibles OwnedCollectibles
changeType OwnedCollectiblesChangeType
}
type OwnedCollectiblesChangeCb func(OwnedCollectiblesChange)
type TransferCb func(common.Address, walletCommon.ChainID, []transfer.Transfer)
const (
OwnershipStateIdle OwnershipState = iota + 1
@ -46,7 +63,7 @@ type periodicRefreshOwnedCollectiblesCommand struct {
manager *Manager
ownershipDB *OwnershipDB
walletFeed *event.Feed
receivedCollectiblesCb OwnedCollectiblesCb
ownedCollectiblesChangeCb OwnedCollectiblesChangeCb
group *async.Group
state atomic.Value
@ -58,14 +75,14 @@ func newPeriodicRefreshOwnedCollectiblesCommand(
walletFeed *event.Feed,
chainID walletCommon.ChainID,
account common.Address,
receivedCollectiblesCb OwnedCollectiblesCb) *periodicRefreshOwnedCollectiblesCommand {
ownedCollectiblesChangeCb OwnedCollectiblesChangeCb) *periodicRefreshOwnedCollectiblesCommand {
ret := &periodicRefreshOwnedCollectiblesCommand{
manager: manager,
ownershipDB: ownershipDB,
walletFeed: walletFeed,
chainID: chainID,
account: account,
receivedCollectiblesCb: receivedCollectiblesCb,
ownedCollectiblesChangeCb: ownedCollectiblesChangeCb,
}
ret.state.Store(OwnershipStateIdle)
return ret
@ -108,8 +125,8 @@ func (c *periodicRefreshOwnedCollectiblesCommand) Stop() {
func (c *periodicRefreshOwnedCollectiblesCommand) loadOwnedCollectibles(ctx context.Context) error {
c.group = async.NewGroup(ctx)
receivedCollectiblesCh := make(chan OwnedCollectibles)
command := newLoadOwnedCollectiblesCommand(c.manager, c.ownershipDB, c.walletFeed, c.chainID, c.account, receivedCollectiblesCh)
ownedCollectiblesChangeCh := make(chan OwnedCollectiblesChange)
command := newLoadOwnedCollectiblesCommand(c.manager, c.ownershipDB, c.walletFeed, c.chainID, c.account, ownedCollectiblesChangeCh)
c.state.Store(OwnershipStateUpdating)
defer func() {
@ -123,9 +140,9 @@ func (c *periodicRefreshOwnedCollectiblesCommand) loadOwnedCollectibles(ctx cont
c.group.Add(command.Command())
select {
case ownedCollectibles := <-receivedCollectiblesCh:
if c.receivedCollectiblesCb != nil {
c.receivedCollectiblesCb(ownedCollectibles)
case ownedCollectiblesChange := <-ownedCollectiblesChangeCh:
if c.ownedCollectiblesChangeCb != nil {
c.ownedCollectiblesChangeCb(ownedCollectiblesChange)
}
case <-ctx.Done():
return ctx.Err()
@ -144,7 +161,7 @@ type loadOwnedCollectiblesCommand struct {
manager *Manager
ownershipDB *OwnershipDB
walletFeed *event.Feed
receivedCollectiblesCh chan<- OwnedCollectibles
ownedCollectiblesChangeCh chan<- OwnedCollectiblesChange
// Not to be set by the caller
partialOwnership []thirdparty.CollectibleUniqueID
@ -157,14 +174,14 @@ func newLoadOwnedCollectiblesCommand(
walletFeed *event.Feed,
chainID walletCommon.ChainID,
account common.Address,
receivedCollectiblesCh chan<- OwnedCollectibles) *loadOwnedCollectiblesCommand {
ownedCollectiblesChangeCh chan<- OwnedCollectiblesChange) *loadOwnedCollectiblesCommand {
return &loadOwnedCollectiblesCommand{
manager: manager,
ownershipDB: ownershipDB,
walletFeed: walletFeed,
chainID: chainID,
account: account,
receivedCollectiblesCh: receivedCollectiblesCh,
ownedCollectiblesChangeCh: ownedCollectiblesChangeCh,
}
}
@ -195,6 +212,41 @@ func ownedTokensToTokenBalancesPerContractAddress(ownership []thirdparty.Collect
return ret
}
// sendOwnedCollectiblesChanges pushes the result of an ownership DB update to
// the command's change channel, one OwnedCollectiblesChange message per
// non-empty category (removed, updated, added), each tagged with the
// corresponding change type and the command's chain/account.
// Note: sends block until the consumer reads from ownedCollectiblesChangeCh.
func (c *loadOwnedCollectiblesCommand) sendOwnedCollectiblesChanges(removed, updated, added []thirdparty.CollectibleUniqueID) {
	// send wraps the shared message construction so each category doesn't
	// repeat the same struct literal.
	send := func(ids []thirdparty.CollectibleUniqueID, changeType OwnedCollectiblesChangeType) {
		if len(ids) == 0 {
			return
		}
		c.ownedCollectiblesChangeCh <- OwnedCollectiblesChange{
			ownedCollectibles: OwnedCollectibles{
				chainID: c.chainID,
				account: c.account,
				ids:     ids,
			},
			changeType: changeType,
		}
	}

	// Preserve the original emission order: removed, updated, added.
	send(removed, OwnedCollectiblesChangeTypeRemoved)
	send(updated, OwnedCollectiblesChangeTypeUpdated)
	send(added, OwnedCollectiblesChangeTypeAdded)
}
func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) {
log.Debug("start loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account)
@ -205,6 +257,8 @@ func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) {
c.triggerEvent(EventCollectiblesOwnershipUpdateStarted, c.chainID, c.account, "")
updateMessage := OwnershipUpdateMessage{}
lastFetchTimestamp, err := c.ownershipDB.GetOwnershipUpdateTimestamp(c.account, c.chainID)
if err != nil {
c.err = err
@ -241,42 +295,45 @@ func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) {
// Normally, update the DB once we've finished fetching
// If this is the first fetch, make partial updates to the client to get a better UX
if initialFetch || finished {
receivedIDs, err := c.ownershipDB.GetIDsNotInDB(c.chainID, c.account, c.partialOwnership)
if err != nil {
log.Error("failed GetIDsNotInDB in processOwnedIDs", "chain", c.chainID, "account", c.account, "error", err)
return err
}
// Token balances should come from the providers. For now we assume all balances are 1, which
// is only valid for ERC721.
// TODO (#13025): Fetch balances from the providers.
balances := ownedTokensToTokenBalancesPerContractAddress(c.partialOwnership)
err = c.ownershipDB.Update(c.chainID, c.account, balances, start.Unix())
updateMessage.Removed, updateMessage.Updated, updateMessage.Added, err = c.ownershipDB.Update(c.chainID, c.account, balances, start.Unix())
if err != nil {
log.Error("failed updating ownershipDB in loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "error", err)
c.err = err
break
}
c.receivedCollectiblesCh <- OwnedCollectibles{
chainID: c.chainID,
account: c.account,
ids: receivedIDs,
}
c.sendOwnedCollectiblesChanges(updateMessage.Removed, updateMessage.Updated, updateMessage.Added)
}
if finished || c.err != nil {
break
} else if initialFetch {
c.triggerEvent(EventCollectiblesOwnershipUpdatePartial, c.chainID, c.account, "")
encodedMessage, err := json.Marshal(updateMessage)
if err != nil {
c.err = err
break
}
c.triggerEvent(EventCollectiblesOwnershipUpdatePartial, c.chainID, c.account, string(encodedMessage))
updateMessage = OwnershipUpdateMessage{}
}
}
}
var encodedMessage []byte
if c.err == nil {
encodedMessage, c.err = json.Marshal(updateMessage)
}
if c.err != nil {
c.triggerEvent(EventCollectiblesOwnershipUpdateFinishedWithError, c.chainID, c.account, c.err.Error())
} else {
c.triggerEvent(EventCollectiblesOwnershipUpdateFinished, c.chainID, c.account, "")
c.triggerEvent(EventCollectiblesOwnershipUpdateFinished, c.chainID, c.account, string(encodedMessage))
}
log.Debug("end loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "in", time.Since(start))

View File

@ -49,7 +49,8 @@ type Controller struct {
walletEventsWatcher *walletevent.Watcher
settingsWatcher *settingsevent.Watcher
receivedCollectiblesCb OwnedCollectiblesCb
ownedCollectiblesChangeCb OwnedCollectiblesChangeCb
collectiblesTransferCb TransferCb
commandsLock sync.RWMutex
}
@ -75,8 +76,12 @@ func NewController(
}
}
func (c *Controller) SetReceivedCollectiblesCb(cb OwnedCollectiblesCb) {
c.receivedCollectiblesCb = cb
func (c *Controller) SetOwnedCollectiblesChangeCb(cb OwnedCollectiblesChangeCb) {
c.ownedCollectiblesChangeCb = cb
}
func (c *Controller) SetCollectiblesTransferCb(cb TransferCb) {
c.collectiblesTransferCb = cb
}
func (c *Controller) Start() {
@ -227,7 +232,7 @@ func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address c
c.walletFeed,
chainID,
address,
c.receivedCollectiblesCb,
c.ownedCollectiblesChangeCb,
)
c.commands[address][chainID] = command
@ -334,32 +339,12 @@ func (c *Controller) startWalletEventsWatcher() {
chainID := walletCommon.ChainID(event.ChainID)
for _, account := range event.Accounts {
// Check last ownership update timestamp
timestamp, err := c.ownershipDB.GetOwnershipUpdateTimestamp(account, chainID)
if err != nil {
log.Error("Error getting ownership update timestamp", "error", err)
continue
}
if timestamp == InvalidTimestamp {
// Ownership was never fetched for this account
continue
// Call external callback
if c.collectiblesTransferCb != nil {
c.collectiblesTransferCb(account, chainID, event.EventParams.([]transfer.Transfer))
}
timeCheck := timestamp - activityRefetchMarginSeconds
if timeCheck < 0 {
timeCheck = 0
}
if event.At > timeCheck {
// Restart fetching for account + chainID
c.commandsLock.Lock()
err := c.startPeriodicalOwnershipFetchForAccountAndChainID(account, chainID, true)
c.commandsLock.Unlock()
if err != nil {
log.Error("Error starting periodical collectibles fetch", "address", account, "error", err)
}
}
c.refetchOwnershipIfRecentTransfer(account, chainID, event.At)
}
}
@ -401,3 +386,33 @@ func (c *Controller) stopSettingsWatcher() {
c.settingsWatcher = nil
}
}
// refetchOwnershipIfRecentTransfer restarts the periodical ownership fetch for
// (account, chainID) when the latest detected transfer is newer than the last
// ownership refresh minus a safety margin, since such a transfer may have
// changed the set of owned collectibles.
func (c *Controller) refetchOwnershipIfRecentTransfer(account common.Address, chainID walletCommon.ChainID, latestTxTimestamp int64) {
	// Check last ownership update timestamp
	lastUpdate, err := c.ownershipDB.GetOwnershipUpdateTimestamp(account, chainID)
	if err != nil {
		log.Error("Error getting ownership update timestamp", "error", err)
		return
	}

	// Ownership was never fetched for this account
	if lastUpdate == InvalidTimestamp {
		return
	}

	// Allow a margin before the last update to absorb timing skew, clamped so
	// we never compare against a negative timestamp.
	threshold := lastUpdate - activityRefetchMarginSeconds
	if threshold < 0 {
		threshold = 0
	}
	if latestTxTimestamp <= threshold {
		return
	}

	// Restart fetching for account + chainID
	c.commandsLock.Lock()
	err = c.startPeriodicalOwnershipFetchForAccountAndChainID(account, chainID, true)
	c.commandsLock.Unlock()
	if err != nil {
		log.Error("Error starting periodical collectibles fetch", "address", account, "error", err)
	}
}

View File

@ -81,7 +81,7 @@ func TestFilterOwnedCollectibles(t *testing.T) {
for chainID, balancesPerOwner := range balancesPerChainIDAndOwner {
for ownerAddress, balances := range balancesPerOwner {
err = oDB.Update(chainID, ownerAddress, balances, timestamp)
_, _, _, err = oDB.Update(chainID, ownerAddress, balances, timestamp)
require.NoError(t, err)
}
}

View File

@ -3,6 +3,7 @@ package collectibles
import (
"database/sql"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -11,7 +12,6 @@ import (
"github.com/status-im/status-go/services/wallet/bigint"
w_common "github.com/status-im/status-go/services/wallet/common"
walletCommon "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/sqlite"
)
@ -28,37 +28,68 @@ func NewOwnershipDB(sqlDb *sql.DB) *OwnershipDB {
}
}
const ownershipColumns = "chain_id, contract_address, token_id, owner_address, balance"
const unknownUpdateTimestamp = int64(math.MaxInt64)
const selectOwnershipColumns = "chain_id, contract_address, token_id"
const selectAccountBalancesColumns = "owner_address, balance"
const ownershipTimestampColumns = "owner_address, chain_id, timestamp"
const selectOwnershipTimestampColumns = "timestamp"
func removeAddressOwnership(creator sqlite.StatementCreator, chainID w_common.ChainID, ownerAddress common.Address) error {
deleteOwnership, err := creator.Prepare("DELETE FROM collectibles_ownership_cache WHERE chain_id = ? AND owner_address = ?")
func insertTmpOwnership(
db *sql.DB,
chainID w_common.ChainID,
ownerAddress common.Address,
balancesPerContractAdddress thirdparty.TokenBalancesPerContractAddress,
) error {
// Put old/new ownership data into temp tables
// NOTE: Temp table CREATE doesn't work with prepared statements,
// so we have to use Exec directly
_, err := db.Exec(`
DROP TABLE IF EXISTS temp.old_collectibles_ownership_cache;
CREATE TABLE temp.old_collectibles_ownership_cache(
contract_address VARCHAR NOT NULL,
token_id BLOB NOT NULL,
balance BLOB NOT NULL
);
DROP TABLE IF EXISTS temp.new_collectibles_ownership_cache;
CREATE TABLE temp.new_collectibles_ownership_cache(
contract_address VARCHAR NOT NULL,
token_id BLOB NOT NULL,
balance BLOB NOT NULL
);`)
if err != nil {
return err
}
_, err = deleteOwnership.Exec(chainID, ownerAddress)
insertTmpOldOwnership, err := db.Prepare(`
INSERT INTO temp.old_collectibles_ownership_cache
SELECT contract_address, token_id, balance FROM collectibles_ownership_cache
WHERE chain_id = ? AND owner_address = ?`)
if err != nil {
return err
}
defer insertTmpOldOwnership.Close()
_, err = insertTmpOldOwnership.Exec(chainID, ownerAddress)
if err != nil {
return err
}
return nil
}
func insertAddressOwnership(creator sqlite.StatementCreator, chainID w_common.ChainID, ownerAddress common.Address, balancesPerContractAdddress thirdparty.TokenBalancesPerContractAddress) error {
insertOwnership, err := creator.Prepare(fmt.Sprintf(`INSERT INTO collectibles_ownership_cache (%s)
VALUES (?, ?, ?, ?, ?)`, ownershipColumns))
insertTmpNewOwnership, err := db.Prepare(`
INSERT INTO temp.new_collectibles_ownership_cache (contract_address, token_id, balance)
VALUES (?, ?, ?)`)
if err != nil {
return err
}
defer insertTmpNewOwnership.Close()
for contractAddress, balances := range balancesPerContractAdddress {
for _, balance := range balances {
_, err = insertOwnership.Exec(chainID, contractAddress, (*bigint.SQLBigIntBytes)(balance.TokenID.Int), ownerAddress, (*bigint.SQLBigIntBytes)(balance.Balance.Int))
_, err = insertTmpNewOwnership.Exec(
contractAddress,
(*bigint.SQLBigIntBytes)(balance.TokenID.Int),
(*bigint.SQLBigIntBytes)(balance.Balance.Int),
)
if err != nil {
return err
}
@ -68,19 +99,205 @@ func insertAddressOwnership(creator sqlite.StatementCreator, chainID w_common.Ch
return nil
}
// removeOldAddressOwnership deletes from collectibles_ownership_cache every
// collectible of (chainID, ownerAddress) that appears in the
// temp.old_collectibles_ownership_cache snapshot but not in
// temp.new_collectibles_ownership_cache, i.e. collectibles no longer owned.
// Returns the unique IDs of the removed collectibles.
// NOTE(review): assumes insertTmpOwnership has already populated both temp
// tables for this owner/chain — confirm at the call site.
func removeOldAddressOwnership(
	creator sqlite.StatementCreator,
	chainID w_common.ChainID,
	ownerAddress common.Address,
) ([]thirdparty.CollectibleUniqueID, error) {
	// Find collectibles in the DB that are not in the temp table.
	// chainID is spliced in as a literal so each row carries the
	// (chain_id, contract_address, token_id) triple RowsToCollectibles expects.
	removedQuery, err := creator.Prepare(fmt.Sprintf(`
	SELECT %d, tOld.contract_address, tOld.token_id
	FROM temp.old_collectibles_ownership_cache tOld
	LEFT JOIN temp.new_collectibles_ownership_cache tNew ON
	tOld.contract_address = tNew.contract_address AND tOld.token_id = tNew.token_id
	WHERE
	tNew.contract_address IS NULL
	`, chainID))
	if err != nil {
		return nil, err
	}
	defer removedQuery.Close()

	removedRows, err := removedQuery.Query()
	if err != nil {
		return nil, err
	}
	defer removedRows.Close()

	removedIDs, err := thirdparty.RowsToCollectibles(removedRows)
	if err != nil {
		return nil, err
	}

	// Delete each stale entry from the persistent cache, one row at a time.
	removeOwnership, err := creator.Prepare("DELETE FROM collectibles_ownership_cache WHERE chain_id = ? AND owner_address = ? AND contract_address = ? AND token_id = ?")
	if err != nil {
		return nil, err
	}
	defer removeOwnership.Close()

	for _, id := range removedIDs {
		_, err = removeOwnership.Exec(
			chainID,
			ownerAddress,
			id.ContractID.Address,
			(*bigint.SQLBigIntBytes)(id.TokenID.Int),
		)
		if err != nil {
			return nil, err
		}
	}

	return removedIDs, nil
}
// updateChangedAddressOwnership refreshes, in collectibles_ownership_cache,
// the balance of every collectible of (chainID, ownerAddress) that is present
// in both temp snapshot tables but whose balance differs between them.
// Returns the unique IDs of the updated collectibles.
// NOTE(review): assumes insertTmpOwnership has already populated both temp
// tables for this owner/chain — confirm at the call site.
func updateChangedAddressOwnership(
	creator sqlite.StatementCreator,
	chainID w_common.ChainID,
	ownerAddress common.Address,
) ([]thirdparty.CollectibleUniqueID, error) {
	// Find collectibles in the temp table that are in the DB and have a different balance.
	// chainID is spliced in as a literal so the rows match the shape
	// RowsToCollectibles expects.
	updatedQuery, err := creator.Prepare(fmt.Sprintf(`
	SELECT %d, tNew.contract_address, tNew.token_id
	FROM temp.new_collectibles_ownership_cache tNew
	LEFT JOIN temp.old_collectibles_ownership_cache tOld ON
	tOld.contract_address = tNew.contract_address AND tOld.token_id = tNew.token_id
	WHERE
	tOld.contract_address IS NOT NULL AND tOld.balance != tNew.balance
	`, chainID))
	if err != nil {
		return nil, err
	}
	defer updatedQuery.Close()

	updatedRows, err := updatedQuery.Query()
	if err != nil {
		return nil, err
	}
	defer updatedRows.Close()

	updatedIDs, err := thirdparty.RowsToCollectibles(updatedRows)
	if err != nil {
		return nil, err
	}

	// Copy the new balance over via a correlated subquery against the temp
	// snapshot, one collectible at a time.
	updateOwnership, err := creator.Prepare(`
	UPDATE collectibles_ownership_cache
	SET balance = (SELECT tNew.balance
	FROM temp.new_collectibles_ownership_cache tNew
	WHERE tNew.contract_address = collectibles_ownership_cache.contract_address AND tNew.token_id = collectibles_ownership_cache.token_id)
	WHERE chain_id = ? AND owner_address = ? AND contract_address = ? AND token_id = ?
	`)
	if err != nil {
		return nil, err
	}
	defer updateOwnership.Close()

	for _, id := range updatedIDs {
		_, err = updateOwnership.Exec(
			chainID,
			ownerAddress,
			id.ContractID.Address,
			(*bigint.SQLBigIntBytes)(id.TokenID.Int))
		if err != nil {
			return nil, err
		}
	}

	return updatedIDs, nil
}
// insertNewAddressOwnership inserts into collectibles_ownership_cache every
// collectible of (chainID, ownerAddress) that appears in the
// temp.new_collectibles_ownership_cache snapshot but not in
// temp.old_collectibles_ownership_cache, i.e. newly acquired collectibles.
// Returns the unique IDs of the inserted collectibles.
// NOTE(review): assumes insertTmpOwnership has already populated both temp
// tables for this owner/chain — confirm at the call site.
func insertNewAddressOwnership(
	creator sqlite.StatementCreator,
	chainID w_common.ChainID,
	ownerAddress common.Address,
) ([]thirdparty.CollectibleUniqueID, error) {
	// Find collectibles in the temp table that are not in the DB.
	insertedQuery, err := creator.Prepare(fmt.Sprintf(`
	SELECT %d, tNew.contract_address, tNew.token_id
	FROM temp.new_collectibles_ownership_cache tNew
	LEFT JOIN temp.old_collectibles_ownership_cache tOld ON
	tOld.contract_address = tNew.contract_address AND tOld.token_id = tNew.token_id
	WHERE
	tOld.contract_address IS NULL
	`, chainID))
	if err != nil {
		return nil, err
	}
	defer insertedQuery.Close()

	insertedRows, err := insertedQuery.Query()
	if err != nil {
		return nil, err
	}
	defer insertedRows.Close()

	insertedIDs, err := thirdparty.RowsToCollectibles(insertedRows)
	if err != nil {
		return nil, err
	}

	// Insert each new entry by selecting it back out of the temp snapshot.
	// chainID is spliced in as a literal, and the owner address is embedded as
	// a hex blob literal (X'...', with the 0x prefix stripped). The trailing
	// NULL presumably fills the transfer_id column — confirm against the
	// collectibles_ownership_cache schema.
	insertOwnership, err := creator.Prepare(fmt.Sprintf(`
	INSERT INTO collectibles_ownership_cache
	SELECT
	%d, tNew.contract_address, tNew.token_id, X'%s', tNew.balance, NULL
	FROM temp.new_collectibles_ownership_cache tNew
	WHERE
	tNew.contract_address = ? AND tNew.token_id = ?
	`, chainID, ownerAddress.Hex()[2:]))
	if err != nil {
		return nil, err
	}
	defer insertOwnership.Close()

	for _, id := range insertedIDs {
		_, err = insertOwnership.Exec(
			id.ContractID.Address,
			(*bigint.SQLBigIntBytes)(id.TokenID.Int))
		if err != nil {
			return nil, err
		}
	}

	return insertedIDs, nil
}
// updateAddressOwnership reconciles the persistent ownership cache for
// (chainID, ownerAddress) against the temp snapshot tables, in three phases:
// remove entries that disappeared, refresh balances that changed, and insert
// entries that are new. It returns the IDs affected by each phase; on error,
// the IDs collected so far are returned together with the error.
func updateAddressOwnership(
	tx sqlite.StatementCreator,
	chainID w_common.ChainID,
	ownerAddress common.Address,
) (removedIDs, updatedIDs, insertedIDs []thirdparty.CollectibleUniqueID, err error) {
	// Phase 1: drop collectibles absent from the new snapshot.
	removedIDs, err = removeOldAddressOwnership(tx, chainID, ownerAddress)

	// Phase 2: refresh balances that differ between snapshots.
	if err == nil {
		updatedIDs, err = updateChangedAddressOwnership(tx, chainID, ownerAddress)
	}

	// Phase 3: add collectibles that only exist in the new snapshot.
	if err == nil {
		insertedIDs, err = insertNewAddressOwnership(tx, chainID, ownerAddress)
	}

	return removedIDs, updatedIDs, insertedIDs, err
}
// updateAddressOwnershipTimestamp records (upserts) the time ownership data
// for (ownerAddress, chainID) was last refreshed.
func updateAddressOwnershipTimestamp(creator sqlite.StatementCreator, ownerAddress common.Address, chainID w_common.ChainID, timestamp int64) error {
	query := fmt.Sprintf(`INSERT OR REPLACE INTO collectibles_ownership_update_timestamps (%s)
	VALUES (?, ?, ?)`, ownershipTimestampColumns)

	stmt, err := creator.Prepare(query)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, execErr := stmt.Exec(ownerAddress, chainID, timestamp)
	return execErr
}
// Returns the list of new IDs when comparing the given list of IDs with the ones in the DB.
// Returns the list of added/removed IDs when comparing the given list of IDs with the ones in the DB.
// Call before Update for the result to be useful.
func (o *OwnershipDB) GetIDsNotInDB(
chainID w_common.ChainID,
@ -116,13 +333,18 @@ func (o *OwnershipDB) GetIDsNotInDB(
return ret, nil
}
func (o *OwnershipDB) Update(chainID w_common.ChainID, ownerAddress common.Address, balances thirdparty.TokenBalancesPerContractAddress, timestamp int64) (err error) {
func (o *OwnershipDB) Update(chainID w_common.ChainID, ownerAddress common.Address, balances thirdparty.TokenBalancesPerContractAddress, timestamp int64) (removedIDs, updatedIDs, insertedIDs []thirdparty.CollectibleUniqueID, err error) {
err = insertTmpOwnership(o.db, chainID, ownerAddress, balances)
if err != nil {
return
}
var (
tx *sql.Tx
)
tx, err = o.db.Begin()
if err != nil {
return err
return
}
defer func() {
if err == nil {
@ -132,16 +354,10 @@ func (o *OwnershipDB) Update(chainID w_common.ChainID, ownerAddress common.Addre
_ = tx.Rollback()
}()
// Remove previous ownership data
err = removeAddressOwnership(tx, chainID, ownerAddress)
// Compare tmp and current ownership tables and update the current one
removedIDs, updatedIDs, insertedIDs, err = updateAddressOwnership(tx, chainID, ownerAddress)
if err != nil {
return err
}
// Insert new ownership data
err = insertAddressOwnership(tx, chainID, ownerAddress, balances)
if err != nil {
return err
return
}
// Update timestamp
@ -203,7 +419,7 @@ func (o *OwnershipDB) GetOwnedCollectible(chainID w_common.ChainID, ownerAddress
return &ids[0], nil
}
func (o *OwnershipDB) GetOwnershipUpdateTimestamp(owner common.Address, chainID walletCommon.ChainID) (int64, error) {
func (o *OwnershipDB) GetOwnershipUpdateTimestamp(owner common.Address, chainID w_common.ChainID) (int64, error) {
query := fmt.Sprintf(`SELECT %s
FROM collectibles_ownership_update_timestamps
WHERE owner_address = ? AND chain_id = ?`, selectOwnershipTimestampColumns)
@ -229,7 +445,7 @@ func (o *OwnershipDB) GetOwnershipUpdateTimestamp(owner common.Address, chainID
return timestamp, nil
}
func (o *OwnershipDB) GetLatestOwnershipUpdateTimestamp(chainID walletCommon.ChainID) (int64, error) {
func (o *OwnershipDB) GetLatestOwnershipUpdateTimestamp(chainID w_common.ChainID) (int64, error) {
query := `SELECT MAX(timestamp)
FROM collectibles_ownership_update_timestamps
WHERE chain_id = ?`
@ -257,9 +473,12 @@ func (o *OwnershipDB) GetLatestOwnershipUpdateTimestamp(chainID walletCommon.Cha
}
func (o *OwnershipDB) GetOwnership(id thirdparty.CollectibleUniqueID) ([]thirdparty.AccountBalance, error) {
query := fmt.Sprintf(`SELECT %s
FROM collectibles_ownership_cache
WHERE chain_id = ? AND contract_address = ? AND token_id = ?`, selectAccountBalancesColumns)
query := fmt.Sprintf(`SELECT c.owner_address, c.balance, COALESCE(t.timestamp, %d)
FROM collectibles_ownership_cache c
LEFT JOIN transfers t ON
c.transfer_id = t.hash
WHERE
c.chain_id = ? AND c.contract_address = ? AND c.token_id = ?`, unknownUpdateTimestamp)
stmt, err := o.db.Prepare(query)
if err != nil {
@ -281,6 +500,7 @@ func (o *OwnershipDB) GetOwnership(id thirdparty.CollectibleUniqueID) ([]thirdpa
err = rows.Scan(
&accountBalance.Address,
(*bigint.SQLBigIntBytes)(accountBalance.Balance.Int),
&accountBalance.TxTimestamp,
)
if err != nil {
return nil, err
@ -291,3 +511,93 @@ func (o *OwnershipDB) GetOwnership(id thirdparty.CollectibleUniqueID) ([]thirdpa
return ret, nil
}
// SetTransferID associates the given transfer hash with the ownership cache
// entry identified by (ownerAddress, id). A no-op if no such entry exists.
func (o *OwnershipDB) SetTransferID(ownerAddress common.Address, id thirdparty.CollectibleUniqueID, transferID common.Hash) error {
	const query = `UPDATE collectibles_ownership_cache
	SET transfer_id = ?
	WHERE chain_id = ? AND contract_address = ? AND token_id = ? AND owner_address = ?`

	stmt, err := o.db.Prepare(query)
	if err != nil {
		return err
	}
	defer stmt.Close()

	_, err = stmt.Exec(transferID, id.ContractID.ChainID, id.ContractID.Address, (*bigint.SQLBigIntBytes)(id.TokenID.Int), ownerAddress)
	return err
}
// GetTransferID returns the transfer hash previously linked to the ownership
// cache entry for (ownerAddress, id), or nil (with no error) when the entry
// does not exist or has no transfer recorded.
func (o *OwnershipDB) GetTransferID(ownerAddress common.Address, id thirdparty.CollectibleUniqueID) (*common.Hash, error) {
	stmt, err := o.db.Prepare(`SELECT transfer_id
	FROM collectibles_ownership_cache
	WHERE chain_id = ? AND contract_address = ? AND token_id = ? AND owner_address = ?
	LIMIT 1`)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	var rawTransferID []byte
	err = stmt.QueryRow(id.ContractID.ChainID, id.ContractID.Address, (*bigint.SQLBigIntBytes)(id.TokenID.Int), ownerAddress).Scan(&rawTransferID)
	switch {
	case err == sql.ErrNoRows:
		// No cache entry at all.
		return nil, nil
	case err != nil:
		return nil, err
	}

	// An empty value means the column is NULL: no transfer linked yet.
	if len(rawTransferID) == 0 {
		return nil, nil
	}
	transferID := common.BytesToHash(rawTransferID)
	return &transferID, nil
}
// GetCollectiblesWithNoTransferID returns the unique IDs of all collectibles
// owned by account on chainID whose cache entry has no transfer linked yet
// (transfer_id IS NULL).
func (o *OwnershipDB) GetCollectiblesWithNoTransferID(account common.Address, chainID w_common.ChainID) ([]thirdparty.CollectibleUniqueID, error) {
	query := `SELECT contract_address, token_id
	FROM collectibles_ownership_cache
	WHERE chain_id = ? AND owner_address = ? AND transfer_id IS NULL`

	stmt, err := o.db.Prepare(query)
	if err != nil {
		return nil, err
	}
	defer stmt.Close()

	rows, err := stmt.Query(chainID, account)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var ret []thirdparty.CollectibleUniqueID
	for rows.Next() {
		// The chain ID isn't selected; it's known from the argument.
		id := thirdparty.CollectibleUniqueID{
			ContractID: thirdparty.ContractID{
				ChainID: chainID,
			},
			TokenID: &bigint.BigInt{Int: big.NewInt(0)},
		}
		err = rows.Scan(
			&id.ContractID.Address,
			(*bigint.SQLBigIntBytes)(id.TokenID.Int),
		)
		if err != nil {
			return nil, err
		}
		ret = append(ret, id)
	}
	// rows.Next() returns false both at end-of-rows and on iteration error;
	// surface the latter instead of silently returning a truncated list.
	if err = rows.Err(); err != nil {
		return nil, err
	}

	return ret, nil
}

View File

@ -9,6 +9,7 @@ import (
"github.com/status-im/status-go/services/wallet/bigint"
w_common "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/services/wallet/transfer"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/walletdatabase"
@ -120,6 +121,7 @@ func TestUpdateOwnership(t *testing.T) {
randomAddress := common.HexToAddress("0xFFFF")
var err error
var removedIDs, updatedIDs, insertedIDs []thirdparty.CollectibleUniqueID
var loadedTimestamp int64
var loadedList []thirdparty.CollectibleUniqueID
@ -148,8 +150,11 @@ func TestUpdateOwnership(t *testing.T) {
require.NoError(t, err)
require.Equal(t, InvalidTimestamp, loadedTimestamp)
err = oDB.Update(chainID0, ownerAddress1, ownedBalancesChain0, timestampChain0)
removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID0, ownerAddress1, ownedBalancesChain0, timestampChain0)
require.NoError(t, err)
require.Empty(t, removedIDs)
require.Empty(t, updatedIDs)
require.ElementsMatch(t, ownedListChain0, insertedIDs)
loadedTimestamp, err = oDB.GetOwnershipUpdateTimestamp(ownerAddress1, chainID0)
require.NoError(t, err)
@ -175,17 +180,29 @@ func TestUpdateOwnership(t *testing.T) {
require.NoError(t, err)
require.Equal(t, InvalidTimestamp, loadedTimestamp)
err = oDB.Update(chainID1, ownerAddress1, ownedBalancesChain1, timestampChain1)
removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID1, ownerAddress1, ownedBalancesChain1, timestampChain1)
require.NoError(t, err)
require.Empty(t, removedIDs)
require.Empty(t, updatedIDs)
require.ElementsMatch(t, ownedListChain1, insertedIDs)
err = oDB.Update(chainID2, ownerAddress2, ownedBalancesChain2, timestampChain2)
removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID2, ownerAddress2, ownedBalancesChain2, timestampChain2)
require.NoError(t, err)
require.Empty(t, removedIDs)
require.Empty(t, updatedIDs)
require.ElementsMatch(t, ownedListChain2, insertedIDs)
err = oDB.Update(chainID1, ownerAddress3, ownedBalancesChain1b, timestampChain1b)
removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID1, ownerAddress3, ownedBalancesChain1b, timestampChain1b)
require.NoError(t, err)
require.Empty(t, removedIDs)
require.Empty(t, updatedIDs)
require.ElementsMatch(t, ownedListChain1b, insertedIDs)
err = oDB.Update(chainID2, ownerAddress3, ownedBalancesChain2b, timestampChain2b)
removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID2, ownerAddress3, ownedBalancesChain2b, timestampChain2b)
require.NoError(t, err)
require.Empty(t, removedIDs)
require.Empty(t, updatedIDs)
require.ElementsMatch(t, ownedListChain2b, insertedIDs)
loadedTimestamp, err = oDB.GetOwnershipUpdateTimestamp(ownerAddress1, chainID0)
require.NoError(t, err)
@ -254,10 +271,12 @@ func TestUpdateOwnership(t *testing.T) {
{
Address: ownerAddress2,
Balance: commonBalanceAddress2,
TxTimestamp: unknownUpdateTimestamp,
},
{
Address: ownerAddress3,
Balance: commonBalanceAddress3,
TxTimestamp: unknownUpdateTimestamp,
},
}
@ -277,6 +296,73 @@ func TestUpdateOwnership(t *testing.T) {
require.Empty(t, loadedOwnership)
}
// TestUpdateOwnershipChanges verifies that OwnershipDB.Update classifies
// collectibles into removed/updated/inserted: a first update reports everything
// as inserted, and a second update after dropping one collectible and changing
// another's balance reports exactly one removal and one balance update.
func TestUpdateOwnershipChanges(t *testing.T) {
	oDB, cleanDB := setupOwnershipDBTest(t)
	defer cleanDB()

	chainID0 := w_common.ChainID(0)
	ownerAddress1 := common.HexToAddress("0x1234")
	ownedBalancesChain0 := generateTestCollectibles(0, 10)
	ownedListChain0 := testCollectiblesToList(chainID0, ownedBalancesChain0)
	timestampChain0 := int64(1234567890)

	var err error
	var removedIDs, updatedIDs, insertedIDs []thirdparty.CollectibleUniqueID
	var loadedList []thirdparty.CollectibleUniqueID

	// First update: every collectible is new.
	removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID0, ownerAddress1, ownedBalancesChain0, timestampChain0)
	require.NoError(t, err)
	require.Empty(t, removedIDs)
	require.Empty(t, updatedIDs)
	require.ElementsMatch(t, ownedListChain0, insertedIDs)

	loadedList, err = oDB.GetOwnedCollectibles([]w_common.ChainID{chainID0}, []common.Address{ownerAddress1}, 0, len(ownedListChain0))
	require.NoError(t, err)
	require.ElementsMatch(t, ownedListChain0, loadedList)

	// Remove one collectible and change balance of another
	var removedID, updatedID thirdparty.CollectibleUniqueID
	count := 0
	for contractAddress, balances := range ownedBalancesChain0 {
		for i, balance := range balances {
			if count == 0 {
				count++
				// Drop the first element of this contract's balance list.
				ownedBalancesChain0[contractAddress] = ownedBalancesChain0[contractAddress][1:]
				removedID = thirdparty.CollectibleUniqueID{
					ContractID: thirdparty.ContractID{
						ChainID: chainID0,
						Address: contractAddress,
					},
					TokenID: balance.TokenID,
				}
			} else if count == 1 {
				count++
				// BUGFIX: mutate through the ranged slice (`balances`), not
				// through the map entry. If the removal above truncated this
				// same contract's slice, indexes into the map value are shifted
				// by one and `ownedBalancesChain0[contractAddress][i]` would
				// change a different collectible than the one recorded in
				// updatedID. `balances` shares the backing array with the
				// (possibly truncated) map value, so the change stays visible.
				balances[i].Balance = &bigint.BigInt{Int: big.NewInt(100)}
				updatedID = thirdparty.CollectibleUniqueID{
					ContractID: thirdparty.ContractID{
						ChainID: chainID0,
						Address: contractAddress,
					},
					TokenID: balance.TokenID,
				}
			}
		}
	}
	ownedListChain0 = testCollectiblesToList(chainID0, ownedBalancesChain0)

	// Second update: exactly one removal and one balance change expected.
	removedIDs, updatedIDs, insertedIDs, err = oDB.Update(chainID0, ownerAddress1, ownedBalancesChain0, timestampChain0)
	require.NoError(t, err)
	require.ElementsMatch(t, []thirdparty.CollectibleUniqueID{removedID}, removedIDs)
	require.ElementsMatch(t, []thirdparty.CollectibleUniqueID{updatedID}, updatedIDs)
	require.Empty(t, insertedIDs)

	loadedList, err = oDB.GetOwnedCollectibles([]w_common.ChainID{chainID0}, []common.Address{ownerAddress1}, 0, len(ownedListChain0))
	require.NoError(t, err)
	require.ElementsMatch(t, ownedListChain0, loadedList)
}
func TestLargeTokenID(t *testing.T) {
oDB, cleanDB := setupOwnershipDBTest(t)
defer cleanDB()
@ -301,6 +387,7 @@ func TestLargeTokenID(t *testing.T) {
{
Address: ownerAddress,
Balance: balance,
TxTimestamp: unknownUpdateTimestamp,
},
}
@ -308,7 +395,7 @@ func TestLargeTokenID(t *testing.T) {
var err error
err = oDB.Update(chainID, ownerAddress, ownedBalancesChain, timestamp)
_, _, _, err = oDB.Update(chainID, ownerAddress, ownedBalancesChain, timestamp)
require.NoError(t, err)
loadedList, err := oDB.GetOwnedCollectibles([]w_common.ChainID{chainID}, []common.Address{ownerAddress}, 0, len(ownedListChain))
@ -327,3 +414,71 @@ func TestLargeTokenID(t *testing.T) {
require.NoError(t, err)
require.Equal(t, ownership, loadedOwnership)
}
// TestCollectibleTransferID exercises the transfer_id round-trip on the
// ownership cache: freshly inserted collectibles have no transfer linked,
// SetTransferID/GetTransferID store and retrieve a hash for a single
// collectible, and GetOwnership reports the matching transfer's timestamp only
// once a row with that hash exists in the transfers table.
func TestCollectibleTransferID(t *testing.T) {
	oDB, cleanDB := setupOwnershipDBTest(t)
	defer cleanDB()

	chainID0 := w_common.ChainID(0)
	ownerAddress1 := common.HexToAddress("0x1234")
	ownedBalancesChain0 := generateTestCollectibles(0, 10)
	ownedListChain0 := testCollectiblesToList(chainID0, ownedBalancesChain0)
	timestampChain0 := int64(1234567890)

	var err error

	_, _, _, err = oDB.Update(chainID0, ownerAddress1, ownedBalancesChain0, timestampChain0)
	require.NoError(t, err)

	// Right after Update, every collectible should be reported as lacking a
	// transfer ID.
	loadedList, err := oDB.GetCollectiblesWithNoTransferID(ownerAddress1, chainID0)
	require.NoError(t, err)
	require.ElementsMatch(t, ownedListChain0, loadedList)

	for _, id := range ownedListChain0 {
		loadedTransferID, err := oDB.GetTransferID(ownerAddress1, id)
		require.NoError(t, err)
		require.Nil(t, loadedTransferID)
	}

	// Link a transfer hash to a single collectible...
	firstCollectibleID := ownedListChain0[0]
	firstTxID := common.HexToHash("0x1234")

	err = oDB.SetTransferID(ownerAddress1, firstCollectibleID, firstTxID)
	require.NoError(t, err)

	// ...and check that only that collectible reports it.
	for _, id := range ownedListChain0 {
		loadedTransferID, err := oDB.GetTransferID(ownerAddress1, id)
		require.NoError(t, err)
		if id == firstCollectibleID {
			require.Equal(t, firstTxID, *loadedTransferID)
		} else {
			require.Nil(t, loadedTransferID)
		}
	}

	// Even though the first collectible has a TransferID set, since there's no matching entry in the transfers table it
	// should return unknownUpdateTimestamp
	firstOwnership, err := oDB.GetOwnership(firstCollectibleID)
	require.NoError(t, err)
	require.Equal(t, unknownUpdateTimestamp, firstOwnership[0].TxTimestamp)

	// Insert a transfer whose hash matches the linked transfer_id for the
	// first collectible; the remaining generated transfers act as noise.
	trs, _, _ := transfer.GenerateTestTransfers(t, oDB.db, 1, 5)
	trs[0].To = ownerAddress1
	trs[0].ChainID = chainID0
	trs[0].Hash = firstTxID
	for i := range trs {
		if i == 0 {
			transfer.InsertTestTransferWithOptions(t, oDB.db, trs[i].To, &trs[i], &transfer.TestTransferOptions{
				TokenAddress: firstCollectibleID.ContractID.Address,
				TokenID:      firstCollectibleID.TokenID.Int,
			})
		} else {
			transfer.InsertTestTransfer(t, oDB.db, trs[i].To, &trs[i])
		}
	}

	// There should now be a valid timestamp
	firstOwnership, err = oDB.GetOwnership(firstCollectibleID)
	require.NoError(t, err)
	require.Equal(t, trs[0].Timestamp, firstOwnership[0].TxTimestamp)
}

View File

@ -16,9 +16,11 @@ import (
"github.com/status-im/status-go/rpc/network"
"github.com/status-im/status-go/services/wallet/async"
"github.com/status-im/status-go/services/wallet/bigint"
walletCommon "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/community"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/services/wallet/transfer"
"github.com/status-im/status-go/services/wallet/walletevent"
)
@ -35,6 +37,12 @@ const (
EventGetCollectiblesDetailsDone walletevent.EventType = "wallet-get-collectibles-details-done"
)
// OwnershipUpdateMessage is the JSON payload describing how the set of owned
// collectibles changed in one update: IDs newly added, IDs whose stored data
// changed, and IDs no longer owned.
type OwnershipUpdateMessage struct {
	Added   []thirdparty.CollectibleUniqueID `json:"added"`
	Updated []thirdparty.CollectibleUniqueID `json:"updated"`
	Removed []thirdparty.CollectibleUniqueID `json:"removed"`
}
type CollectibleDataType byte
const (
@ -74,6 +82,7 @@ type Service struct {
controller *Controller
db *sql.DB
ownershipDB *OwnershipDB
transferDB *transfer.Database
communityManager *community.Manager
walletFeed *event.Feed
scheduler *async.MultiClientScheduler
@ -94,12 +103,14 @@ func NewService(
controller: NewController(db, walletFeed, accountsDB, accountsFeed, settingsFeed, networkManager, manager),
db: db,
ownershipDB: NewOwnershipDB(db),
transferDB: transfer.NewDB(db),
communityManager: communityManager,
walletFeed: walletFeed,
scheduler: async.NewMultiClientScheduler(),
group: async.NewGroup(context.Background()),
}
s.controller.SetReceivedCollectiblesCb(s.notifyCommunityCollectiblesReceived)
s.controller.SetOwnedCollectiblesChangeCb(s.onOwnedCollectiblesChange)
s.controller.SetCollectiblesTransferCb(s.onCollectiblesTransfer)
return s
}
@ -399,6 +410,56 @@ func (s *Service) collectibleIDsToDataType(ctx context.Context, ids []thirdparty
return nil, errors.New("unknown data type")
}
// onOwnedCollectiblesChange reacts to ownership DB changes. For collectibles
// that were just added or updated it tries to attach the latest matching
// transfer and then notifies listeners about received community collectibles.
// Removals need no handling here.
func (s *Service) onOwnedCollectiblesChange(ownedCollectiblesChange OwnedCollectiblesChange) {
	changeType := ownedCollectiblesChange.changeType
	if changeType != OwnedCollectiblesChangeTypeAdded && changeType != OwnedCollectiblesChangeTypeUpdated {
		return
	}
	// For recently added/updated collectibles, try to find a matching transfer
	// before broadcasting the notification.
	collectibles := ownedCollectiblesChange.ownedCollectibles
	s.lookupTransferForCollectibles(collectibles)
	s.notifyCommunityCollectiblesReceived(collectibles)
}
// onCollectiblesTransfer records, for every detected collectible transfer, the
// transfer hash as the latest transfer ID of the corresponding collectible in
// the ownership DB.
//
// Fix: the loop variable was named `transfer`, shadowing the imported
// `transfer` package inside the loop body; it is renamed and the loop iterates
// by index to avoid copying each Transfer struct per iteration.
func (s *Service) onCollectiblesTransfer(account common.Address, chainID walletCommon.ChainID, transfers []transfer.Transfer) {
	for i := range transfers {
		tr := &transfers[i]
		// If Collectible is already in the DB, update transfer ID with the latest detected transfer
		id := thirdparty.CollectibleUniqueID{
			ContractID: thirdparty.ContractID{
				ChainID: chainID,
				Address: tr.Log.Address,
			},
			TokenID: &bigint.BigInt{Int: tr.TokenID},
		}
		err := s.ownershipDB.SetTransferID(account, id, tr.ID)
		if err != nil {
			log.Error("Error setting transfer ID for collectible", "error", err)
		}
	}
}
// lookupTransferForCollectibles searches the transfers table for the latest
// known transfer of each given collectible and, when one is found, stores its
// hash as the collectible's transfer ID in the ownership DB.
//
// Fix: the local variable was named `transfer`, shadowing the imported
// `transfer` package inside the loop body.
func (s *Service) lookupTransferForCollectibles(ownedCollectibles OwnedCollectibles) {
	// There are some limitations to this approach:
	// - Collectibles ownership and transfers are not in sync and might represent the state at different moments.
	// - We have no way of knowing if the latest collectible transfer we've detected is actually the latest one, so the timestamp we
	// use might be older than the real one.
	// - There might be detected transfers that are temporarily not reflected in the collectibles ownership.
	// - For ERC721 tokens we should only look for incoming transfers. For ERC1155 tokens we should look for both incoming and outgoing transfers.
	// We need to get the contract standard for each collectible to know which approach to take.
	for _, id := range ownedCollectibles.ids {
		latestTransfer, err := s.transferDB.GetLatestCollectibleTransfer(ownedCollectibles.account, id)
		if err != nil {
			log.Error("Error fetching latest collectible transfer", "error", err)
			continue
		}
		if latestTransfer == nil {
			// No matching transfer known yet; leave the transfer ID unset.
			continue
		}
		if err := s.ownershipDB.SetTransferID(ownedCollectibles.account, id, latestTransfer.ID); err != nil {
			log.Error("Error setting transfer ID for collectible", "error", err)
		}
	}
}
func (s *Service) notifyCommunityCollectiblesReceived(ownedCollectibles OwnedCollectibles) {
ctx := context.Background()

View File

@ -203,6 +203,7 @@ type CollectibleContractOwnership struct {
// AccountBalance holds one owner's balance for a collectible together with the
// timestamp of the transfer that produced it.
type AccountBalance struct {
	Address common.Address `json:"address"`
	Balance *bigint.BigInt `json:"balance"`
	// TxTimestamp is the timestamp of the matching transfer; presumably a
	// sentinel (unknownUpdateTimestamp) when no transfer is known — confirm
	// against the ownership DB code.
	TxTimestamp int64 `json:"txTimestamp"`
}
type CollectibleContractOwnershipProvider interface {

View File

@ -487,20 +487,23 @@ func transferTypeToEventType(transferType w_common.Type) walletevent.EventType {
func (c *transfersCommand) notifyOfLatestTransfers(transfers []Transfer, transferType w_common.Type) {
if c.feed != nil {
eventTransfers := make([]Transfer, 0, len(transfers))
latestTransferTimestamp := uint64(0)
for _, transfer := range transfers {
if transfer.Type == transferType {
eventTransfers = append(eventTransfers, transfer)
if transfer.Timestamp > latestTransferTimestamp {
latestTransferTimestamp = transfer.Timestamp
}
}
}
if latestTransferTimestamp > 0 {
if len(eventTransfers) > 0 {
c.feed.Send(walletevent.Event{
Type: transferTypeToEventType(transferType),
Accounts: []common.Address{c.address},
ChainID: c.chainClient.NetworkID(),
At: int64(latestTransferTimestamp),
EventParams: eventTransfers,
})
}
}

View File

@ -15,6 +15,7 @@ import (
"github.com/status-im/status-go/services/wallet/bigint"
w_common "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/sqlite"
)
@ -144,6 +145,7 @@ func (db *Database) GetTransfersByAddress(chainID uint64, address common.Address
FilterAddress(address).
FilterEnd(toBlock).
FilterLoaded(1).
SortByBlockNumberAndHash().
Limit(limit)
rows, err := db.client.Query(query.String(), query.Args()...)
@ -161,6 +163,7 @@ func (db *Database) GetTransfersByAddressAndBlock(chainID uint64, address common
FilterAddress(address).
FilterBlockNumber(block).
FilterLoaded(1).
SortByBlockNumberAndHash().
Limit(limit)
rows, err := db.client.Query(query.String(), query.Args()...)
@ -522,6 +525,31 @@ func GetOwnedMultiTransactionID(tx *sql.Tx, chainID w_common.ChainID, id common.
return mTID, nil
}
// GetLatestCollectibleTransfer returns the most recent (by timestamp) loaded
// transfer of the given collectible involving the given address, or nil when no
// such transfer is known.
func (db *Database) GetLatestCollectibleTransfer(address common.Address, id thirdparty.CollectibleUniqueID) (*Transfer, error) {
	query := newTransfersQuery().
		FilterAddress(address).
		FilterNetwork(uint64(id.ContractID.ChainID)).
		FilterTokenAddress(id.ContractID.Address).
		FilterTokenID(id.TokenID.Int).
		FilterLoaded(1).
		SortByTimestamp(false).
		Limit(1)

	rows, err := db.client.Query(query.String(), query.Args()...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	transfers, err := query.TransferScan(rows)
	// Fix: the previous version tested `err == sql.ErrNoRows || len(transfers) == 0`
	// first, which returned (nil, nil) for ANY scan error that yielded zero rows,
	// silently swallowing real failures. Check the error before the length.
	if err != nil && err != sql.ErrNoRows {
		return nil, err
	}
	if len(transfers) == 0 {
		// No matching transfer — not an error.
		return nil, nil
	}
	return &transfers[0], nil
}
// Delete blocks for address and chainID
// Transfers will be deleted by cascade
func deleteBlocks(creator statementCreator, address common.Address) error {

View File

@ -10,7 +10,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/status-im/status-go/services/wallet/bigint"
w_common "github.com/status-im/status-go/services/wallet/common"
"github.com/status-im/status-go/services/wallet/thirdparty"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/walletdatabase"
)
@ -152,3 +154,62 @@ func TestGetTransfersForIdentities(t *testing.T) {
require.Equal(t, MultiTransactionIDType(0), entries[0].MultiTransactionID)
require.Equal(t, MultiTransactionIDType(0), entries[1].MultiTransactionID)
}
// TestGetLatestCollectibleTransfer checks that GetLatestCollectibleTransfer
// returns the newest transfer of a specific collectible for an address, both
// when only one transfer exists and after additional back-and-forth transfers
// of the same collectible have been inserted.
func TestGetLatestCollectibleTransfer(t *testing.T) {
	db, _, stop := setupTestDB(t)
	defer stop()

	trs, _, _ := GenerateTestTransfers(t, db.client, 1, len(TestCollectibles))

	collectible := TestCollectibles[0]
	collectibleID := thirdparty.CollectibleUniqueID{
		ContractID: thirdparty.ContractID{
			ChainID: collectible.ChainID,
			Address: collectible.TokenAddress,
		},
		TokenID: &bigint.BigInt{Int: collectible.TokenID},
	}
	firstTr := trs[0]
	lastTr := firstTr

	// ExtraTrs is a sequence of send+receive of the same collectible
	extraTrs, _, _ := GenerateTestTransfers(t, db.client, len(trs)+1, 2)
	for i := range extraTrs {
		// Alternate direction: even entries send the collectible away,
		// odd entries bring it back to the original owner.
		if i%2 == 0 {
			extraTrs[i].From = firstTr.To
			extraTrs[i].To = firstTr.From
		} else {
			extraTrs[i].From = firstTr.From
			extraTrs[i].To = firstTr.To
		}
		extraTrs[i].ChainID = collectible.ChainID
	}

	// Insert one transfer per test collectible; only trs[0] matches collectibleID.
	for i := range trs {
		collectibleData := TestCollectibles[i]
		trs[i].ChainID = collectibleData.ChainID
		InsertTestTransferWithOptions(t, db.client, trs[i].To, &trs[i], &TestTransferOptions{
			TokenAddress: collectibleData.TokenAddress,
			TokenID:      collectibleData.TokenID,
		})
	}

	// With a single matching transfer, that transfer must be returned.
	foundTx, err := db.GetLatestCollectibleTransfer(lastTr.To, collectibleID)
	require.NoError(t, err)
	require.NotEmpty(t, foundTx)
	require.Equal(t, lastTr.Hash, foundTx.ID)

	for i := range extraTrs {
		InsertTestTransferWithOptions(t, db.client, firstTr.To, &extraTrs[i], &TestTransferOptions{
			TokenAddress: collectible.TokenAddress,
			TokenID:      collectible.TokenID,
		})
	}

	// After the extra transfers, the newest one must win.
	lastTr = extraTrs[len(extraTrs)-1]

	foundTx, err = db.GetLatestCollectibleTransfer(lastTr.To, collectibleID)
	require.NoError(t, err)
	require.NotEmpty(t, foundTx)
	require.Equal(t, lastTr.Hash, foundTx.ID)
}

View File

@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/status-im/status-go/services/wallet/bigint"
w_common "github.com/status-im/status-go/services/wallet/common"
)
const baseTransfersQuery = "SELECT hash, type, blk_hash, blk_number, timestamp, address, tx, sender, receipt, log, network_id, base_gas_fee, COALESCE(multi_transaction_id, 0) %s FROM transfers"
@ -147,13 +148,50 @@ func (q *transfersQuery) FilterBlockNumber(blockNumber *big.Int) *transfersQuery
return q
}
func (q *transfersQuery) Limit(pageSize int64) *transfersQuery {
// ascendingString maps a sort-direction flag to its SQL keyword.
func ascendingString(ascending bool) string {
	direction := "DESC"
	if ascending {
		direction = "ASC"
	}
	return direction
}
// SortByBlockNumberAndHash orders results newest block first, breaking ties
// deterministically by ascending hash.
func (q *transfersQuery) SortByBlockNumberAndHash() *transfersQuery {
	const orderClause = " ORDER BY blk_number DESC, hash ASC "
	q.buf.WriteString(orderClause)
	return q
}
// SortByTimestamp orders results by timestamp in the requested direction.
func (q *transfersQuery) SortByTimestamp(ascending bool) *transfersQuery {
	// Plain concatenation produces the identical clause without fmt.Sprintf's
	// boxing and format parsing overhead.
	q.buf.WriteString(" ORDER BY timestamp " + ascendingString(ascending) + " ")
	return q
}
// Limit caps the number of returned rows at pageSize.
func (q *transfersQuery) Limit(pageSize int64) *transfersQuery {
	q.args = append(q.args, pageSize)
	q.buf.WriteString(" LIMIT ?")
	return q
}
// FilterType restricts results to transfers of the given type.
func (q *transfersQuery) FilterType(txType w_common.Type) *transfersQuery {
	q.addWhereSeparator(AndSeparator)
	q.args = append(q.args, txType)
	q.buf.WriteString(" type = ?")
	return q
}
// FilterTokenAddress restricts results to transfers of the token contract at
// the given address.
func (q *transfersQuery) FilterTokenAddress(address common.Address) *transfersQuery {
	q.addWhereSeparator(AndSeparator)
	q.args = append(q.args, address)
	q.buf.WriteString(" token_address = ?")
	return q
}
// FilterTokenID restricts results to transfers of the given token ID, bound as
// bytes via bigint.SQLBigIntBytes.
func (q *transfersQuery) FilterTokenID(tokenID *big.Int) *transfersQuery {
	q.addWhereSeparator(AndSeparator)
	q.args = append(q.args, (*bigint.SQLBigIntBytes)(tokenID))
	q.buf.WriteString(" token_id = ?")
	return q
}
// String returns the SQL statement assembled so far.
func (q *transfersQuery) String() string {
	return q.buf.String()
}

View File

@ -28,4 +28,6 @@ type Event struct {
At int64 `json:"at"`
ChainID uint64 `json:"chainId"`
RequestID *int `json:"requestId,omitempty"`
// For Internal events only, not serialized
EventParams interface{}
}

View File

@ -18,6 +18,7 @@
// 1702867707_add_balance_to_collectibles_ownership_cache.up.sql (289B)
// 1703686612_add_color_to_saved_addresses.up.sql (114B)
// 1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql (894B)
// 1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql (73B)
// doc.go (74B)
package migrations
@ -28,7 +29,6 @@ import (
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -38,7 +38,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %w", name, err)
}
var buf bytes.Buffer
@ -46,7 +46,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, err
@ -102,7 +102,7 @@ func _1691753758_initialUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}}
return a, nil
}
@ -122,7 +122,7 @@ func _1692701329_add_collectibles_and_collections_data_cacheUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}}
return a, nil
}
@ -142,7 +142,7 @@ func _1692701339_add_scope_to_pendingUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x36, 0x8a, 0x5e, 0xe2, 0x63, 0x15, 0x37, 0xba, 0x55, 0x18, 0xf3, 0xcc, 0xe0, 0x5, 0x84, 0xe1, 0x5b, 0xe8, 0x1, 0x32, 0x6b, 0x9f, 0x7d, 0x9f, 0xd9, 0x23, 0x6c, 0xa9, 0xb5, 0xdc, 0xf4, 0x93}}
return a, nil
}
@ -162,7 +162,7 @@ func _1694540071_add_collectibles_ownership_update_timestampUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x45, 0xc7, 0xce, 0x79, 0x63, 0xbc, 0x6f, 0x83, 0x5f, 0xe2, 0x3, 0x56, 0xcc, 0x5, 0x2f, 0x85, 0xda, 0x7e, 0xea, 0xf5, 0xd2, 0xac, 0x19, 0xd4, 0xd8, 0x5e, 0xdd, 0xed, 0xe2, 0xa9, 0x97}}
return a, nil
}
@ -182,7 +182,7 @@ func _1694692748_add_raw_balance_to_token_balancesUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0xe0, 0x5b, 0x42, 0xf0, 0x96, 0xa5, 0xf5, 0xed, 0xc0, 0x97, 0x88, 0xb0, 0x6d, 0xfe, 0x7d, 0x97, 0x2e, 0x17, 0xd2, 0x16, 0xbc, 0x2a, 0xf2, 0xcc, 0x67, 0x9e, 0xc5, 0x47, 0xf6, 0x69, 0x1}}
return a, nil
}
@ -202,7 +202,7 @@ func _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSq
return nil, err
}
info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfa, 0x2, 0xa, 0x7f, 0x4b, 0xd1, 0x3, 0xd0, 0x3, 0x29, 0x84, 0x31, 0xed, 0x49, 0x4f, 0xb1, 0x2d, 0xd7, 0x80, 0x41, 0x5b, 0xfa, 0x6, 0xae, 0xb4, 0xf6, 0x6b, 0x49, 0xee, 0x57, 0x33, 0x76}}
return a, nil
}
@ -222,7 +222,7 @@ func _1695932536_balance_history_v2UpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xf4, 0x14, 0x91, 0xf6, 0x5f, 0xc4, 0x9b, 0xb7, 0x83, 0x32, 0x72, 0xbe, 0x82, 0x42, 0x39, 0xa4, 0x3b, 0xc9, 0x78, 0x3d, 0xca, 0xd4, 0xbf, 0xfc, 0x7a, 0x33, 0x1e, 0xcd, 0x9e, 0xe4, 0x85}}
return a, nil
}
@ -242,7 +242,7 @@ func _1696853635_input_dataUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x30, 0x33, 0x33, 0x55, 0xc5, 0x57, 0x2b, 0xaf, 0xef, 0x3d, 0x8d, 0x2a, 0xaa, 0x5c, 0x32, 0xd1, 0xf4, 0xd, 0x4a, 0xd0, 0x33, 0x4a, 0xe8, 0xf6, 0x8, 0x6b, 0x65, 0xcc, 0xba, 0xed, 0x42}}
return a, nil
}
@ -262,7 +262,7 @@ func _1698117918_add_community_id_to_tokensUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb3, 0x82, 0xdb, 0xde, 0x3, 0x3, 0xc, 0x67, 0xf3, 0x54, 0xc4, 0xad, 0xd6, 0xce, 0x56, 0xfb, 0xc1, 0x87, 0xd7, 0xda, 0xab, 0xec, 0x1, 0xe1, 0x7d, 0xb3, 0x63, 0xd6, 0xe5, 0x5d, 0x1c, 0x15}}
return a, nil
}
@ -282,7 +282,7 @@ func _1698257443_add_community_metadata_to_wallet_dbUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xd3, 0x4, 0x25, 0xfa, 0x23, 0x1, 0x48, 0x83, 0x26, 0x20, 0xf2, 0x3d, 0xbc, 0xc1, 0xa7, 0x7c, 0x27, 0x7c, 0x1d, 0x63, 0x3, 0xa, 0xd0, 0xce, 0x47, 0x86, 0xdc, 0xa1, 0x3c, 0x2, 0x1c}}
return a, nil
}
@ -302,7 +302,7 @@ func _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql() (*asset,
return nil, err
}
info := bindataFileInfo{name: "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql", size: 865, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql", size: 865, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc3, 0xee, 0x37, 0xf9, 0x7f, 0x9e, 0xfe, 0x93, 0x66, 0x2b, 0xd, 0x57, 0xf4, 0x89, 0x6c, 0x51, 0xfd, 0x14, 0xe9, 0xcd, 0xab, 0x65, 0xe7, 0xa7, 0x83, 0x7e, 0xe0, 0x5c, 0x14, 0x49, 0xf3, 0xe5}}
return a, nil
}
@ -322,7 +322,7 @@ func _1700414564_add_wallet_connect_pairings_tableUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1700414564_add_wallet_connect_pairings_table.up.sql", size: 439, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1700414564_add_wallet_connect_pairings_table.up.sql", size: 439, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x77, 0x5e, 0x19, 0x62, 0x3c, 0x3a, 0x81, 0x16, 0xa0, 0x95, 0x35, 0x62, 0xab, 0x5e, 0x2b, 0xea, 0x11, 0x71, 0x11, 0xd0, 0x9, 0xab, 0x9c, 0xab, 0xf2, 0xdd, 0x5f, 0x88, 0x83, 0x9a, 0x93}}
return a, nil
}
@ -342,7 +342,7 @@ func _1701101493_add_token_blocks_rangeUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1701101493_add_token_blocks_range.up.sql", size: 469, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1701101493_add_token_blocks_range.up.sql", size: 469, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x37, 0xfb, 0x1a, 0x6c, 0x8c, 0xa8, 0x1e, 0xa2, 0xa5, 0x1f, 0x90, 0x73, 0x3e, 0x31, 0x5f, 0x48, 0x1e, 0x9a, 0x37, 0x27, 0x1c, 0xc, 0x67, 0x1, 0xcd, 0xec, 0x85, 0x4c, 0x1c, 0x26, 0x52}}
return a, nil
}
@ -362,7 +362,7 @@ func _1702467441_wallet_connect_sessions_instead_of_pairingsUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1702467441_wallet_connect_sessions_instead_of_pairings.up.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1702467441_wallet_connect_sessions_instead_of_pairings.up.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x5f, 0x0, 0x60, 0x6, 0x28, 0x76, 0x61, 0x39, 0xdc, 0xa1, 0x84, 0x80, 0x46, 0x8a, 0xe4, 0x42, 0xb5, 0x1f, 0x18, 0x14, 0x23, 0x46, 0xb9, 0x51, 0xf, 0x62, 0xac, 0xc, 0x7, 0x98, 0xe}}
return a, nil
}
@ -382,7 +382,7 @@ func _1702577524_add_community_collections_and_collectibles_images_cacheUpSql()
return nil, err
}
info := bindataFileInfo{name: "1702577524_add_community_collections_and_collectibles_images_cache.up.sql", size: 210, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1702577524_add_community_collections_and_collectibles_images_cache.up.sql", size: 210, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8e, 0x1b, 0x32, 0x2c, 0xfa, 0x11, 0x5e, 0x5e, 0x5d, 0xef, 0x92, 0xa0, 0x29, 0x52, 0xbf, 0x6e, 0xe3, 0x30, 0xe4, 0xdf, 0xdc, 0x5, 0xbe, 0xd1, 0xf8, 0x3e, 0xd9, 0x9b, 0xd6, 0x9b, 0x95, 0x96}}
return a, nil
}
@ -402,7 +402,7 @@ func _1702867707_add_balance_to_collectibles_ownership_cacheUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1702867707_add_balance_to_collectibles_ownership_cache.up.sql", size: 289, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "1702867707_add_balance_to_collectibles_ownership_cache.up.sql", size: 289, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0x63, 0x30, 0x11, 0x22, 0xb9, 0xee, 0xae, 0xb8, 0xc4, 0xe6, 0xd3, 0x7, 0xc, 0xe6, 0xa3, 0x72, 0x8c, 0x6, 0x9d, 0x6c, 0x97, 0x8f, 0xb2, 0xd0, 0x37, 0x69, 0x69, 0x6, 0x7f, 0x67, 0x94}}
return a, nil
}
@ -422,7 +422,7 @@ func _1703686612_add_color_to_saved_addressesUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1703686612_add_color_to_saved_addresses.up.sql", size: 114, mode: os.FileMode(0644), modTime: time.Unix(1704402566, 0)}
info := bindataFileInfo{name: "1703686612_add_color_to_saved_addresses.up.sql", size: 114, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb7, 0x6e, 0x8d, 0xc0, 0x49, 0xc, 0xb, 0x66, 0xa0, 0x77, 0x32, 0x76, 0xa8, 0xd0, 0x40, 0xce, 0x67, 0xa, 0x9e, 0x23, 0x36, 0xe, 0xc3, 0xd3, 0x9d, 0xe2, 0xde, 0x60, 0x19, 0xba, 0x44, 0xf1}}
return a, nil
}
@ -442,11 +442,31 @@ func _1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSq
return nil, err
}
info := bindataFileInfo{name: "1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql", size: 894, mode: os.FileMode(0644), modTime: time.Unix(1704871375, 0)}
info := bindataFileInfo{name: "1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql", size: 894, mode: os.FileMode(0644), modTime: time.Unix(1704913465, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x70, 0xd3, 0xcf, 0x90, 0xb2, 0xa, 0x23, 0x41, 0x8a, 0xa5, 0x90, 0x7b, 0x34, 0xec, 0x3b, 0x3f, 0xa9, 0xb1, 0x95, 0xf3, 0x2a, 0xdf, 0xbb, 0x53, 0x57, 0x27, 0x2b, 0x12, 0x84, 0xf4, 0x83, 0xda}}
return a, nil
}
var __1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\xce\xcf\xc9\x49\x4d\x2e\xc9\x4c\xca\x49\x2d\x8e\xcf\x2f\xcf\x4b\x2d\x2a\xce\xc8\x2c\x88\x4f\x4e\x4c\xce\x48\x55\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x29\x4a\xcc\x2b\x4e\x4b\x2d\x8a\xcf\x4c\x51\x08\x73\x0c\x72\xf6\x70\x0c\xb2\xe6\x02\x04\x00\x00\xff\xff\x77\x9f\xe5\xab\x49\x00\x00\x00")
// _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSqlBytes
// returns the gunzipped contents of the embedded migration script.
// go-bindata generated code — do not edit by hand.
func _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql,
		"1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql",
	)
}
// _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql
// wraps the embedded migration bytes in an asset carrying file metadata and a
// SHA-256 digest. go-bindata generated code — do not edit by hand.
func _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql() (*asset, error) {
	bytes, err := _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql", size: 73, mode: os.FileMode(0644), modTime: time.Unix(1704913465, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa2, 0xf0, 0x71, 0xb5, 0xaf, 0x22, 0xac, 0x77, 0xdb, 0x6f, 0x62, 0x27, 0x12, 0x46, 0x60, 0x3, 0x59, 0x43, 0x6f, 0x1, 0xdc, 0xe8, 0x6e, 0x89, 0xa5, 0x77, 0x37, 0x36, 0xd9, 0x4e, 0x6d, 0x9b}}
	return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
func docGoBytes() ([]byte, error) {
@ -462,7 +482,7 @@ func docGo() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1703598405, 0)}
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1704459396, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil
}
@ -559,53 +579,41 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1691753758_initial.up.sql": _1691753758_initialUpSql,
"1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql,
"1692701339_add_scope_to_pending.up.sql": _1692701339_add_scope_to_pendingUpSql,
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": _1694540071_add_collectibles_ownership_update_timestampUpSql,
"1694692748_add_raw_balance_to_token_balances.up.sql": _1694692748_add_raw_balance_to_token_balancesUpSql,
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql,
"1695932536_balance_history_v2.up.sql": _1695932536_balance_history_v2UpSql,
"1696853635_input_data.up.sql": _1696853635_input_dataUpSql,
"1698117918_add_community_id_to_tokens.up.sql": _1698117918_add_community_id_to_tokensUpSql,
"1698257443_add_community_metadata_to_wallet_db.up.sql": _1698257443_add_community_metadata_to_wallet_dbUpSql,
"1699987075_add_timestamp_and_state_to_community_data_cache.up.sql": _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql,
"1700414564_add_wallet_connect_pairings_table.up.sql": _1700414564_add_wallet_connect_pairings_tableUpSql,
"1701101493_add_token_blocks_range.up.sql": _1701101493_add_token_blocks_rangeUpSql,
"1702467441_wallet_connect_sessions_instead_of_pairings.up.sql": _1702467441_wallet_connect_sessions_instead_of_pairingsUpSql,
"1702577524_add_community_collections_and_collectibles_images_cache.up.sql": _1702577524_add_community_collections_and_collectibles_images_cacheUpSql,
"1702867707_add_balance_to_collectibles_ownership_cache.up.sql": _1702867707_add_balance_to_collectibles_ownership_cacheUpSql,
"1703686612_add_color_to_saved_addresses.up.sql": _1703686612_add_color_to_saved_addressesUpSql,
"1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql": _1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSql,
"1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql": _1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
@ -638,25 +646,26 @@ type bintree struct {
}
// _bintree is the directory tree of embedded assets, consumed by AssetDir.
// Every migration file is a leaf node (an empty children map).
//
// NOTE(review): the previous text carried each migration twice — once in the
// legacy `&bintree{...}` form and once in the untyped `{...}` form. Duplicate
// constant keys in a map composite literal do not compile in Go, so the map
// is collapsed to a single, deduplicated entry per asset (untyped literal
// style, which gofmt/gosimple prefer).
var _bintree = &bintree{nil, map[string]*bintree{
	"1691753758_initial.up.sql":                                                      {_1691753758_initialUpSql, map[string]*bintree{}},
	"1692701329_add_collectibles_and_collections_data_cache.up.sql":                  {_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
	"1692701339_add_scope_to_pending.up.sql":                                         {_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}},
	"1694540071_add_collectibles_ownership_update_timestamp.up.sql":                  {_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}},
	"1694692748_add_raw_balance_to_token_balances.up.sql":                            {_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}},
	"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql":  {_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
	"1695932536_balance_history_v2.up.sql":                                           {_1695932536_balance_history_v2UpSql, map[string]*bintree{}},
	"1696853635_input_data.up.sql":                                                   {_1696853635_input_dataUpSql, map[string]*bintree{}},
	"1698117918_add_community_id_to_tokens.up.sql":                                   {_1698117918_add_community_id_to_tokensUpSql, map[string]*bintree{}},
	"1698257443_add_community_metadata_to_wallet_db.up.sql":                          {_1698257443_add_community_metadata_to_wallet_dbUpSql, map[string]*bintree{}},
	"1699987075_add_timestamp_and_state_to_community_data_cache.up.sql":              {_1699987075_add_timestamp_and_state_to_community_data_cacheUpSql, map[string]*bintree{}},
	"1700414564_add_wallet_connect_pairings_table.up.sql":                            {_1700414564_add_wallet_connect_pairings_tableUpSql, map[string]*bintree{}},
	"1701101493_add_token_blocks_range.up.sql":                                       {_1701101493_add_token_blocks_rangeUpSql, map[string]*bintree{}},
	"1702467441_wallet_connect_sessions_instead_of_pairings.up.sql":                  {_1702467441_wallet_connect_sessions_instead_of_pairingsUpSql, map[string]*bintree{}},
	"1702577524_add_community_collections_and_collectibles_images_cache.up.sql":      {_1702577524_add_community_collections_and_collectibles_images_cacheUpSql, map[string]*bintree{}},
	"1702867707_add_balance_to_collectibles_ownership_cache.up.sql":                  {_1702867707_add_balance_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}},
	"1703686612_add_color_to_saved_addresses.up.sql":                                 {_1703686612_add_color_to_saved_addressesUpSql, map[string]*bintree{}},
	"1704701942_remove_favourite_and_change_primary_key_for_saved_addresses.up.sql":  {_1704701942_remove_favourite_and_change_primary_key_for_saved_addressesUpSql, map[string]*bintree{}},
	"1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cache.up.sql":    {_1704913491_add_type_and_tx_timestamp_to_collectibles_ownership_cacheUpSql, map[string]*bintree{}},
	"doc.go": {docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
@ -673,7 +682,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}

View File

@ -0,0 +1 @@
ALTER TABLE collectibles_ownership_cache ADD COLUMN transfer_id VARCHAR;