package appdatabase
import (
	"crypto/sha256"
	"database/sql"
	"encoding/json"
	"math/big"

	"go.uber.org/zap"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"

	"github.com/status-im/status-go/appdatabase/migrations"
	migrationsprevnodecfg "github.com/status-im/status-go/appdatabase/migrationsprevnodecfg"
	d_common "github.com/status-im/status-go/common"
	e_types "github.com/status-im/status-go/eth-node/types"
	"github.com/status-im/status-go/logutils"
	"github.com/status-im/status-go/nodecfg"
	"github.com/status-im/status-go/services/wallet/bigint"
	w_common "github.com/status-im/status-go/services/wallet/common"
	"github.com/status-im/status-go/sqlite"
)
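
// nodeCfgMigrationDate is the version (a Unix timestamp) of the migration
// that moved node configuration into the nodecfg table; databases last
// migrated before this point must first run the pre-nodecfg migrations
// (see doMigration).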
const nodeCfgMigrationDate = 1640111208
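
// customSteps lists Go-level data migrations tied to specific SQL migration
// versions; how each step is scheduled relative to its version is defined by
// sqlite.PostStep.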
var customSteps = []*sqlite.PostStep{
	{Version: 1674136690, CustomMigration: migrateEnsUsernames},
	{Version: 1686048341, CustomMigration: migrateWalletJSONBlobs, RollBackVersion: 1686041510},
	{Version: 1687193315, CustomMigration: migrateWalletTransferFromToAddresses, RollBackVersion: 1686825075},
}
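
// CurrentAppDBKeyUID is the key UID of the account that owns this database.
// It is expected to be set by the caller before migrations run, since
// FixMissingKeyUIDForAccounts uses it to backfill generated accounts.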
var CurrentAppDBKeyUID string
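
// DbInitializer wraps InitializeDB so that database initialization can be
// passed around as a value.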
type DbInitializer struct {
}

func (a DbInitializer) Initialize(path, password string, kdfIterationsNumber int) (*sql.DB, error) {
	return InitializeDB(path, password, kdfIterationsNumber)
}
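
// doMigration runs the legacy pre-nodecfg migrations when the database
// predates the nodecfg table, then applies all remaining SQL migrations
// together with their custom Go post-steps.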
func doMigration(db *sql.DB) error {
	lastMigration, migrationTableExists, err := sqlite.GetLastMigrationVersion(db)
	if err != nil {
		return err
	}

	if !migrationTableExists || (lastMigration > 0 && lastMigration < nodeCfgMigrationDate) {
		// If this is the first time migrations are run, or the last migration
		// happened before the nodecfg table was migrated, run the legacy migrations first
		err = migrationsprevnodecfg.Migrate(db)
		if err != nil {
			return err
		}

		// The NodeConfig migration cannot be done with SQL
		err = nodecfg.MigrateNodeConfig(db)
		if err != nil {
			return err
		}
	}

	postSteps := []*sqlite.PostStep{
		{Version: 1662365868, CustomMigration: FixMissingKeyUIDForAccounts},
		{Version: 1720606449, CustomMigration: OptimizeMobileWakuV2SettingsForMobileV1},
	}
	postSteps = append(postSteps, customSteps...)

	// Run all the new migrations
	err = migrations.Migrate(db, postSteps)
	if err != nil {
		return err
	}

	return nil
}

// InitializeDB creates db file at a given path and applies migrations.
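//
// A minimal usage sketch (dbPath, password and kdfIterations are
// placeholder values supplied by the caller):
//
//	db, err := InitializeDB(dbPath, password, kdfIterations)
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()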
func InitializeDB(path, password string, kdfIterationsNumber int) (*sql.DB, error) {
	db, err := sqlite.OpenDB(path, password, kdfIterationsNumber)
	if err != nil {
		return nil, err
	}

	err = doMigration(db)
	if err != nil {
		return nil, err
	}

	return db, nil
}
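
// OptimizeMobileWakuV2SettingsForMobileV1 enables the waku v2 light client
// and disables store confirmations for sent messages when running on a
// mobile platform.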
func OptimizeMobileWakuV2SettingsForMobileV1(sqlTx *sql.Tx) error {
	if d_common.IsMobilePlatform() {
		_, err := sqlTx.Exec(`UPDATE wakuv2_config SET light_client = ?, enable_store_confirmation_for_messages_sent = ?`, true, false)
		if err != nil {
			logutils.ZapLogger().Error("failed to enable light client and disable store confirmation for mobile v1", zap.Error(err))
			return err
		}
	}
	return nil
}
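
// FixMissingKeyUIDForAccounts backfills accounts.key_uid: imported
// (non-generated) accounts get a key UID derived from their stored public
// key, while generated accounts inherit CurrentAppDBKeyUID and the wallet
// root address from settings.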
func FixMissingKeyUIDForAccounts(sqlTx *sql.Tx) error {
	rows, err := sqlTx.Query(`SELECT address,pubkey FROM accounts WHERE pubkey IS NOT NULL AND type != '' AND type != 'generated'`)
	if err != nil {
		logutils.ZapLogger().Error("Migrating accounts: failed to query accounts", zap.Error(err))
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var address e_types.Address
		var pubkey e_types.HexBytes
		err = rows.Scan(&address, &pubkey)
		if err != nil {
			logutils.ZapLogger().Error("Migrating accounts: failed to scan records", zap.Error(err))
			return err
		}
		pk, err := crypto.UnmarshalPubkey(pubkey)
		if err != nil {
			logutils.ZapLogger().Error("Migrating accounts: failed to unmarshal pubkey", zap.String("pubkey", string(pubkey)), zap.Error(err))
			return err
		}
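		// The key UID is the hex-encoded sha256 hash of the uncompressed public key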
		pkBytes := sha256.Sum256(crypto.FromECDSAPub(pk))
		keyUIDHex := hexutil.Encode(pkBytes[:])
		_, err = sqlTx.Exec(`UPDATE accounts SET key_uid = ? WHERE address = ?`, keyUIDHex, address)
		if err != nil {
			logutils.ZapLogger().Error("Migrating accounts: failed to update key_uid for imported accounts", zap.Error(err))
			return err
		}
	}

	var walletRootAddress e_types.Address
	err = sqlTx.QueryRow(`SELECT wallet_root_address FROM settings WHERE synthetic_id='id'`).Scan(&walletRootAddress)
	if err == sql.ErrNoRows {
		// We shouldn't reach here, but if we do, it most likely happened in a test
		logutils.ZapLogger().Warn("Migrating accounts: no wallet_root_address found in settings")
		return nil
	}
	if err != nil {
		logutils.ZapLogger().Error("Migrating accounts: failed to get wallet_root_address", zap.Error(err))
		return err
	}

	_, err = sqlTx.Exec(`UPDATE accounts SET key_uid = ?, derived_from = ? WHERE type = '' OR type = 'generated'`, CurrentAppDBKeyUID, walletRootAddress.Hex())
	if err != nil {
		logutils.ZapLogger().Error("Migrating accounts: failed to update key_uid/derived_from", zap.Error(err))
		return err
	}

	// Fix the default wallet account color issue
	// (https://github.com/status-im/status-mobile/issues/20476);
	// the color of other account types doesn't matter here
	_, err = sqlTx.Exec(`UPDATE accounts SET color = 'blue',emoji='🐳' WHERE wallet = 1`)
	if err != nil {
		logutils.ZapLogger().Error("Migrating accounts: failed to update default wallet account's color to blue", zap.Error(err))
		return err
	}

	return nil
}
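
// migrateEnsUsernames copies ENS usernames from the JSON-encoded
// settings.usernames column into the ens_usernames table, skipping
// usernames already present for the default chain ID.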
func migrateEnsUsernames(sqlTx *sql.Tx) error {
	// 1. Check if the ens_usernames table already exists
	// row := sqlTx.QueryRow("SELECT exists(SELECT name FROM sqlite_master WHERE type='table' AND name='ens_usernames')")
	// tableExists := false
	// err := row.Scan(&tableExists)
	// if err != nil && err != sql.ErrNoRows {
	//	return err
	// }

	// if tableExists {
	//	return nil
	// }

	// -- 1. Create new ens_usernames table

	// _, err = sqlTx.Exec(`CREATE TABLE IF NOT EXISTS ens_usernames (
	//	"username" TEXT NOT NULL,
	//	"chain_id" UNSIGNED BIGINT DEFAULT 1);`)

	// if err != nil {
	//	log.Error("Migrating ens usernames: failed to create table", "err", err.Error())
	//	return err
	// }

	// -- 2. Move current `settings.usernames` to the new table
	/*
		INSERT INTO ens_usernames (username)
			SELECT json_each.value FROM settings, json_each(usernames);
	*/

	rows, err := sqlTx.Query(`SELECT usernames FROM settings`)
	if err != nil {
		logutils.ZapLogger().Error("Migrating ens usernames: failed to query 'settings.usernames'", zap.Error(err))
		return err
	}
	defer rows.Close()

	var usernames []string
	for rows.Next() {
		var usernamesJSON sql.NullString
		err := rows.Scan(&usernamesJSON)
		if err != nil {
			return err
		}

		if !usernamesJSON.Valid {
			continue
		}

		var list []string
		err = json.Unmarshal([]byte(usernamesJSON.String), &list)
		if err != nil {
			return err
		}

		usernames = append(usernames, list...)
	}

	defaultChainID := 1
	for _, username := range usernames {
		var usernameAlreadyMigrated bool

		row := sqlTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM ens_usernames WHERE username=? AND chain_id=?)`, username, defaultChainID)
		err := row.Scan(&usernameAlreadyMigrated)
		if err != nil {
			return err
		}

		if usernameAlreadyMigrated {
			continue
		}

		_, err = sqlTx.Exec(`INSERT INTO ens_usernames (username, chain_id) VALUES (?, ?)`, username, defaultChainID)
		if err != nil {
			logutils.ZapLogger().Error("Migrating ens usernames: failed to insert username into new database", zap.String("ensUsername", username), zap.Error(err))
		}
	}

	return nil
}
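
// MigrateV3ToV4 migrates the database file at v3Path into v4Path (judging by
// the sqlite helper it delegates to, an SQLCipher v3-to-v4 re-encryption),
// invoking onStart and onEnd around the process.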
2023-05-19 15:31:45 +00:00
2023-07-05 15:56:34 +00:00
func MigrateV3ToV4 ( v3Path string , v4Path string , password string , kdfIterationsNumber int , onStart func ( ) , onEnd func ( ) ) error {
return sqlite . MigrateV3ToV4 ( v3Path , v4Path , password , kdfIterationsNumber , onStart , onEnd )
2023-06-13 15:20:21 +00:00
}
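
// batchSize is the number of transfers rows processed per batch by the
// wallet migrations below.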
const (
	batchSize = 1000
)
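
// migrateWalletJSONBlobs extracts frequently queried fields from the JSON
// tx, receipt and log blobs in the transfers table into dedicated columns,
// processing the table in batches of batchSize rows.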
func migrateWalletJSONBlobs(sqlTx *sql.Tx) error {
	var batchEntries [][]interface{}

	// Extract useful information from the receipt blob and store it in SQL-queryable columns
	//
	// Added tx_hash because the hash column in the transfers table is not (always) the transaction hash.
	// Each entry in that table could be either a native token (ETH) transfer or an ERC20/ERC721 token transfer
	// Added block_hash because the block_hash we have is generated by us and used as the block entry ID
	// Added receipt_type, because the type we have only indicates chain or token
	// Added log_index of the log the data represents
	//
	// Dropped storing postState because it was replaced by the status after EIP 658
	// Dropped duplicating logs until we have a more structured way to store them.
	// They can still be extracted from transfers.receipt
	// Dropped the bloom filter because it cannot be used efficiently from SQLite
	//
	// Extract useful information from the tx blob
	//
	// Added tx_type, which might differ from the receipt type
	//
	// Dropped access_list; it needs a separate table
	// chain_id is already there
	// Dropped v, r, s because as BLOBs they are of no use in queries
	// Added BIGINT values clamped to 64-bit INT because we can't do int arithmetic on 128-bit blobs/strings
	// The _clamped64 suffix indicates clamped 64-bit INT values that might be useful for queries (sorting, filtering ...)
	// The amount is stored as a fixed-length 128-bit hex string so that it can be sorted and filtered
	newColumnsAndIndexSetup := `
		ALTER TABLE transfers ADD COLUMN status INT;
		ALTER TABLE transfers ADD COLUMN receipt_type INT;
		ALTER TABLE transfers ADD COLUMN tx_hash BLOB;
		ALTER TABLE transfers ADD COLUMN log_index INT;
		ALTER TABLE transfers ADD COLUMN block_hash BLOB;
		ALTER TABLE transfers ADD COLUMN cumulative_gas_used INT;
		ALTER TABLE transfers ADD COLUMN contract_address TEXT;
		ALTER TABLE transfers ADD COLUMN gas_used INT;
		ALTER TABLE transfers ADD COLUMN tx_index INT;

		ALTER TABLE transfers ADD COLUMN tx_type INT;
		ALTER TABLE transfers ADD COLUMN protected BOOLEAN;
		ALTER TABLE transfers ADD COLUMN gas_limit UNSIGNED INT;
		ALTER TABLE transfers ADD COLUMN gas_price_clamped64 INT;
		ALTER TABLE transfers ADD COLUMN gas_tip_cap_clamped64 INT;
		ALTER TABLE transfers ADD COLUMN gas_fee_cap_clamped64 INT;
		ALTER TABLE transfers ADD COLUMN amount_padded128hex CHAR(32);
		ALTER TABLE transfers ADD COLUMN account_nonce INT;
		ALTER TABLE transfers ADD COLUMN size INT;
		ALTER TABLE transfers ADD COLUMN token_address BLOB;
		ALTER TABLE transfers ADD COLUMN token_id BLOB;

		CREATE INDEX idx_transfers_filter ON transfers (status, token_address, token_id);`

	rowIndex := 0
	mightHaveRows := true

	_, err := sqlTx.Exec(newColumnsAndIndexSetup)
	if err != nil {
		return err
	}

	for mightHaveRows {
		var chainID uint64
		var hash common.Hash
		var address common.Address
		var entryType string

		rows, err := sqlTx.Query(`SELECT hash, address, network_id, tx, receipt, log, type FROM transfers WHERE tx IS NOT NULL OR receipt IS NOT NULL LIMIT ? OFFSET ?`, batchSize, rowIndex)
		if err != nil {
			return err
		}

		curProcessed := 0
		for rows.Next() {
			tx := &types.Transaction{}
			r := &types.Receipt{}
			l := &types.Log{}

			// Scan row data into the transaction and receipt objects
			nullableTx := sqlite.JSONBlob{Data: tx}
			nullableR := sqlite.JSONBlob{Data: r}
			nullableL := sqlite.JSONBlob{Data: l}
			err = rows.Scan(&hash, &address, &chainID, &nullableTx, &nullableR, &nullableL, &entryType)
			if err != nil {
				rows.Close()
				return err
			}

			var logIndex *uint
			if nullableL.Valid {
				logIndex = new(uint)
				*logIndex = l.Index
			}

			var currentRow []interface{}

			// Check that the receipt is not null before transferring the receipt data
			if nullableR.Valid {
				currentRow = append(currentRow, r.Status, r.Type, r.TxHash, logIndex, r.BlockHash, r.CumulativeGasUsed, r.ContractAddress, r.GasUsed, r.TransactionIndex)
			} else {
				for i := 0; i < 9; i++ {
					currentRow = append(currentRow, nil)
				}
			}

			if nullableTx.Valid {
				correctType, tokenID, value, tokenAddress := extractToken(entryType, tx, l, nullableL.Valid)

				gasPrice := sqlite.BigIntToClampedInt64(tx.GasPrice())
				gasTipCap := sqlite.BigIntToClampedInt64(tx.GasTipCap())
				gasFeeCap := sqlite.BigIntToClampedInt64(tx.GasFeeCap())
				valueStr := sqlite.BigIntToPadded128BitsStr(value)

				currentRow = append(currentRow, tx.Type(), tx.Protected(), tx.Gas(), gasPrice, gasTipCap, gasFeeCap, valueStr, tx.Nonce(), int64(tx.Size()), tokenAddress, (*bigint.SQLBigIntBytes)(tokenID), correctType)
			} else {
				for i := 0; i < 11; i++ {
					currentRow = append(currentRow, nil)
				}
				currentRow = append(currentRow, w_common.EthTransfer)
			}
			currentRow = append(currentRow, hash, address, chainID)
			batchEntries = append(batchEntries, currentRow)

			curProcessed++
		}
		rowIndex += curProcessed

		// Check if there was an error in the last rows.Next()
		rows.Close()
		if err = rows.Err(); err != nil {
			return err
		}
		mightHaveRows = (curProcessed == batchSize)

		// Insert the extracted data into the new columns
		if len(batchEntries) > 0 {
			var stmt *sql.Stmt
			stmt, err = sqlTx.Prepare(`UPDATE transfers SET status = ?, receipt_type = ?, tx_hash = ?, log_index = ?, block_hash = ?, cumulative_gas_used = ?, contract_address = ?, gas_used = ?, tx_index = ?,
				tx_type = ?, protected = ?, gas_limit = ?, gas_price_clamped64 = ?, gas_tip_cap_clamped64 = ?, gas_fee_cap_clamped64 = ?, amount_padded128hex = ?, account_nonce = ?, size = ?, token_address = ?, token_id = ?, type = ?
				WHERE hash = ? AND address = ? AND network_id = ?`)
			if err != nil {
				return err
			}

			for _, dataEntry := range batchEntries {
				_, err = stmt.Exec(dataEntry...)
				if err != nil {
					return err
				}
			}

			// Reset batchEntries for the next batch
			batchEntries = [][]interface{}{}
		}
	}

	return nil
}
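
// extractToken resolves the transfer type, token ID, amount and token
// address for a transfers row: from the event log when one is present
// (token transfers), otherwise from the transaction itself (a plain ETH
// transfer).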
func extractToken(entryType string, tx *types.Transaction, l *types.Log, logValid bool) (correctType w_common.Type, tokenID *big.Int, value *big.Int, tokenAddress *common.Address) {
	if logValid {
		correctType, tokenAddress, _, _ = w_common.ExtractTokenTransferData(w_common.Type(entryType), l, tx)
		_, _, _, tokenIDs, values, _ := w_common.ParseTransferLog(*l)
		if len(tokenIDs) > 0 {
			tokenID = tokenIDs[0]
		}
		if len(values) > 0 {
			value = values[0]
		}
	} else {
		correctType = w_common.Type(entryType)
		value = new(big.Int).Set(tx.Value())
	}
	return
}
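
// migrateWalletTransferFromToAddresses adds the tx_from_address and
// tx_to_address columns to the transfers table and backfills them,
// re-extracting token_address as a blob along the way.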
func migrateWalletTransferFromToAddresses(sqlTx *sql.Tx) error {
	var batchEntries [][]interface{}

	// Extract transfer from/to addresses and add the information into the new columns
	// Re-extract the token address and insert it as a blob instead of a string
	newColumnsAndIndexSetup := `
		ALTER TABLE transfers ADD COLUMN tx_from_address BLOB;
		ALTER TABLE transfers ADD COLUMN tx_to_address BLOB;`

	rowIndex := 0
	mightHaveRows := true

	_, err := sqlTx.Exec(newColumnsAndIndexSetup)
	if err != nil {
		return err
	}

	for mightHaveRows {
		var chainID uint64
		var hash common.Hash
		var address common.Address
		var sender common.Address
		var entryType string

		rows, err := sqlTx.Query(`SELECT hash, address, sender, network_id, tx, log, type FROM transfers WHERE tx IS NOT NULL OR receipt IS NOT NULL LIMIT ? OFFSET ?`, batchSize, rowIndex)
		if err != nil {
			return err
		}

		curProcessed := 0
		for rows.Next() {
			tx := &types.Transaction{}
			l := &types.Log{}

			// Scan row data into the transaction and log objects
			nullableTx := sqlite.JSONBlob{Data: tx}
			nullableL := sqlite.JSONBlob{Data: l}
			err = rows.Scan(&hash, &address, &sender, &chainID, &nullableTx, &nullableL, &entryType)
			if err != nil {
				rows.Close()
				return err
			}

			var currentRow []interface{}

			var tokenAddress *common.Address
			var txFrom *common.Address
			var txTo *common.Address

			if nullableTx.Valid {
				if nullableL.Valid {
					_, tokenAddress, txFrom, txTo = w_common.ExtractTokenTransferData(w_common.Type(entryType), l, tx)
				} else {
					txFrom = &sender
					txTo = tx.To()
				}
			}

			currentRow = append(currentRow, tokenAddress, txFrom, txTo)
			currentRow = append(currentRow, hash, address, chainID)
			batchEntries = append(batchEntries, currentRow)

			curProcessed++
		}
		rowIndex += curProcessed

		// Check if there was an error in the last rows.Next()
		rows.Close()
		if err = rows.Err(); err != nil {
			return err
		}
		mightHaveRows = (curProcessed == batchSize)

		// Insert the extracted data into the new columns
		if len(batchEntries) > 0 {
			var stmt *sql.Stmt
			stmt, err = sqlTx.Prepare(`UPDATE transfers SET token_address = ?, tx_from_address = ?, tx_to_address = ?
				WHERE hash = ? AND address = ? AND network_id = ?`)
			if err != nil {
				return err
			}

			for _, dataEntry := range batchEntries {
				_, err = stmt.Exec(dataEntry...)
				if err != nil {
					return err
				}
			}

			// Reset batchEntries for the next batch
			batchEntries = [][]interface{}{}
		}
	}

	return nil
}