feat(wallet): Implemented SequentialFetchStrategy for transfers. (#3480)

By default, OnDemandFetchStrategy is still used.
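
To make the description concrete: the change amounts to a strategy-pattern split for transfer history fetching. Below is a minimal, self-contained Go sketch of that shape; apart from the two strategy names taken from this message, every identifier is invented for illustration and the real status-go types differ.

package wallet

// FetchStrategyType selects how transfer history is fetched.
type FetchStrategyType int

const (
	// OnDemandFetchStrategyType stays the default, per the commit message.
	OnDemandFetchStrategyType FetchStrategyType = iota
	SequentialFetchStrategyType
)

// HistoryFetcher is a hypothetical interface both strategies would satisfy.
type HistoryFetcher interface {
	Start() error
	Stop()
}

// onDemandFetchStrategy loads block ranges lazily, when the UI asks for them.
type onDemandFetchStrategy struct{}

func (s *onDemandFetchStrategy) Start() error { return nil }
func (s *onDemandFetchStrategy) Stop()        {}

// sequentialFetchStrategy walks block ranges in order, persisting progress
// (cf. the new blocks_ranges_sequential migration added in this commit).
type sequentialFetchStrategy struct{}

func (s *sequentialFetchStrategy) Start() error { return nil }
func (s *sequentialFetchStrategy) Stop()        {}

// NewHistoryFetcher picks a strategy, falling back to on-demand by default.
func NewHistoryFetcher(kind FetchStrategyType) HistoryFetcher {
	if kind == SequentialFetchStrategyType {
		return &sequentialFetchStrategy{}
	}
	return &onDemandFetchStrategy{}
}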

Updates #10246
Authored by IvanBelyakoff on 2023-05-19 10:19:48 +02:00; committed by GitHub
parent 6fa8c11382
commit 94c7cd32af
9 changed files with 881 additions and 159 deletions


@@ -60,6 +60,7 @@
 // 1682073779_settings_table_remove_latest_derived_path_column.up.sql (4.47kB)
 // 1682146075_add_created_at_to_saved_addresses.up.sql (107B)
 // 1682393575_sync_ens_name.up.sql (713B)
+// 1683457503_add_blocks_ranges_sequential_table.up.sql (263B)
 // doc.go (74B)
 package migrations
@@ -144,7 +145,7 @@ func _1640111208_dummyUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1640111208_dummy.up.sql", size: 258, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1640111208_dummy.up.sql", size: 258, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xf0, 0xae, 0x20, 0x6e, 0x75, 0xd1, 0x36, 0x14, 0xf2, 0x40, 0xe5, 0xd6, 0x7a, 0xc4, 0xa5, 0x72, 0xaa, 0xb5, 0x4d, 0x71, 0x97, 0xb8, 0xe8, 0x95, 0x22, 0x95, 0xa2, 0xac, 0xaf, 0x48, 0x58}}
 	return a, nil
 }
@@ -164,7 +165,7 @@ func _1642666031_add_removed_clock_to_bookmarksUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1642666031_add_removed_clock_to_bookmarks.up.sql", size: 117, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1642666031_add_removed_clock_to_bookmarks.up.sql", size: 117, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x84, 0x4e, 0x38, 0x99, 0x7a, 0xc, 0x90, 0x13, 0xec, 0xfe, 0x2f, 0x55, 0xff, 0xb7, 0xb6, 0xaa, 0x96, 0xc6, 0x92, 0x79, 0xcc, 0xee, 0x4e, 0x99, 0x53, 0xfe, 0x1c, 0xbb, 0x32, 0x2, 0xa4, 0x27}}
 	return a, nil
 }
@@ -184,7 +185,7 @@ func _1643644541_gif_api_key_settingUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1643644541_gif_api_key_setting.up.sql", size: 108, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1643644541_gif_api_key_setting.up.sql", size: 108, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1b, 0x94, 0x28, 0xfb, 0x66, 0xd1, 0x7c, 0xb8, 0x89, 0xe2, 0xb4, 0x71, 0x65, 0x24, 0x57, 0x22, 0x95, 0x38, 0x97, 0x3, 0x9b, 0xc6, 0xa4, 0x41, 0x7b, 0xba, 0xf7, 0xdb, 0x70, 0xf7, 0x20, 0x3a}}
 	return a, nil
 }
@@ -204,7 +205,7 @@ func _1644188994_recent_stickersUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1644188994_recent_stickers.up.sql", size: 79, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1644188994_recent_stickers.up.sql", size: 79, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1e, 0xad, 0xaa, 0x30, 0xbf, 0x4, 0x7, 0xf8, 0xc3, 0x3, 0xb8, 0x97, 0x23, 0x2b, 0xbd, 0x1c, 0x60, 0x69, 0xb0, 0x42, 0x5e, 0x6b, 0xd, 0xa7, 0xa3, 0x6b, 0x2e, 0xdc, 0x70, 0x13, 0x72, 0x7}}
 	return a, nil
 }
@@ -224,7 +225,7 @@ func _1646659233_add_address_to_dapp_permisssionUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1646659233_add_address_to_dapp_permisssion.up.sql", size: 700, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1646659233_add_address_to_dapp_permisssion.up.sql", size: 700, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xed, 0xb0, 0x35, 0xcc, 0x2e, 0x16, 0xe6, 0x15, 0x86, 0x2c, 0x37, 0x80, 0xae, 0xa3, 0xc5, 0x31, 0x78, 0x5, 0x9d, 0xcd, 0x7b, 0xeb, 0x5f, 0xf2, 0xb3, 0x74, 0x72, 0xdf, 0xcf, 0x88, 0xb, 0x40}}
 	return a, nil
 }
@@ -244,7 +245,7 @@ func _1646841105_add_emoji_accountUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1646841105_add_emoji_account.up.sql", size: 96, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1646841105_add_emoji_account.up.sql", size: 96, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe6, 0x77, 0x29, 0x95, 0x18, 0x64, 0x82, 0x63, 0xe7, 0xaf, 0x6c, 0xa9, 0x15, 0x7d, 0x46, 0xa6, 0xbc, 0xdf, 0xa7, 0xd, 0x2b, 0xd2, 0x2d, 0x97, 0x4d, 0xa, 0x6b, 0xd, 0x6e, 0x90, 0x42, 0x5c}}
 	return a, nil
 }
@@ -264,7 +265,7 @@ func _1647278782_display_nameUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647278782_display_name.up.sql", size: 110, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647278782_display_name.up.sql", size: 110, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xa1, 0x1f, 0x3e, 0x61, 0x65, 0x8d, 0xff, 0xee, 0xde, 0xc5, 0x91, 0xd9, 0x5c, 0xb5, 0xe2, 0xf0, 0xb7, 0xe7, 0x5c, 0x5c, 0x16, 0x25, 0x89, 0xee, 0x78, 0x12, 0xea, 0x3e, 0x48, 0x41, 0xa6}}
 	return a, nil
 }
@@ -284,7 +285,7 @@ func _1647862838_reset_last_backupUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647862838_reset_last_backup.up.sql", size: 37, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647862838_reset_last_backup.up.sql", size: 37, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x21, 0xe3, 0xd5, 0xf6, 0x5f, 0xfe, 0x65, 0xfa, 0x1d, 0x88, 0xf8, 0x5f, 0x24, 0x71, 0x34, 0x68, 0x96, 0x2a, 0x60, 0x87, 0x15, 0x82, 0x4d, 0x8a, 0x59, 0x3d, 0x1f, 0xd8, 0x56, 0xd4, 0xfb, 0xda}}
 	return a, nil
 }
@@ -304,7 +305,7 @@ func _1647871652_add_settings_sync_clock_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647871652_add_settings_sync_clock_table.up.sql", size: 1044, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647871652_add_settings_sync_clock_table.up.sql", size: 1044, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd8, 0x58, 0xec, 0x85, 0x90, 0xfa, 0x30, 0x98, 0x98, 0x9a, 0xa6, 0xa8, 0x96, 0x2b, 0x38, 0x93, 0xf3, 0xae, 0x46, 0x74, 0xa4, 0x41, 0x62, 0x9b, 0x2, 0x86, 0xbf, 0xe5, 0x2a, 0xce, 0xe2, 0xc0}}
 	return a, nil
 }
@@ -324,7 +325,7 @@ func _1647880168_add_torrent_configUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647880168_add_torrent_config.up.sql", size: 211, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647880168_add_torrent_config.up.sql", size: 211, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x92, 0x22, 0x37, 0x96, 0xf3, 0xb5, 0x5b, 0x27, 0xd0, 0x7d, 0x43, 0x5, 0x4e, 0x9d, 0xe2, 0x49, 0xbe, 0x86, 0x31, 0xa1, 0x89, 0xff, 0xd6, 0x51, 0xe0, 0x9c, 0xb, 0xda, 0xfc, 0xf2, 0x93}}
 	return a, nil
 }
@@ -344,7 +345,7 @@ func _1647882837_add_communities_settings_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647882837_add_communities_settings_table.up.sql", size: 206, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647882837_add_communities_settings_table.up.sql", size: 206, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbd, 0x87, 0x78, 0x99, 0xd9, 0x5d, 0xbd, 0xf7, 0x57, 0x9c, 0xca, 0x97, 0xbd, 0xb3, 0xe9, 0xb5, 0x89, 0x31, 0x3f, 0xf6, 0x5c, 0x13, 0xb, 0xc3, 0x54, 0x93, 0x18, 0x40, 0x7, 0x82, 0xfe, 0x7e}}
 	return a, nil
 }
@@ -364,7 +365,7 @@ func _1647956635_add_waku_messages_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1647956635_add_waku_messages_table.up.sql", size: 266, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1647956635_add_waku_messages_table.up.sql", size: 266, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0xe, 0xe1, 0xdc, 0xda, 0x2e, 0x89, 0x8d, 0xdc, 0x2a, 0x1c, 0x13, 0xa1, 0xfc, 0xfe, 0xf, 0xb2, 0xb9, 0x85, 0xc8, 0x45, 0xd6, 0xd1, 0x7, 0x5c, 0xa3, 0x8, 0x47, 0x44, 0x6d, 0x96, 0xe0}}
 	return a, nil
 }
@@ -384,7 +385,7 @@ func _1648554928_network_testUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1648554928_network_test.up.sql", size: 132, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1648554928_network_test.up.sql", size: 132, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9a, 0xc5, 0x7f, 0x87, 0xf3, 0x2c, 0xf7, 0xbb, 0xd3, 0x3a, 0x4e, 0x76, 0x88, 0xca, 0xaf, 0x73, 0xce, 0x8f, 0xa1, 0xf6, 0x3d, 0x4d, 0xed, 0x6f, 0x49, 0xf2, 0xfe, 0x56, 0x2a, 0x60, 0x68, 0xca}}
 	return a, nil
 }
@@ -404,7 +405,7 @@ func _1649174829_add_visitble_tokenUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1649174829_add_visitble_token.up.sql", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1649174829_add_visitble_token.up.sql", size: 84, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa3, 0x22, 0xc0, 0x2b, 0x3f, 0x4f, 0x3d, 0x5e, 0x4c, 0x68, 0x7c, 0xd0, 0x15, 0x36, 0x9f, 0xec, 0xa1, 0x2a, 0x7b, 0xb4, 0xe3, 0xc6, 0xc9, 0xb4, 0x81, 0x50, 0x4a, 0x11, 0x3b, 0x35, 0x7, 0xcf}}
 	return a, nil
 }
@@ -424,7 +425,7 @@ func _1649882262_add_derived_from_accountsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1649882262_add_derived_from_accounts.up.sql", size: 110, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1649882262_add_derived_from_accounts.up.sql", size: 110, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x11, 0xb9, 0x44, 0x4d, 0x85, 0x8d, 0x7f, 0xb4, 0xae, 0x4f, 0x5c, 0x66, 0x64, 0xb6, 0xe2, 0xe, 0x3d, 0xad, 0x9d, 0x8, 0x4f, 0xab, 0x6e, 0xa8, 0x7d, 0x76, 0x3, 0xad, 0x96, 0x1, 0xee, 0x5c}}
 	return a, nil
 }
@@ -444,7 +445,7 @@ func _1650612625_add_community_message_archive_hashes_tableUpSql() (*asset, erro
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1650612625_add_community_message_archive_hashes_table.up.sql", size: 130, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1650612625_add_community_message_archive_hashes_table.up.sql", size: 130, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x48, 0x31, 0xb3, 0x75, 0x23, 0xe2, 0x45, 0xe, 0x47, 0x1b, 0x35, 0xa5, 0x6e, 0x83, 0x4e, 0x64, 0x7d, 0xd7, 0xa2, 0xda, 0xe9, 0x53, 0xf1, 0x16, 0x86, 0x2c, 0x57, 0xad, 0xfa, 0xca, 0x39, 0xde}}
 	return a, nil
 }
@@ -464,7 +465,7 @@ func _1650616788_add_communities_archives_info_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1650616788_add_communities_archives_info_table.up.sql", size: 208, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1650616788_add_communities_archives_info_table.up.sql", size: 208, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0x4f, 0x80, 0x45, 0xb9, 0xd9, 0x15, 0xe2, 0x78, 0xd0, 0xcb, 0x71, 0xc1, 0x1b, 0xb7, 0x1b, 0x1b, 0x97, 0xfe, 0x47, 0x53, 0x3c, 0x62, 0xbc, 0xdd, 0x3a, 0x94, 0x1a, 0xc, 0x48, 0x76, 0xe}}
 	return a, nil
 }
@@ -484,7 +485,7 @@ func _1652715604_add_clock_accountsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1652715604_add_clock_accounts.up.sql", size: 62, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1652715604_add_clock_accounts.up.sql", size: 62, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb6, 0xd9, 0x8d, 0x73, 0xc9, 0xef, 0xfa, 0xb1, 0x4b, 0xa5, 0xf3, 0x5, 0x19, 0x26, 0x46, 0xf8, 0x47, 0x93, 0xdb, 0xac, 0x2, 0xef, 0xf9, 0x71, 0x56, 0x83, 0xe6, 0x2d, 0xb0, 0xd7, 0x83, 0x5c}}
 	return a, nil
 }
@@ -504,7 +505,7 @@ func _1653037334_add_notifications_settings_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1653037334_add_notifications_settings_table.up.sql", size: 1276, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1653037334_add_notifications_settings_table.up.sql", size: 1276, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4b, 0xc4, 0x65, 0xac, 0xa, 0xf2, 0xef, 0xb6, 0x39, 0x3c, 0xc5, 0xb1, 0xb2, 0x9c, 0x86, 0x58, 0xe0, 0x38, 0xcb, 0x57, 0x3c, 0x76, 0x73, 0x87, 0x79, 0x4e, 0xf6, 0xed, 0xb0, 0x8e, 0x9e, 0xa}}
 	return a, nil
 }
@@ -524,7 +525,7 @@ func _1654702119_add_mutual_contact_settingsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1654702119_add_mutual_contact_settings.up.sql", size: 78, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1654702119_add_mutual_contact_settings.up.sql", size: 78, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x26, 0x66, 0x67, 0x50, 0xfe, 0xd7, 0xe3, 0x29, 0x8b, 0xff, 0x9d, 0x5a, 0x87, 0xa7, 0x99, 0x6e, 0xd6, 0xcd, 0x2e, 0xbb, 0x17, 0xdf, 0x7f, 0xf7, 0xa3, 0xfa, 0x32, 0x7c, 0x2d, 0x92, 0xc8, 0x74}}
 	return a, nil
 }
@@ -544,7 +545,7 @@ func _1655375270_add_clock_field_to_communities_settings_tableUpSql() (*asset, e
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1655375270_add_clock_field_to_communities_settings_table.up.sql", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1655375270_add_clock_field_to_communities_settings_table.up.sql", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x19, 0xc5, 0xc0, 0xf9, 0x84, 0x53, 0xdf, 0x83, 0xcf, 0xb6, 0x40, 0x6d, 0xf5, 0xdc, 0x77, 0x37, 0xb7, 0xe3, 0xa, 0x75, 0xe7, 0x6, 0x11, 0xca, 0x2b, 0x51, 0x92, 0xdd, 0x7d, 0xdb, 0xc3, 0xf5}}
 	return a, nil
 }
@@ -564,7 +565,7 @@ func _1655385721_drop_networks_configUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1655385721_drop_networks_config.up.sql", size: 27, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1655385721_drop_networks_config.up.sql", size: 27, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfc, 0xa7, 0x20, 0xbb, 0x67, 0x21, 0xe, 0xc6, 0xc8, 0x21, 0x74, 0xe0, 0xce, 0xc8, 0xe2, 0x2, 0xb4, 0xea, 0xf0, 0xe5, 0xc4, 0x4d, 0xdd, 0xd4, 0x52, 0x31, 0xa9, 0x3d, 0xcd, 0xd8, 0x9b, 0xab}}
 	return a, nil
 }
@@ -584,7 +585,7 @@ func _1655385724_networks_chaincolor_shortnameUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1655385724_networks_chainColor_shortName.up.sql", size: 220, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1655385724_networks_chainColor_shortName.up.sql", size: 220, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd9, 0xe7, 0x84, 0xbb, 0x5f, 0xd2, 0x2c, 0x42, 0x88, 0x62, 0x52, 0xb6, 0x58, 0x31, 0xac, 0xc, 0x96, 0x2b, 0x1b, 0xe5, 0x4e, 0x9a, 0x3a, 0xf6, 0xf6, 0xfc, 0xa9, 0x1a, 0x35, 0x62, 0x28, 0x88}}
 	return a, nil
 }
@@ -604,7 +605,7 @@ func _1655456688_add_deleted_at_field_to_bookmarks_tableUpSql() (*asset, error)
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1655456688_add_deleted_at_field_to_bookmarks_table.up.sql", size: 69, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1655456688_add_deleted_at_field_to_bookmarks_table.up.sql", size: 69, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe7, 0x9a, 0xbd, 0x9a, 0xc9, 0xf, 0xdf, 0x90, 0x0, 0x5d, 0xea, 0x6e, 0x7d, 0x51, 0x95, 0xcd, 0x90, 0xd3, 0x1a, 0x36, 0x6c, 0xf4, 0xbd, 0xa7, 0x6b, 0xbf, 0xe5, 0xdb, 0xa3, 0x88, 0xe3, 0x50}}
 	return a, nil
 }
@@ -624,7 +625,7 @@ func _1655462032_create_bookmarks_deleted_at_indexUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1655462032_create_bookmarks_deleted_at_index.up.sql", size: 81, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1655462032_create_bookmarks_deleted_at_index.up.sql", size: 81, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf, 0x8e, 0x20, 0x6b, 0x14, 0x9e, 0xcd, 0x97, 0xd3, 0xfe, 0x62, 0x3, 0x26, 0x59, 0x1, 0x6c, 0x99, 0xef, 0x6d, 0x21, 0xd4, 0xb5, 0xa3, 0xf4, 0x39, 0x40, 0x54, 0x6, 0xd, 0x60, 0x13, 0x38}}
 	return a, nil
 }
@@ -644,7 +645,7 @@ func _1657617291_add_multi_transactions_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1657617291_add_multi_transactions_table.up.sql", size: 412, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1657617291_add_multi_transactions_table.up.sql", size: 412, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x86, 0xb0, 0x4e, 0x8c, 0x4, 0x82, 0xb4, 0x43, 0xaa, 0xd0, 0x16, 0xdd, 0xcb, 0x88, 0x81, 0xac, 0x4, 0x34, 0x1a, 0x8f, 0x2e, 0xc5, 0x69, 0xb, 0xf0, 0x17, 0xf7, 0xe3, 0x9, 0xe, 0x54, 0xe0}}
 	return a, nil
 }
@@ -664,7 +665,7 @@ func _1660134042_add_social_links_settings_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1660134042_add_social_links_settings_table.up.sql", size: 334, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1660134042_add_social_links_settings_table.up.sql", size: 334, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x84, 0x73, 0xb6, 0xe7, 0x3f, 0xaa, 0x39, 0x9a, 0x56, 0x56, 0x31, 0xf1, 0x8e, 0x26, 0x23, 0x1, 0xe4, 0xfa, 0x98, 0xfe, 0x78, 0x87, 0x20, 0xcb, 0x52, 0xf4, 0x38, 0x7f, 0xc4, 0x1c, 0x4, 0x22}}
 	return a, nil
 }
@@ -684,7 +685,7 @@ func _1660134060_settings_bioUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1660134060_settings_bio.up.sql", size: 91, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1660134060_settings_bio.up.sql", size: 91, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x25, 0xa0, 0xa6, 0x47, 0xff, 0xbc, 0x2a, 0x0, 0xff, 0x59, 0x4b, 0xb0, 0xc9, 0x4e, 0x15, 0xe4, 0xd9, 0xda, 0xeb, 0xfe, 0x55, 0x98, 0xc3, 0x9d, 0x96, 0xe7, 0xf, 0xd1, 0x5c, 0x93, 0x73}}
 	return a, nil
 }
@@ -704,7 +705,7 @@ func _1660134070_add_wakuv2_storeUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1660134070_add_wakuv2_store.up.sql", size: 269, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1660134070_add_wakuv2_store.up.sql", size: 269, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1d, 0xe6, 0xc3, 0x9, 0xef, 0xdc, 0xae, 0x49, 0x30, 0x78, 0x54, 0xd6, 0xdb, 0xbf, 0xc0, 0x8e, 0x25, 0x8f, 0xfc, 0x67, 0x80, 0x39, 0x37, 0xd4, 0x86, 0xc1, 0x85, 0xc8, 0x99, 0xc4, 0x59, 0xd4}}
 	return a, nil
 }
@@ -724,7 +725,7 @@ func _1660134072_waku2_store_messagesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1660134072_waku2_store_messages.up.sql", size: 497, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)}
+	info := bindataFileInfo{name: "1660134072_waku2_store_messages.up.sql", size: 497, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xeb, 0xb4, 0xa0, 0xa1, 0x2b, 0xcb, 0x4c, 0x3c, 0xc6, 0xd0, 0xe8, 0x96, 0xe3, 0x96, 0xf1, 0x4f, 0x1f, 0xe0, 0xe7, 0x1f, 0x85, 0xa3, 0xe, 0xf7, 0x52, 0x56, 0x63, 0x2b, 0xb0, 0x87, 0x7b}}
 	return a, nil
 }
@@ -744,7 +745,7 @@ func _1662365868_add_key_uid_accountsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1662365868_add_key_uid_accounts.up.sql", size: 68, mode: os.FileMode(0644), modTime: time.Unix(1663587122, 0)}
+	info := bindataFileInfo{name: "1662365868_add_key_uid_accounts.up.sql", size: 68, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc6, 0xd8, 0x2f, 0x2f, 0x3b, 0xa8, 0xbd, 0x6d, 0xf6, 0x87, 0x7e, 0xd2, 0xf1, 0xa2, 0xf7, 0x81, 0x6a, 0x23, 0x10, 0xbc, 0xbf, 0x5b, 0xe7, 0x2b, 0x9c, 0xa9, 0x8a, 0x18, 0xbb, 0xd0, 0x86, 0x91}}
 	return a, nil
 }
@@ -764,7 +765,7 @@ func _1662447680_add_keypairs_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1662447680_add_keypairs_table.up.sql", size: 218, mode: os.FileMode(0644), modTime: time.Unix(1663587122, 0)}
+	info := bindataFileInfo{name: "1662447680_add_keypairs_table.up.sql", size: 218, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdc, 0x25, 0xa9, 0xc7, 0x63, 0x27, 0x97, 0x35, 0x5f, 0x6b, 0xab, 0x26, 0xcb, 0xf9, 0xbd, 0x5e, 0xac, 0x3, 0xa0, 0x5e, 0xb9, 0x71, 0xa3, 0x1f, 0xb3, 0x4f, 0x7f, 0x79, 0x28, 0x48, 0xbe, 0xc}}
 	return a, nil
 }
@@ -784,7 +785,7 @@ func _1662460056_move_favourites_to_saved_addressesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1662460056_move_favourites_to_saved_addresses.up.sql", size: 233, mode: os.FileMode(0644), modTime: time.Unix(1663587122, 0)}
+	info := bindataFileInfo{name: "1662460056_move_favourites_to_saved_addresses.up.sql", size: 233, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x10, 0xa2, 0x8c, 0xa3, 0xec, 0xad, 0xdf, 0xc3, 0x48, 0x5, 0x9b, 0x50, 0x25, 0x59, 0xae, 0x7d, 0xee, 0x58, 0xd2, 0x41, 0x27, 0xf2, 0x22, 0x2e, 0x9a, 0xb9, 0x4a, 0xcc, 0x38, 0x6e, 0x3a, 0xb2}}
 	return a, nil
 }
@@ -804,7 +805,7 @@ func _1662738097_add_base_fee_transactionUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1662738097_add_base_fee_transaction.up.sql", size: 112, mode: os.FileMode(0644), modTime: time.Unix(1663587122, 0)}
+	info := bindataFileInfo{name: "1662738097_add_base_fee_transaction.up.sql", size: 112, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0xfb, 0x10, 0xae, 0xfc, 0x77, 0x70, 0x98, 0x6f, 0xec, 0xaa, 0xcd, 0x7, 0xc7, 0x74, 0x23, 0xc, 0xd5, 0x1e, 0x82, 0xdd, 0xfe, 0xff, 0x3b, 0xd2, 0x49, 0x10, 0x5b, 0x30, 0xc, 0x2d, 0xb0}}
 	return a, nil
 }
@@ -824,7 +825,7 @@ func _1662972194_add_keypairs_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1662972194_add_keypairs_table.up.sql", size: 345, mode: os.FileMode(0644), modTime: time.Unix(1663587122, 0)}
+	info := bindataFileInfo{name: "1662972194_add_keypairs_table.up.sql", size: 345, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x76, 0xf2, 0x86, 0xe1, 0x7e, 0xe9, 0x47, 0x32, 0x48, 0xd5, 0x6b, 0xe5, 0xd, 0xab, 0xb7, 0xf1, 0xd4, 0xf1, 0xad, 0x38, 0xa6, 0x11, 0xe7, 0xce, 0x5c, 0x11, 0x11, 0xf, 0x47, 0xb2, 0x4}}
 	return a, nil
 }
@@ -844,7 +845,7 @@ func _1664392661_add_third_party_id_to_waku_messagesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1664392661_add_third_party_id_to_waku_messages.up.sql", size: 70, mode: os.FileMode(0644), modTime: time.Unix(1664889752, 0)}
+	info := bindataFileInfo{name: "1664392661_add_third_party_id_to_waku_messages.up.sql", size: 70, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x67, 0x66, 0x9e, 0x66, 0x74, 0xce, 0x1c, 0xb, 0x1b, 0x9d, 0xd5, 0xfc, 0x65, 0xe, 0x83, 0x90, 0x4c, 0x61, 0x4e, 0x6b, 0xe7, 0x86, 0xbe, 0x36, 0x4f, 0x91, 0x36, 0x4, 0x47, 0x7b, 0x82}}
 	return a, nil
 }
@@ -864,7 +865,7 @@ func _1664783660_add_sync_info_to_saved_addressesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1664783660_add_sync_info_to_saved_addresses.up.sql", size: 388, mode: os.FileMode(0644), modTime: time.Unix(1666188919, 0)}
+	info := bindataFileInfo{name: "1664783660_add_sync_info_to_saved_addresses.up.sql", size: 388, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x67, 0x7c, 0x3a, 0x95, 0x4e, 0x55, 0xb2, 0xbd, 0xb4, 0x18, 0x93, 0xc1, 0xcf, 0x9f, 0x12, 0xbb, 0x49, 0x8a, 0x2a, 0x6a, 0x2a, 0x7f, 0xad, 0x44, 0xc3, 0xf, 0x3a, 0x79, 0x18, 0xb9, 0x4c, 0x64}}
 	return a, nil
 }
@@ -884,7 +885,7 @@ func _1668109917_wakunodesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1668109917_wakunodes.up.sql", size: 99, mode: os.FileMode(0644), modTime: time.Unix(1669399895, 0)}
+	info := bindataFileInfo{name: "1668109917_wakunodes.up.sql", size: 99, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x29, 0xaa, 0x9e, 0x2, 0x66, 0x85, 0x69, 0xa8, 0xd9, 0xe2, 0x4b, 0x8d, 0x2a, 0x9c, 0xdf, 0xd2, 0xef, 0x64, 0x58, 0xe3, 0xa6, 0xe7, 0xc1, 0xd1, 0xc8, 0x9c, 0xc0, 0x2c, 0x1, 0xa8, 0x7b, 0x81}}
 	return a, nil
 }
@@ -904,7 +905,7 @@ func _1670249678_display_name_to_settings_sync_clock_tableUpSql() (*asset, error
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1670249678_display_name_to_settings_sync_clock_table.up.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1670249678_display_name_to_settings_sync_clock_table.up.sql", size: 83, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x39, 0x18, 0xdc, 0xc4, 0x1f, 0x79, 0x22, 0x16, 0x4d, 0xdf, 0x6c, 0x66, 0xd5, 0xa4, 0x88, 0x5d, 0x5, 0x37, 0xa7, 0x41, 0x5, 0x50, 0xae, 0x12, 0xfa, 0x7e, 0x89, 0x24, 0x5c, 0xae, 0x30, 0xfc}}
 	return a, nil
 }
@@ -924,7 +925,7 @@ func _1670836810_add_imported_flag_to_community_archive_hashesUpSql() (*asset, e
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1670836810_add_imported_flag_to_community_archive_hashes.up.sql", size: 144, mode: os.FileMode(0644), modTime: time.Unix(1673009770, 0)}
+	info := bindataFileInfo{name: "1670836810_add_imported_flag_to_community_archive_hashes.up.sql", size: 144, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0xf, 0xf0, 0xbd, 0xfe, 0x63, 0x25, 0x8f, 0x5e, 0x46, 0x4b, 0x45, 0x31, 0x8b, 0x3e, 0xd8, 0x6b, 0x5d, 0x9d, 0x6d, 0x10, 0x9a, 0x87, 0x4b, 0x18, 0xc6, 0x39, 0x81, 0x6e, 0xe4, 0x75, 0xfb}}
 	return a, nil
 }
@@ -944,7 +945,7 @@ func _1671438731_add_magnetlink_uri_to_communities_archive_infoUpSql() (*asset,
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1671438731_add_magnetlink_uri_to_communities_archive_info.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1671438731_add_magnetlink_uri_to_communities_archive_info.up.sql", size: 86, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xda, 0x8b, 0x4b, 0xd6, 0xd8, 0xe2, 0x3d, 0xf7, 0x6b, 0xcd, 0x1e, 0x70, 0x9, 0x2e, 0x35, 0x4, 0x61, 0xc3, 0xb5, 0x9d, 0xc5, 0x27, 0x21, 0xa, 0x5a, 0xd6, 0x3e, 0xa6, 0x24, 0xa2, 0x12, 0xdf}}
 	return a, nil
 }
@@ -964,7 +965,7 @@ func _1672933930_switcher_cardUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1672933930_switcher_card.up.sql", size: 162, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1672933930_switcher_card.up.sql", size: 162, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x39, 0xba, 0xdc, 0xbb, 0x40, 0x4, 0xf2, 0x10, 0xdf, 0xb4, 0xd2, 0x80, 0x8a, 0x74, 0x4d, 0xf6, 0xbc, 0x50, 0x7, 0xd, 0x22, 0x7f, 0xc4, 0xaf, 0xaa, 0xde, 0xdc, 0x71, 0xe9, 0x42, 0x98, 0x36}}
 	return a, nil
 }
@@ -984,7 +985,7 @@ func _1674056187_add_price_cacheUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1674056187_add_price_cache.up.sql", size: 255, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1674056187_add_price_cache.up.sql", size: 255, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb7, 0x79, 0x6a, 0x9b, 0x28, 0xd1, 0x22, 0xf0, 0x84, 0x76, 0x40, 0x39, 0x49, 0x15, 0x5d, 0xaa, 0xfd, 0x11, 0xff, 0x13, 0x27, 0x42, 0x12, 0xfa, 0x82, 0xe6, 0x7a, 0xf0, 0x5e, 0x1f, 0xe3, 0xba}}
 	return a, nil
 }
@@ -1004,7 +1005,7 @@ func _1674136690_ens_usernamesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1674136690_ens_usernames.up.sql", size: 98, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1674136690_ens_usernames.up.sql", size: 98, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0x7a, 0xf3, 0xa8, 0x88, 0x99, 0xd6, 0x9c, 0x69, 0x48, 0x3c, 0x10, 0xda, 0x72, 0xdc, 0x14, 0xd, 0x6e, 0x8c, 0x82, 0x92, 0x2d, 0x2c, 0xee, 0x4c, 0x70, 0xa4, 0xdc, 0x5c, 0x5, 0x2, 0xc3}}
 	return a, nil
 }
@@ -1024,7 +1025,7 @@ func _1674232431_add_balance_historyUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1674232431_add_balance_history.up.sql", size: 698, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1674232431_add_balance_history.up.sql", size: 698, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0xb5, 0x18, 0xca, 0x4a, 0x93, 0xbb, 0x6f, 0xa4, 0xee, 0xe4, 0x3e, 0xff, 0x6a, 0x4b, 0xe2, 0xe1, 0x61, 0x28, 0xee, 0xc5, 0x26, 0x57, 0x61, 0x5e, 0x6d, 0x44, 0x1e, 0x85, 0x43, 0x70, 0xa2}}
 	return a, nil
 }
@@ -1044,7 +1045,7 @@ func _1676368933_keypairs_to_keycardsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1676368933_keypairs_to_keycards.up.sql", size: 639, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1676368933_keypairs_to_keycards.up.sql", size: 639, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0x93, 0x27, 0x2, 0xf0, 0x37, 0x81, 0x65, 0xa4, 0xb3, 0x5b, 0x60, 0x36, 0x95, 0xfc, 0x81, 0xf0, 0x3b, 0x7c, 0xc3, 0x2c, 0x85, 0xbd, 0x38, 0x46, 0xa4, 0x95, 0x4a, 0x6, 0x3e, 0x74, 0xd5}}
 	return a, nil
 }
@@ -1064,7 +1065,7 @@ func _1676951398_add_currency_format_cacheUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1676951398_add_currency_format_cache.up.sql", size: 291, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1676951398_add_currency_format_cache.up.sql", size: 291, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0xa3, 0x76, 0x35, 0xca, 0xf, 0xe8, 0xdf, 0xd9, 0x61, 0xf9, 0xed, 0xfc, 0x6d, 0xf5, 0xe, 0x11, 0x88, 0xbd, 0x14, 0x92, 0xc6, 0x57, 0x53, 0xe, 0xcd, 0x52, 0xf4, 0xa9, 0xb1, 0xdd, 0xfd}}
 	return a, nil
 }
@@ -1084,7 +1085,7 @@ func _1676968196_keycards_add_clock_columnUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1676968196_keycards_add_clock_column.up.sql", size: 73, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1676968196_keycards_add_clock_column.up.sql", size: 73, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4c, 0xf, 0x1c, 0x28, 0x41, 0x57, 0x57, 0x6c, 0xe, 0x75, 0x6b, 0x75, 0x12, 0x0, 0x18, 0x1e, 0x88, 0x1e, 0x45, 0xe0, 0x32, 0xb9, 0xd4, 0xd9, 0x2e, 0xc8, 0xb, 0x80, 0x6, 0x51, 0x3d, 0x28}}
 	return a, nil
 }
@@ -1104,7 +1105,7 @@ func _1676968197_add_fallback_rpc_to_networksUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1676968197_add_fallback_rpc_to_networks.up.sql", size: 112, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1676968197_add_fallback_rpc_to_networks.up.sql", size: 112, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x6a, 0xc6, 0x45, 0xfa, 0x62, 0x84, 0x74, 0x6d, 0x7c, 0xd7, 0x1d, 0x79, 0xb6, 0x38, 0x43, 0xa8, 0x8, 0x6b, 0x75, 0x3d, 0x9, 0x2, 0xc5, 0x9f, 0xbb, 0x45, 0x56, 0x4c, 0x4e, 0x17, 0x89}}
 	return a, nil
 }
@@ -1124,7 +1125,7 @@ func _1677674090_add_chains_ens_istest_to_saved_addressesUpSql() (*asset, error)
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1677674090_add_chains_ens_istest_to_saved_addresses.up.sql", size: 638, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1677674090_add_chains_ens_istest_to_saved_addresses.up.sql", size: 638, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa8, 0x2d, 0xa4, 0x1b, 0xf6, 0x6a, 0x13, 0x7b, 0xe, 0x59, 0xcd, 0xe2, 0x4e, 0x81, 0x99, 0xc4, 0x33, 0x84, 0xde, 0x66, 0xca, 0xac, 0x2f, 0x5, 0x90, 0xac, 0xfd, 0x4e, 0xfc, 0x55, 0x44, 0xe5}}
 	return a, nil
 }
@@ -1144,7 +1145,7 @@ func _1677681143_accounts_table_type_column_updateUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1677681143_accounts_table_type_column_update.up.sql", size: 135, mode: os.FileMode(0644), modTime: time.Unix(1678807591, 0)}
+	info := bindataFileInfo{name: "1677681143_accounts_table_type_column_update.up.sql", size: 135, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xc4, 0x6, 0x42, 0x50, 0x1d, 0xf4, 0x48, 0x55, 0xbc, 0xa2, 0x19, 0xdd, 0xad, 0xc8, 0xc, 0xa7, 0x30, 0xb6, 0xaf, 0xe, 0x2b, 0xaa, 0x2a, 0xa4, 0xe1, 0xb9, 0x41, 0x23, 0x66, 0xd3, 0x3}}
 	return a, nil
 }
@@ -1164,7 +1165,7 @@ func _1678264207_accounts_table_new_columns_addedUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1678264207_accounts_table_new_columns_added.up.sql", size: 130, mode: os.FileMode(0644), modTime: time.Unix(1680619081, 0)}
+	info := bindataFileInfo{name: "1678264207_accounts_table_new_columns_added.up.sql", size: 130, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xd4, 0xf3, 0x35, 0xef, 0x5c, 0x19, 0x3c, 0x15, 0x90, 0x60, 0xbd, 0x1f, 0x81, 0xf0, 0x86, 0x73, 0x89, 0xa0, 0x70, 0xf2, 0x46, 0xae, 0xea, 0xd0, 0xc6, 0x9e, 0x55, 0x4a, 0x54, 0x62, 0xbb}}
 	return a, nil
 }
@@ -1184,7 +1185,7 @@ func _1680770368_add_bio_to_settings_sync_clock_tableUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1680770368_add_bio_to_settings_sync_clock_table.up.sql", size: 75, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1680770368_add_bio_to_settings_sync_clock_table.up.sql", size: 75, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4a, 0x52, 0xf6, 0x3f, 0xaa, 0xd, 0xa0, 0xee, 0xe8, 0xe6, 0x16, 0x21, 0x80, 0x61, 0xe4, 0x7a, 0x4e, 0x37, 0x8d, 0x30, 0x51, 0x20, 0x4d, 0x15, 0x47, 0xfb, 0x6, 0xa1, 0xce, 0xc8, 0x27, 0x5a}}
 	return a, nil
 }
@@ -1204,7 +1205,7 @@ func _1681110436_add_mnemonic_to_settings_sync_clock_tableUpSql() (*asset, error
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1681110436_add_mnemonic_to_settings_sync_clock_table.up.sql", size: 311, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1681110436_add_mnemonic_to_settings_sync_clock_table.up.sql", size: 311, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3d, 0x74, 0x81, 0x7d, 0x9e, 0x77, 0xb6, 0xfe, 0xe3, 0xcb, 0x48, 0xe5, 0x5f, 0x39, 0x23, 0xa1, 0x7d, 0x53, 0x22, 0xe8, 0x96, 0x15, 0x8a, 0x1e, 0x8e, 0xbc, 0xe2, 0x1d, 0xc4, 0xc2, 0x56, 0x34}}
 	return a, nil
 }
@@ -1224,7 +1225,7 @@ func _1681392602_9d_sync_periodUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1681392602_9d_sync_period.up.sql", size: 60, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1681392602_9d_sync_period.up.sql", size: 60, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xa, 0x90, 0x29, 0x7f, 0x76, 0x98, 0xa7, 0x71, 0x80, 0x5a, 0x2f, 0xbe, 0x23, 0x9a, 0xd4, 0xf4, 0x39, 0x19, 0xd3, 0xa5, 0x34, 0x6e, 0x67, 0x6a, 0xbe, 0x8a, 0xad, 0x21, 0xc7, 0xba, 0x88}}
 	return a, nil
 }
@@ -1244,7 +1245,7 @@ func _1681762078_default_sync_period_9dUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1681762078_default_sync_period_9d.up.sql", size: 3002, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1681762078_default_sync_period_9d.up.sql", size: 3002, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xd9, 0x26, 0xfc, 0xa9, 0x45, 0xc1, 0x81, 0xa8, 0xe2, 0x2c, 0xe9, 0x3c, 0xea, 0x1d, 0x37, 0x11, 0x45, 0x8c, 0x6c, 0xbc, 0xc2, 0x6, 0x69, 0x2, 0x75, 0x29, 0x40, 0x9f, 0xc5, 0xbb, 0x36}}
 	return a, nil
 }
@@ -1264,7 +1265,7 @@ func _1681780680_add_clock_to_social_links_settingsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1681780680_add_clock_to_social_links_settings.up.sql", size: 137, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1681780680_add_clock_to_social_links_settings.up.sql", size: 137, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x63, 0x11, 0xf5, 0x41, 0xe5, 0x5a, 0xf4, 0xe3, 0xf3, 0x14, 0x87, 0x28, 0xd8, 0xf0, 0x52, 0x31, 0x8, 0xd5, 0xbb, 0xf4, 0xff, 0x55, 0x5f, 0x42, 0x90, 0xcb, 0xf7, 0x46, 0x2, 0x6, 0xbe, 0x42}}
 	return a, nil
 }
@@ -1284,7 +1285,7 @@ func _1682073779_settings_table_remove_latest_derived_path_columnUpSql() (*asset
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1682073779_settings_table_remove_latest_derived_path_column.up.sql", size: 4470, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1682073779_settings_table_remove_latest_derived_path_column.up.sql", size: 4470, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7a, 0x36, 0x2, 0x41, 0xd, 0x5c, 0xd1, 0x92, 0x85, 0x6d, 0x84, 0xff, 0x67, 0xa7, 0x4c, 0x67, 0xa4, 0xef, 0x52, 0x69, 0x1f, 0x22, 0x25, 0x92, 0xc, 0xb3, 0x89, 0x50, 0x91, 0xc, 0x49, 0xf9}}
 	return a, nil
 }
@@ -1304,7 +1305,7 @@ func _1682146075_add_created_at_to_saved_addressesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1682146075_add_created_at_to_saved_addresses.up.sql", size: 107, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1682146075_add_created_at_to_saved_addresses.up.sql", size: 107, mode: os.FileMode(0664), modTime: time.Unix(1682584708, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x88, 0xfe, 0x35, 0x9c, 0x6b, 0xdf, 0x67, 0x18, 0x16, 0xe4, 0xc9, 0xd4, 0x77, 0x7c, 0x4, 0xe2, 0x6c, 0x41, 0xd9, 0x53, 0x97, 0xfe, 0x5, 0xa3, 0x23, 0xce, 0x82, 0xad, 0x92, 0x5e, 0xd7, 0x7d}}
 	return a, nil
 }
@@ -1324,11 +1325,31 @@ func _1682393575_sync_ens_nameUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1682393575_sync_ens_name.up.sql", size: 713, mode: os.FileMode(0644), modTime: time.Unix(1682559310, 0)}
+	info := bindataFileInfo{name: "1682393575_sync_ens_name.up.sql", size: 713, mode: os.FileMode(0664), modTime: time.Unix(1683549277, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfb, 0xea, 0xcb, 0x4d, 0x71, 0x5a, 0x49, 0x19, 0x8b, 0xef, 0x66, 0x27, 0x33, 0x89, 0xb0, 0xe, 0x37, 0x1b, 0x41, 0x8, 0x12, 0xcc, 0x56, 0xd8, 0x1b, 0xf, 0xf8, 0x50, 0x4b, 0x93, 0xf1, 0x29}}
 	return a, nil
 }
var __1683457503_add_blocks_ranges_sequential_tableUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x8e\xd1\x4a\x80\x30\x14\x86\xef\x7d\x8a\xff\x52\xc1\x37\xe8\x6a\xea\xd2\x43\x36\x63\xce\xcc\xab\x31\x73\x85\x38\x94\xb6\x45\xaf\x1f\x49\x21\x44\xdd\x9e\xef\xfc\x1f\x5f\x29\x39\x53\x1c\x8a\x15\x2d\x07\xdd\x42\x74\x0a\xfc\x89\x7a\xd5\x63\x76\xc7\xf3\x16\xb4\x37\xfb\xab\x0d\x3a\xd8\xb7\x77\xbb\xc7\xd5\x38\xa4\x09\x00\xec\x36\x7e\x1c\x7e\xd3\xeb\x82\x41\xf4\x54\x0b\x5e\xa1\xa0\x9a\x84\x3a\x25\x62\x68\xdb\xfc\x7c\x34\xcb\xe2\x6d\x08\x78\x64\xb2\x6c\x98\xfc\x45\x67\xb7\xe9\x10\x8d\x8f\xdf\xe3\xeb\xfa\xb2\xfa\x10\xff\x56\x7e\x61\x67\xfe\xa3\x0f\x92\xee\x99\x9c\x70\xc7\x27\xa4\x57\x66\xfe\x53\x92\x25\x19\x46\x52\x4d\x37\x28\xc8\x6e\xa4\xea\x26\xf9\x0c\x00\x00\xff\xff\x16\x63\xd9\xa7\x07\x01\x00\x00")
func _1683457503_add_blocks_ranges_sequential_tableUpSqlBytes() ([]byte, error) {
return bindataRead(
__1683457503_add_blocks_ranges_sequential_tableUpSql,
"1683457503_add_blocks_ranges_sequential_table.up.sql",
)
}
func _1683457503_add_blocks_ranges_sequential_tableUpSql() (*asset, error) {
bytes, err := _1683457503_add_blocks_ranges_sequential_tableUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1683457503_add_blocks_ranges_sequential_table.up.sql", size: 263, mode: os.FileMode(0664), modTime: time.Unix(1684481356, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfe, 0x57, 0x2e, 0x0, 0x6a, 0x6e, 0xd7, 0xeb, 0xe6, 0x66, 0x79, 0x32, 0x22, 0x82, 0x92, 0xf4, 0xc9, 0xf1, 0x58, 0x1a, 0x45, 0x60, 0x77, 0x50, 0xe7, 0x54, 0x4a, 0xc0, 0x42, 0x3a, 0x4f, 0x35}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00") var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
func docGoBytes() ([]byte, error) { func docGoBytes() ([]byte, error) {
@ -1344,7 +1365,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1662753054, 0)} info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1682577901, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil return a, nil
} }
@ -1560,6 +1581,8 @@ var _bindata = map[string]func() (*asset, error){
"1682393575_sync_ens_name.up.sql": _1682393575_sync_ens_nameUpSql, "1682393575_sync_ens_name.up.sql": _1682393575_sync_ens_nameUpSql,
"1683457503_add_blocks_ranges_sequential_table.up.sql": _1683457503_add_blocks_ranges_sequential_tableUpSql,
"doc.go": docGo, "doc.go": docGo,
} }
@ -1664,6 +1687,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
"1682073779_settings_table_remove_latest_derived_path_column.up.sql": &bintree{_1682073779_settings_table_remove_latest_derived_path_columnUpSql, map[string]*bintree{}}, "1682073779_settings_table_remove_latest_derived_path_column.up.sql": &bintree{_1682073779_settings_table_remove_latest_derived_path_columnUpSql, map[string]*bintree{}},
"1682146075_add_created_at_to_saved_addresses.up.sql": &bintree{_1682146075_add_created_at_to_saved_addressesUpSql, map[string]*bintree{}}, "1682146075_add_created_at_to_saved_addresses.up.sql": &bintree{_1682146075_add_created_at_to_saved_addressesUpSql, map[string]*bintree{}},
"1682393575_sync_ens_name.up.sql": &bintree{_1682393575_sync_ens_nameUpSql, map[string]*bintree{}}, "1682393575_sync_ens_name.up.sql": &bintree{_1682393575_sync_ens_nameUpSql, map[string]*bintree{}},
"1683457503_add_blocks_ranges_sequential_table.up.sql": &bintree{_1683457503_add_blocks_ranges_sequential_tableUpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}}, "doc.go": &bintree{docGo, map[string]*bintree{}},
}} }}

View File

@ -0,0 +1,8 @@
CREATE TABLE IF NOT EXISTS blocks_ranges_sequential (
network_id UNSIGNED BIGINT NOT NULL,
address VARCHAR NOT NULL,
blk_start BIGINT,
blk_first BIGINT NOT NULL,
blk_last BIGINT NOT NULL,
PRIMARY KEY (network_id, address)
) WITHOUT ROWID;
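Note that blk_start is the only nullable column here, so readers on the Go side must scan it through a nullable type (the DAO below does this via bigint.SQLBigInt pointers). A minimal sketch of reading one row with database/sql, with illustrative variable names not taken from this change:

row := db.QueryRow(`SELECT blk_start, blk_first, blk_last
    FROM blocks_ranges_sequential WHERE network_id = ? AND address = ?`, chainID, address)
var start sql.NullInt64 // blk_start may be NULL until a start block is detected
var first, last int64
switch err := row.Scan(&start, &first, &last); err {
case sql.ErrNoRows:
    // no range stored yet for this account
case nil:
    // start.Valid reports whether blk_start has been set
default:
    // propagate err
}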

View File

@ -0,0 +1,146 @@
package transfer
import (
"database/sql"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/services/wallet/bigint"
)
const (
firstBlockColumn = "blk_first"
lastBlockColumn = "blk_last"
startBlockColumn = "blk_start"
)
type BlockRangeSequentialDAO struct {
db *sql.DB
}
type BlockRange struct {
Start *big.Int // Block of first transfer
FirstKnown *big.Int // Oldest scanned block
LastKnown *big.Int // Last scanned block
}
func NewBlockRange() *BlockRange {
return &BlockRange{Start: &big.Int{}, FirstKnown: &big.Int{}, LastKnown: &big.Int{}}
}
func (b *BlockRangeSequentialDAO) getBlockRange(chainID uint64, address common.Address) (blockRange *BlockRange, err error) {
query := `SELECT blk_start, blk_first, blk_last FROM blocks_ranges_sequential
WHERE address = ?
AND network_id = ?`
rows, err := b.db.Query(query, address, chainID)
if err != nil {
return
}
defer rows.Close()
if rows.Next() {
blockRange = NewBlockRange()
err = rows.Scan((*bigint.SQLBigInt)(blockRange.Start), (*bigint.SQLBigInt)(blockRange.FirstKnown), (*bigint.SQLBigInt)(blockRange.LastKnown))
if err != nil {
return nil, err
}
return blockRange, nil
}
return nil, nil
}
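A nil, nil result from getBlockRange means no range has been persisted for this account yet; callers must treat that as "not scanned" rather than as an error. A hypothetical caller sketch:

blockRange, err := b.getBlockRange(chainID, account)
if err != nil {
    return err // DB failure
}
if blockRange == nil {
    // first run for this account: start scanning from the chain head
} else {
    // resume scanning below blockRange.FirstKnown
}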
// TODO: call this when an account is removed
//
//lint:ignore U1000 Ignore unused function temporarily
func (b *BlockRangeSequentialDAO) deleteRange(chainID uint64, account common.Address) error {
log.Info("delete blocks range", "account", account, "network", chainID)
delete, err := b.db.Prepare(`DELETE FROM blocks_ranges_sequential
WHERE address = ?
AND network_id = ?`)
if err != nil {
log.Info("some error", "error", err)
return err
}
_, err = delete.Exec(account, chainID)
return err
}
func (b *BlockRangeSequentialDAO) updateStartBlock(chainID uint64, account common.Address, block *big.Int) (err error) {
return updateBlock(b.db, chainID, account, startBlockColumn, block)
}
//lint:ignore U1000 Ignore unused function temporarily, TODO use it when new transfers are fetched
func (b *BlockRangeSequentialDAO) updateLastBlock(chainID uint64, account common.Address, block *big.Int) (err error) {
return updateBlock(b.db, chainID, account, lastBlockColumn, block)
}
func (b *BlockRangeSequentialDAO) updateFirstBlock(chainID uint64, account common.Address, block *big.Int) (err error) {
return updateBlock(b.db, chainID, account, firstBlockColumn, block)
}
func updateBlock(creator statementCreator, chainID uint64, account common.Address,
blockColumn string, block *big.Int) (err error) {
update, err := creator.Prepare(fmt.Sprintf(`UPDATE blocks_ranges_sequential
SET %s = ?
WHERE address = ?
AND network_id = ?`, blockColumn))
if err != nil {
return err
}
_, err = update.Exec((*bigint.SQLBigInt)(block), account, chainID)
if err != nil {
return err
}
return
}
func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Address,
start *big.Int, first *big.Int, last *big.Int) (err error) {
log.Info("upsert blocks range", "account", account, "network id", chainID, "start", start, "first", first, "last", last)
update, err := b.db.Prepare(`UPDATE blocks_ranges_sequential
SET blk_start = ?,
blk_first = ?,
blk_last = ?
WHERE address = ?
AND network_id = ?`)
if err != nil {
return err
}
res, err := update.Exec((*bigint.SQLBigInt)(start), (*bigint.SQLBigInt)(first), (*bigint.SQLBigInt)(last), account, chainID)
if err != nil {
return err
}
affected, err := res.RowsAffected()
if err != nil {
return err
}
if affected == 0 {
insert, err := b.db.Prepare("INSERT INTO blocks_ranges_sequential (network_id, address, blk_first, blk_last, blk_start) VALUES (?, ?, ?, ?, ?)")
if err != nil {
return err
}
_, err = insert.Exec(chainID, account, (*bigint.SQLBigInt)(first), (*bigint.SQLBigInt)(last), (*bigint.SQLBigInt)(start))
if err != nil {
return err
}
}
return
}
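upsertRange emulates an upsert with an UPDATE followed by a conditional INSERT keyed on RowsAffected. A possible single-statement alternative, assuming the bundled SQLite is 3.24+ (which supports ON CONFLICT ... DO UPDATE); this is a sketch, not what the change uses:

_, err = b.db.Exec(`INSERT INTO blocks_ranges_sequential
        (network_id, address, blk_start, blk_first, blk_last)
    VALUES (?, ?, ?, ?, ?)
    ON CONFLICT(network_id, address) DO UPDATE SET
        blk_start = excluded.blk_start,
        blk_first = excluded.blk_first,
        blk_last = excluded.blk_last`,
    chainID, account, (*bigint.SQLBigInt)(start), (*bigint.SQLBigInt)(first), (*bigint.SQLBigInt)(last))
return err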

View File

@ -27,6 +27,9 @@ const (
EventFetchingHistoryError walletevent.EventType = "fetching-history-error" EventFetchingHistoryError walletevent.EventType = "fetching-history-error"
// EventNonArchivalNodeDetected emitted when a connection to a non archival node is detected // EventNonArchivalNodeDetected emitted when a connection to a non archival node is detected
EventNonArchivalNodeDetected walletevent.EventType = "non-archival-node-detected" EventNonArchivalNodeDetected walletevent.EventType = "non-archival-node-detected"
numberOfBlocksCheckedPerIteration = 40
noBlockLimit = 0
) )
var ( var (
@ -41,7 +44,6 @@ var (
goerliChainID = uint64(5) goerliChainID = uint64(5)
goerliArbitrumChainID = uint64(421613) goerliArbitrumChainID = uint64(421613)
binanceTestChainID = uint64(97) binanceTestChainID = uint64(97)
numberOfBlocksCheckedPerIteration = 40
) )
type ethHistoricalCommand struct { type ethHistoricalCommand struct {
@ -55,7 +57,8 @@ type ethHistoricalCommand struct {
noLimit bool noLimit bool
from *Block from *Block
to, resultingFrom *big.Int to, resultingFrom, startBlock *big.Int
threadLimit uint32
} }
func (c *ethHistoricalCommand) Command() async.Command { func (c *ethHistoricalCommand) Command() async.Command {
@ -75,17 +78,20 @@ func (c *ethHistoricalCommand) Run(ctx context.Context) (err error) {
if c.from.Number != nil && c.from.Nonce != nil { if c.from.Number != nil && c.from.Nonce != nil {
c.balanceCache.addNonceToCache(c.address, c.from.Number, c.from.Nonce) c.balanceCache.addNonceToCache(c.address, c.from.Number, c.from.Nonce)
} }
from, headers, err := findBlocksWithEthTransfers(ctx, c.chainClient, c.balanceCache, c.eth, c.address, c.from.Number, c.to, c.noLimit) from, headers, startBlock, err := findBlocksWithEthTransfers(ctx, c.chainClient, c.balanceCache, c.eth, c.address, c.from.Number, c.to, c.noLimit, c.threadLimit)
if err != nil { if err != nil {
c.error = err c.error = err
log.Error("failed to find blocks with transfers", "error", err)
return nil return nil
} }
c.foundHeaders = headers c.foundHeaders = headers
c.resultingFrom = from c.resultingFrom = from
c.startBlock = startBlock
log.Info("eth historical downloader finished successfully", "address", c.address, "from", from, "to", c.to, "total blocks", len(headers), "time", time.Since(start)) log.Info("eth historical downloader finished successfully", "chain", c.chainClient.ChainID,
"address", c.address, "from", from, "to", c.to, "total blocks", len(headers), "time", time.Since(start))
return nil return nil
} }
@ -239,6 +245,7 @@ func (c *controlCommand) Run(parent context.Context) error {
cmnd := &findAndCheckBlockRangeCommand{ cmnd := &findAndCheckBlockRangeCommand{
accounts: c.accounts, accounts: c.accounts,
db: c.db, db: c.db,
blockDAO: c.blockDAO,
chainClient: c.chainClient, chainClient: c.chainClient,
balanceCache: bCache, balanceCache: bCache,
feed: c.feed, feed: c.feed,
@ -261,7 +268,7 @@ func (c *controlCommand) Run(parent context.Context) error {
return cmnd.error return cmnd.error
} }
_, err = c.LoadTransfers(parent, 40) _, err = c.LoadTransfers(parent, numberOfBlocksCheckedPerIteration)
if err != nil { if err != nil {
if c.NewError(err) { if c.NewError(err) {
return nil return nil
@ -336,7 +343,7 @@ func (c *controlCommand) Command() async.Command {
type transfersCommand struct { type transfersCommand struct {
db *Database db *Database
eth *ETHDownloader eth *ETHDownloader
block *big.Int blockNum *big.Int
address common.Address address common.Address
chainClient *chain.ClientWithFallback chainClient *chain.ClientWithFallback
fetchedTransfers []Transfer fetchedTransfers []Transfer
@ -351,11 +358,13 @@ func (c *transfersCommand) Command() async.Command {
} }
func (c *transfersCommand) Run(ctx context.Context) (err error) { func (c *transfersCommand) Run(ctx context.Context) (err error) {
log.Debug("start transfersCommand", "chain", c.chainClient.ChainID, "address", c.address, "block", c.blockNum)
startTs := time.Now() startTs := time.Now()
allTransfers, err := getTransfersByBlocks(ctx, c.db, c.eth, []*big.Int{c.block}) allTransfers, err := c.eth.GetTransfersByNumber(ctx, c.blockNum)
if err != nil { if err != nil {
log.Info("getTransfersByBlocks error", "error", err) log.Error("getTransfersByBlocks error", "error", err)
return err return err
} }
@ -373,7 +382,7 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) {
} }
if len(allTransfers) > 0 { if len(allTransfers) > 0 {
err = c.db.SaveTransfersMarkBlocksLoaded(c.chainClient.ChainID, c.address, allTransfers, []*big.Int{c.block}) err = c.db.SaveTransfersMarkBlocksLoaded(c.chainClient.ChainID, c.address, allTransfers, []*big.Int{c.blockNum})
if err != nil { if err != nil {
log.Error("SaveTransfers error", "error", err) log.Error("SaveTransfers error", "error", err)
return err return err
@ -381,7 +390,9 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) {
} }
c.fetchedTransfers = allTransfers c.fetchedTransfers = allTransfers
log.Debug("transfers loaded", "address", c.address, "len", len(allTransfers), "in", time.Since(startTs)) log.Debug("end transfersCommand", "chain", c.chainClient.ChainID, "address", c.address,
"block", c.blockNum, "len", len(allTransfers), "in", time.Since(startTs))
return nil return nil
} }
@ -393,6 +404,7 @@ type loadTransfersCommand struct {
blocksByAddress map[common.Address][]*big.Int blocksByAddress map[common.Address][]*big.Int
foundTransfersByAddress map[common.Address][]Transfer foundTransfersByAddress map[common.Address][]Transfer
transactionManager *TransactionManager transactionManager *TransactionManager
blocksLimit int
} }
func (c *loadTransfersCommand) Command() async.Command { func (c *loadTransfersCommand) Command() async.Command {
@ -407,7 +419,7 @@ func (c *loadTransfersCommand) LoadTransfers(ctx context.Context, limit int, blo
} }
func (c *loadTransfersCommand) Run(parent context.Context) (err error) { func (c *loadTransfersCommand) Run(parent context.Context) (err error) {
transfersByAddress, err := c.LoadTransfers(parent, 40, c.blocksByAddress, c.transactionManager) transfersByAddress, err := c.LoadTransfers(parent, c.blocksLimit, c.blocksByAddress, c.transactionManager)
if err != nil { if err != nil {
return err return err
} }
@ -419,6 +431,7 @@ func (c *loadTransfersCommand) Run(parent context.Context) (err error) {
type findAndCheckBlockRangeCommand struct { type findAndCheckBlockRangeCommand struct {
accounts []common.Address accounts []common.Address
db *Database db *Database
blockDAO *BlockDAO
chainClient *chain.ClientWithFallback chainClient *chain.ClientWithFallback
balanceCache *balanceCache balanceCache *balanceCache
feed *event.Feed feed *event.Feed
@ -528,6 +541,7 @@ func (c *findAndCheckBlockRangeCommand) fastIndex(ctx context.Context, bCache *b
from: fromByAddress[address], from: fromByAddress[address],
to: toByAddress[address], to: toByAddress[address],
noLimit: c.noLimit, noLimit: c.noLimit,
threadLimit: NoThreadLimit,
} }
commands[i] = eth commands[i] = eth
group.Add(eth.Command()) group.Add(eth.Command())
@ -586,10 +600,10 @@ func (c *findAndCheckBlockRangeCommand) fastIndexErc20(ctx context.Context, from
} }
func loadTransfers(ctx context.Context, accounts []common.Address, blockDAO *BlockDAO, db *Database, func loadTransfers(ctx context.Context, accounts []common.Address, blockDAO *BlockDAO, db *Database,
chainClient *chain.ClientWithFallback, limit int, blocksByAddress map[common.Address][]*big.Int, chainClient *chain.ClientWithFallback, blocksLimitPerAccount int, blocksByAddress map[common.Address][]*big.Int,
transactionManager *TransactionManager) (map[common.Address][]Transfer, error) { transactionManager *TransactionManager) (map[common.Address][]Transfer, error) {
log.Info("loadTransfers start", "accounts", accounts, "limit", limit) log.Info("loadTransfers start", "accounts", accounts, "chain", chainClient.ChainID, "limit", blocksLimitPerAccount)
start := time.Now() start := time.Now()
group := async.NewGroup(ctx) group := async.NewGroup(ctx)
@ -601,6 +615,7 @@ func loadTransfers(ctx context.Context, accounts []common.Address, blockDAO *Blo
if !ok { if !ok {
blocks, _ = blockDAO.GetBlocksByAddress(chainClient.ChainID, address, numberOfBlocksCheckedPerIteration) blocks, _ = blockDAO.GetBlocksByAddress(chainClient.ChainID, address, numberOfBlocksCheckedPerIteration)
} }
for _, block := range blocks { for _, block := range blocks {
transfers := &transfersCommand{ transfers := &transfersCommand{
db: db, db: db,
@ -612,14 +627,13 @@ func loadTransfers(ctx context.Context, accounts []common.Address, blockDAO *Blo
signer: types.NewLondonSigner(chainClient.ToBigInt()), signer: types.NewLondonSigner(chainClient.ToBigInt()),
db: db, db: db,
}, },
block: block, blockNum: block,
transactionManager: transactionManager, transactionManager: transactionManager,
} }
commands = append(commands, transfers) commands = append(commands, transfers)
group.Add(transfers.Command()) group.Add(transfers.Command())
} }
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, ctx.Err() return nil, ctx.Err()
@ -639,7 +653,7 @@ func loadTransfers(ctx context.Context, accounts []common.Address, blockDAO *Blo
transfersByAddress[command.address] = append(transfers, transfer) transfersByAddress[command.address] = append(transfers, transfer)
} }
} }
log.Info("loadTransfers finished", "in", time.Since(start)) log.Info("loadTransfers finished for account", "in", time.Since(start), "chain", chainClient.ChainID)
return transfersByAddress, nil return transfersByAddress, nil
} }
} }
@ -731,20 +745,3 @@ func findFirstRanges(c context.Context, accounts []common.Address, initialTo *bi
return res, nil return res, nil
} }
func getTransfersByBlocks(ctx context.Context, db *Database, downloader *ETHDownloader, blocks []*big.Int) ([]Transfer, error) {
allTransfers := []Transfer{}
for _, block := range blocks {
transfers, err := downloader.GetTransfersByNumber(ctx, block)
if err != nil {
return nil, err
}
log.Debug("loadTransfers", "block", block, "new transfers", len(transfers))
if len(transfers) > 0 {
allTransfers = append(allTransfers, transfers...)
}
}
return allTransfers, nil
}

View File

@ -0,0 +1,492 @@
package transfer
import (
"context"
"math/big"
"sort"
"time"
"github.com/pkg/errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/rpc/chain"
"github.com/status-im/status-go/services/wallet/async"
"github.com/status-im/status-go/services/wallet/walletevent"
)
const (
allBlocksLoaded = "all blocks loaded"
)
// TODO NewFindBlocksCommand
type findBlocksCommand struct {
account common.Address
db *Database
blockDAO *BlockRangeSequentialDAO
chainClient *chain.ClientWithFallback
balanceCache *balanceCache
feed *event.Feed
noLimit bool
error error
resFromBlock *Block
startBlockNumber *big.Int
transactionManager *TransactionManager
}
func (c *findBlocksCommand) Command() async.Command {
return async.FiniteCommand{
Interval: 5 * time.Second,
Runable: c.Run,
}.Run
}
func (c *findBlocksCommand) Run(parent context.Context) (err error) {
log.Info("start findBlocksCommand", "account", c.account, "chain", c.chainClient.ChainID, "noLimit", c.noLimit)
rangeSize := big.NewInt(DefaultNodeBlockChunkSize)
to, err := c.loadFirstKnownBlockNumber()
log.Info("findBlocksCommand", "firstKnownBlockNumber", to, "error", err)
if err != nil {
if err.Error() != allBlocksLoaded {
c.error = err
}
return
}
var head *types.Header
if to == nil {
ctx, cancel := context.WithTimeout(parent, 3*time.Second)
head, err = c.chainClient.HeaderByNumber(ctx, nil)
cancel()
if err != nil {
c.error = err
log.Error("findBlocksCommand failed to get head block", "error", err)
return nil
}
log.Info("current head is", "chain", c.chainClient.ChainID, "block number", head.Number)
to = new(big.Int).Set(head.Number) // deep copy
} else {
to.Sub(to, big.NewInt(1))
}
var from = big.NewInt(0)
if to.Cmp(rangeSize) > 0 {
from.Sub(to, rangeSize)
}
for {
headers, _ := c.checkRange(parent, from, to)
if c.error != nil {
log.Error("findBlocksCommand checkRange", "error", c.error)
break
}
// 'to' is set to 'head' if the 'last' block was not found in the DB
if head != nil && to.Cmp(head.Number) == 0 {
log.Info("update blockrange", "head", head.Number, "to", to, "chain", c.chainClient.ChainID, "account", c.account)
err = c.blockDAO.upsertRange(c.chainClient.ChainID, c.account, c.startBlockNumber,
c.resFromBlock.Number, to)
if err != nil {
c.error = err
log.Error("findBlocksCommand upsertRange", "error", err)
break
}
}
log.Info("findBlocksCommand.Run()", "headers len", len(headers), "resFromBlock", c.resFromBlock.Number)
err = c.blockDAO.updateFirstBlock(c.chainClient.ChainID, c.account, c.resFromBlock.Number)
if err != nil {
c.error = err
log.Error("findBlocksCommand failed to update first block", "error", err)
break
}
if c.startBlockNumber.Cmp(big.NewInt(0)) > 0 {
err = c.blockDAO.updateStartBlock(c.chainClient.ChainID, c.account, c.startBlockNumber)
if err != nil {
c.error = err
log.Error("findBlocksCommand failed to update start block", "error", err)
break
}
}
// Assign new range
to.Sub(from, big.NewInt(1)) // it won't hit the cache, but we won't load the transfers twice
if to.Cmp(rangeSize) > 0 {
from.Sub(to, rangeSize)
} else {
from = big.NewInt(0)
}
if to.Cmp(big.NewInt(0)) <= 0 || (c.startBlockNumber != nil &&
c.startBlockNumber.Cmp(big.NewInt(0)) > 0 && to.Cmp(c.startBlockNumber) <= 0) {
log.Info("Start block has been found, stop execution", "startBlock", c.startBlockNumber, "to", to)
break
}
}
log.Info("end findBlocksCommand", "account", c.account, "chain", c.chainClient.ChainID, "noLimit", c.noLimit)
return nil
}
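Run walks the chain backward from the head in DefaultNodeBlockChunkSize windows, persisting the shrinking range after each pass and stopping at genesis or at the detected start block. A stripped-down sketch of just the window arithmetic, with illustrative names:

to := new(big.Int).Set(head) // chain head on the first pass
rangeSize := big.NewInt(DefaultNodeBlockChunkSize)
for to.Sign() > 0 {
    from := big.NewInt(0)
    if to.Cmp(rangeSize) > 0 {
        from.Sub(to, rangeSize) // window [to-rangeSize, to]
    }
    // ... checkRange(ctx, from, to) ...
    to = new(big.Int).Sub(from, big.NewInt(1)) // next window ends just below 'from'
}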
func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to *big.Int) (
foundHeaders []*DBHeader, err error) {
fromBlock := &Block{Number: from}
newFromBlock, ethHeaders, startBlock, err := c.fastIndex(parent, c.balanceCache, fromBlock, to)
log.Info("findBlocksCommand checkRange", "startBlock", startBlock, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit)
if err != nil {
log.Info("findBlocksCommand checkRange fastIndex", "err", err)
c.error = err
// return err // In case c.noLimit is true, hystrix "max concurrency" may be reached and we will not be able to index ETH transfers
return nil, nil
}
// TODO There should be transfers either when we have found headers
// or when newFromBlock is different from fromBlock, but if I check for
// ERC20 transfers only when there are ETH transfers, I will miss ERC20 transfers
// if len(ethHeaders) > 0 || newFromBlock.Number.Cmp(fromBlock.Number) != 0 { // there is transaction history for this account
erc20Headers, err := c.fastIndexErc20(parent, newFromBlock.Number, to)
if err != nil {
log.Info("findBlocksCommand checkRange fastIndexErc20", "err", err)
c.error = err
// return err
return nil, nil
}
allHeaders := append(ethHeaders, erc20Headers...)
if len(allHeaders) > 0 {
uniqHeadersByHash := map[common.Hash]*DBHeader{}
for _, header := range allHeaders {
uniqHeader, ok := uniqHeadersByHash[header.Hash]
if ok {
if len(header.Erc20Transfers) > 0 {
uniqHeader.Erc20Transfers = append(uniqHeader.Erc20Transfers, header.Erc20Transfers...)
}
uniqHeadersByHash[header.Hash] = uniqHeader
} else {
uniqHeadersByHash[header.Hash] = header
}
}
uniqHeaders := []*DBHeader{}
for _, header := range uniqHeadersByHash {
uniqHeaders = append(uniqHeaders, header)
}
foundHeaders = uniqHeaders
log.Info("saving headers", "len", len(uniqHeaders), "lastBlockNumber", to,
"balance", c.balanceCache.ReadCachedBalance(c.account, to),
"nonce", c.balanceCache.ReadCachedNonce(c.account, to))
err = c.db.SaveBlocks(c.chainClient.ChainID, c.account, uniqHeaders)
if err != nil {
c.error = err
// return err
return nil, nil
}
sort.SliceStable(foundHeaders, func(i, j int) bool {
return foundHeaders[i].Number.Cmp(foundHeaders[j].Number) == 1
})
}
// }
c.resFromBlock = newFromBlock
c.startBlockNumber = startBlock
log.Info("end findBlocksCommand checkRange", "c.startBlock", c.startBlockNumber, "newFromBlock", newFromBlock.Number,
"toBlockNumber", to, "c.resFromBlock", c.resFromBlock.Number)
return
}
func (c *findBlocksCommand) loadFirstKnownBlockNumber() (*big.Int, error) {
blockInfo, err := c.blockDAO.getBlockRange(c.chainClient.ChainID, c.account)
if err != nil {
log.Error("failed to load block ranges from database", "chain", c.chainClient.ChainID, "account", c.account, "error", err)
return nil, err
}
if blockInfo != nil {
log.Info("blockInfo for", "address", c.account, "chain", c.chainClient.ChainID, "Start",
blockInfo.Start, "FirstKnown", blockInfo.FirstKnown, "LastKnown", blockInfo.LastKnown)
// Check if we have fetched all blocks for this account
if blockInfo.FirstKnown != nil && blockInfo.Start != nil && blockInfo.Start.Cmp(blockInfo.FirstKnown) >= 0 {
log.Info("all blocks fetched", "chain", c.chainClient.ChainID, "account", c.account)
return blockInfo.FirstKnown, errors.New(allBlocksLoaded)
}
return blockInfo.FirstKnown, nil
}
log.Info("no blockInfo for", "address", c.account, "chain", c.chainClient.ChainID)
return nil, nil
}
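loadFirstKnownBlockNumber signals completion via a sentinel error that Run matches by comparing err.Error() against the allBlocksLoaded string. A more conventional variant, assuming nothing relies on the string form, would be a comparable sentinel checked with errors.Is (in the standard library since Go 1.13, and mirrored by pkg/errors v0.9+):

var errAllBlocksLoaded = errors.New("all blocks loaded")

// in Run:
if err != nil {
    if !errors.Is(err, errAllBlocksLoaded) {
        c.error = err
    }
    return
}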
// run fast indexing for every account up to the canonical chain head minus a safety depth.
// every account will run it from the last synced header.
func (c *findBlocksCommand) fastIndex(ctx context.Context, bCache *balanceCache,
fromBlock *Block, toBlockNumber *big.Int) (resultingFrom *Block, headers []*DBHeader,
startBlock *big.Int, err error) {
log.Info("fast index started", "accounts", c.account, "from", fromBlock.Number, "to", toBlockNumber)
start := time.Now()
group := async.NewGroup(ctx)
command := &ethHistoricalCommand{
chainClient: c.chainClient,
balanceCache: bCache,
address: c.account,
eth: &ETHDownloader{
chainClient: c.chainClient,
accounts: []common.Address{c.account},
signer: types.NewLondonSigner(c.chainClient.ToBigInt()),
db: c.db,
},
feed: c.feed,
from: fromBlock,
to: toBlockNumber,
noLimit: c.noLimit,
threadLimit: SequentialThreadLimit,
}
group.Add(command.Command())
select {
case <-ctx.Done():
err = ctx.Err()
log.Info("fast indexer ctx Done", "error", err)
return
case <-group.WaitAsync():
if command.error != nil {
err = command.error
return
}
resultingFrom = &Block{Number: command.resultingFrom}
headers = command.foundHeaders
startBlock = command.startBlock
log.Info("fast indexer finished", "in", time.Since(start), "startBlock", command.startBlock, "resultingFrom", resultingFrom.Number, "headers", len(headers))
return
}
}
// run fast indexing for every account up to the canonical chain head minus a safety depth.
// every account will run it from the last synced header.
func (c *findBlocksCommand) fastIndexErc20(ctx context.Context, fromBlockNumber *big.Int,
toBlockNumber *big.Int) ([]*DBHeader, error) {
start := time.Now()
group := async.NewGroup(ctx)
erc20 := &erc20HistoricalCommand{
erc20: NewERC20TransfersDownloader(c.chainClient, []common.Address{c.account}, types.NewLondonSigner(c.chainClient.ToBigInt())),
chainClient: c.chainClient,
feed: c.feed,
address: c.account,
from: fromBlockNumber,
to: toBlockNumber,
foundHeaders: []*DBHeader{},
}
group.Add(erc20.Command())
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-group.WaitAsync():
headers := erc20.foundHeaders
log.Info("fast indexer Erc20 finished", "in", time.Since(start), "headers", len(headers))
return headers, nil
}
}
// TODO Think about how to reuse loadTransfersCommand, as it shares many members and some methods,
// but does not need to return the transfers, only save them to the DB, as there can be too many of them,
// and the logic of `loadTransfersLoop` differs from `loadTransfers`
type loadAllTransfersCommand struct {
accounts []common.Address
db *Database
blockDAO *BlockDAO
chainClient *chain.ClientWithFallback
blocksByAddress map[common.Address][]*big.Int
transactionManager *TransactionManager
blocksLimit int
}
func (c *loadAllTransfersCommand) Command() async.Command {
return async.FiniteCommand{
Interval: 5 * time.Second,
Runable: c.Run,
}.Run
}
func (c *loadAllTransfersCommand) Run(parent context.Context) error {
return loadTransfersLoop(parent, c.accounts, c.blockDAO, c.db, c.chainClient, c.blocksLimit, c.blocksByAddress, c.transactionManager)
}
type loadBlocksAndTransfersCommand struct {
accounts []common.Address
db *Database
blockRangeDAO *BlockRangeSequentialDAO
blockDAO *BlockDAO
erc20 *ERC20TransfersDownloader
chainClient *chain.ClientWithFallback
feed *event.Feed
balanceCache *balanceCache
errorsCount int
// nonArchivalRPCNode bool // TODO Make use of it
transactionManager *TransactionManager
}
func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) error {
log.Info("start load all transfers command", "chain", c.chainClient.ChainID)
ctx := parent
if c.feed != nil {
c.feed.Send(walletevent.Event{
Type: EventFetchingRecentHistory,
Accounts: c.accounts,
})
}
if c.balanceCache == nil {
c.balanceCache = newBalanceCache() // TODO: do we need to keep balanceCache in memory? What about sharing it with other packages?
}
group := async.NewGroup(ctx)
for _, address := range c.accounts {
log.Info("start findBlocks command", "chain", c.chainClient.ChainID)
fbc := &findBlocksCommand{
account: address,
db: c.db,
blockDAO: c.blockRangeDAO,
chainClient: c.chainClient,
balanceCache: c.balanceCache,
feed: c.feed,
noLimit: false,
transactionManager: c.transactionManager,
}
group.Add(fbc.Command())
}
txCommand := &loadAllTransfersCommand{
accounts: c.accounts,
db: c.db,
blockDAO: c.blockDAO,
chainClient: c.chainClient,
transactionManager: c.transactionManager,
blocksLimit: noBlockLimit, // load transfers from all `unloaded` blocks
}
group.Add(txCommand.Command())
select {
case <-ctx.Done():
return ctx.Err()
case <-group.WaitAsync():
log.Info("end load all transfers command", "chain", c.chainClient.ChainID)
return nil
}
}
func (c *loadBlocksAndTransfersCommand) Command() async.Command {
return async.InfiniteCommand{
Interval: 5 * time.Second,
Runable: c.Run,
}.Run
}
func loadTransfersLoop(ctx context.Context, accounts []common.Address, blockDAO *BlockDAO, db *Database,
chainClient *chain.ClientWithFallback, blocksLimitPerAccount int, blocksByAddress map[common.Address][]*big.Int,
transactionManager *TransactionManager) error {
log.Info("loadTransfers start", "accounts", accounts, "chain", chainClient.ChainID, "limit", blocksLimitPerAccount)
start := time.Now()
group := async.NewGroup(ctx)
for _, address := range accounts {
// Take blocks from the cache if available, ignoring the limit
// If no blocks are available in cache, take blocks from DB respecting the limit
// If no limit is set, take all blocks from DB
blocks, ok := blocksByAddress[address]
commands := []*transfersCommand{}
for {
if !ok {
blocks, _ = blockDAO.GetBlocksByAddress(chainClient.ChainID, address, numberOfBlocksCheckedPerIteration)
}
for _, block := range blocks {
transfers := &transfersCommand{
db: db,
chainClient: chainClient,
address: address,
eth: &ETHDownloader{
chainClient: chainClient,
accounts: []common.Address{address},
signer: types.NewLondonSigner(chainClient.ToBigInt()),
db: db,
},
blockNum: block,
transactionManager: transactionManager,
}
commands = append(commands, transfers)
group.Add(transfers.Command())
}
// We need to wait until the retrieved blocks are processed, otherwise
// they will be retrieved again in the next iteration
// It limits transfer loading to a single account at a time
select {
case <-ctx.Done():
log.Info("loadTransfers transfersCommand error", "chain", chainClient.ChainID, "address", address, "error", ctx.Err())
continue
// return nil, ctx.Err()
case <-group.WaitAsync():
// TODO Remove when done debugging
transfers := []Transfer{}
for _, command := range commands {
if len(command.fetchedTransfers) == 0 {
continue
}
transfers = append(transfers, command.fetchedTransfers...)
}
log.Info("loadTransfers finished for account", "address", address, "in", time.Since(start), "chain", chainClient.ChainID, "transfers", len(transfers), "limit", blocksLimitPerAccount)
}
log.Info("loadTransfers after select", "chain", chainClient.ChainID, "address", address, "blocks.len", len(blocks))
if ok || len(blocks) == 0 ||
(blocksLimitPerAccount > noBlockLimit && len(blocks) >= blocksLimitPerAccount) {
log.Info("loadTransfers breaking loop on block limits reached or 0 blocks", "chain", chainClient.ChainID, "address", address, "limit", blocksLimitPerAccount, "blocks", len(blocks))
break
}
}
}
return nil
}
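Per account, the loop above drains unloaded blocks in batches of numberOfBlocksCheckedPerIteration, waits for the batch to finish, and repeats until the DB returns no blocks or the per-account limit is hit. A control-flow sketch with hypothetical helper names:

for {
    blocks := fetchUnloadedBlocks(account, numberOfBlocksCheckedPerIteration) // hypothetical: wraps blockDAO.GetBlocksByAddress
    runAndWait(blocks)                                                        // hypothetical: one transfersCommand per block
    if len(blocks) == 0 || (blocksLimit > noBlockLimit && len(blocks) >= blocksLimit) {
        break // nothing left to load, or per-account limit reached
    }
}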

View File

@ -16,6 +16,7 @@ import (
const ( const (
NoThreadLimit uint32 = 0 NoThreadLimit uint32 = 0
SequentialThreadLimit uint32 = 10
) )
// NewConcurrentDownloader creates ConcurrentDownloader instance. // NewConcurrentDownloader creates ConcurrentDownloader instance.
@ -90,24 +91,45 @@ type Downloader interface {
GetTransfersByNumber(context.Context, *big.Int) ([]Transfer, error) GetTransfersByNumber(context.Context, *big.Int) ([]Transfer, error)
} }
// Returns new block ranges that contain transfers and found block headers that contain transfers. // Returns new block ranges that contain transfers and found block headers that contain transfers, and a block where
func checkRanges(parent context.Context, client BalanceReader, cache BalanceCache, downloader Downloader, // the beginning of transfers history was detected
account common.Address, ranges [][]*big.Int) ([][]*big.Int, []*DBHeader, error) { func checkRangesWithStartBlock(parent context.Context, client BalanceReader, cache BalanceCache, downloader Downloader,
account common.Address, ranges [][]*big.Int, threadLimit uint32, startBlock *big.Int) (resRanges [][]*big.Int,
headers []*DBHeader, newStartBlock *big.Int, err error) {
log.Debug("start checkRanges", "account", account.Hex(), "ranges len", len(ranges))
ctx, cancel := context.WithTimeout(parent, 30*time.Second) ctx, cancel := context.WithTimeout(parent, 30*time.Second)
defer cancel() defer cancel()
c := NewConcurrentDownloader(ctx, NoThreadLimit) c := NewConcurrentDownloader(ctx, threadLimit)
newStartBlock = startBlock
for _, blocksRange := range ranges { for _, blocksRange := range ranges {
from := blocksRange[0] from := blocksRange[0]
to := blocksRange[1] to := blocksRange[1]
if startBlock != nil {
if to.Cmp(newStartBlock) <= 0 {
log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock)
continue
}
}
c.Add(func(ctx context.Context) error { c.Add(func(ctx context.Context) error {
if from.Cmp(to) >= 0 { if from.Cmp(to) >= 0 {
return nil return nil
} }
log.Debug("eth transfers comparing blocks", "from", from, "to", to) log.Debug("eth transfers comparing blocks", "from", from, "to", to)
if startBlock != nil {
if to.Cmp(startBlock) <= 0 {
log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock)
return nil
}
}
lb, err := cache.BalanceAt(ctx, client, account, from) lb, err := cache.BalanceAt(ctx, client, account, from)
if err != nil { if err != nil {
return err return err
@ -126,6 +148,16 @@ func checkRanges(parent context.Context, client BalanceReader, cache BalanceCach
// if nonce is zero in a newer block then there is no need to check an older one // if nonce is zero in a newer block then there is no need to check an older one
if *hn == 0 { if *hn == 0 {
log.Debug("zero nonce", "to", to) log.Debug("zero nonce", "to", to)
if startBlock != nil {
if hb.Cmp(big.NewInt(0)) == 0 { // balance is 0 and nonce is 0; we stop checking further, and this becomes the start block (even though the real one can be later)
if to.Cmp(newStartBlock) > 0 {
log.Debug("found possible start block, we should not search back", "block", to)
newStartBlock = to // increase newStartBlock if we found a new higher block
}
}
}
return nil return nil
} }
@ -154,48 +186,51 @@ func checkRanges(parent context.Context, client BalanceReader, cache BalanceCach
} }
log.Debug("balances are not equal", "from", from, "mid", mid, "to", to) log.Debug("balances are not equal", "from", from, "mid", mid, "to", to)
c.PushRange([]*big.Int{from, mid})
c.PushRange([]*big.Int{mid, to}) c.PushRange([]*big.Int{mid, to})
c.PushRange([]*big.Int{from, mid})
return nil return nil
}) })
} }
select { select {
case <-c.WaitAsync(): case <-c.WaitAsync():
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, errDownloaderStuck return nil, nil, nil, errDownloaderStuck
} }
if c.Error() != nil { if c.Error() != nil {
return nil, nil, errors.Wrap(c.Error(), "failed to download transfers using concurrent downloader") return nil, nil, nil, errors.Wrap(c.Error(), "failed to download transfers using concurrent downloader")
} }
return c.GetRanges(), c.GetHeaders(), nil log.Debug("end checkRanges", "account", account.Hex(), "newStartBlock", newStartBlock)
return c.GetRanges(), c.GetHeaders(), newStartBlock, nil
} }
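checkRangesWithStartBlock narrows ranges by comparing account state at each range's endpoints: identical state means the range is assumed transfer-free, otherwise it is split at the midpoint and both halves are re-queued, with the zero-balance/zero-nonce case promoted to a candidate start block. A self-contained sketch of the bisection idea (hypothetical balanceAt helper; the real code also compares nonces to catch transfers that leave the balance unchanged):

func bisect(balanceAt func(*big.Int) *big.Int, from, to *big.Int, found func(*big.Int)) {
    if from.Cmp(to) >= 0 || balanceAt(from).Cmp(balanceAt(to)) == 0 {
        return // empty range, or same balance at both ends: assume no transfers inside
    }
    if new(big.Int).Sub(to, from).Cmp(big.NewInt(1)) == 0 {
        found(to) // adjacent blocks with different balances: 'to' contains a transfer
        return
    }
    mid := new(big.Int).Add(from, to)
    mid.Rsh(mid, 1) // midpoint
    bisect(balanceAt, from, mid, found)
    bisect(balanceAt, mid, to, found)
}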
func findBlocksWithEthTransfers(parent context.Context, client BalanceReader, cache BalanceCache, downloader Downloader, func findBlocksWithEthTransfers(parent context.Context, client BalanceReader, cache BalanceCache, downloader Downloader,
account common.Address, low, high *big.Int, noLimit bool) (from *big.Int, headers []*DBHeader, err error) { account common.Address, low, high *big.Int, noLimit bool, threadLimit uint32) (from *big.Int, headers []*DBHeader, resStartBlock *big.Int, err error) {
log.Debug("findBlocksWithEthTranfers start", "account", account, "low", low, "high", high, "noLimit", noLimit)
ranges := [][]*big.Int{{low, high}} ranges := [][]*big.Int{{low, high}}
minBlock := big.NewInt(low.Int64()) minBlock := big.NewInt(low.Int64())
headers = []*DBHeader{} headers = []*DBHeader{}
var lvl = 1 var lvl = 1
for len(ranges) > 0 && lvl <= 30 { resStartBlock = big.NewInt(0)
log.Debug("check blocks ranges", "lvl", lvl, "ranges len", len(ranges))
lvl++
newRanges, newHeaders, err := checkRanges(parent, client, cache, downloader, account, ranges)
for len(ranges) > 0 && lvl <= 30 {
log.Info("check blocks ranges", "lvl", lvl, "ranges len", len(ranges))
lvl++
// Check if there are transfers in the blocks in these ranges. To do that, nonce and balance are checked;
// the block ranges that contain transfers are returned
newRanges, newHeaders, strtBlock, err := checkRangesWithStartBlock(parent, client, cache,
downloader, account, ranges, threadLimit, resStartBlock)
resStartBlock = strtBlock
if err != nil { if err != nil {
log.Info("check ranges end", "err", err) return nil, nil, nil, err
return nil, nil, err
} }
headers = append(headers, newHeaders...) headers = append(headers, newHeaders...)
if len(newRanges) > 0 { if len(newRanges) > 0 {
log.Debug("found new ranges", "account", account, "lvl", lvl, "new ranges len", len(newRanges)) log.Info("found new ranges", "account", account, "lvl", lvl, "new ranges len", len(newRanges))
} }
if len(newRanges) > 60 && !noLimit { if len(newRanges) > 60 && !noLimit {
sort.SliceStable(newRanges, func(i, j int) bool { sort.SliceStable(newRanges, func(i, j int) bool {
@ -209,6 +244,5 @@ func findBlocksWithEthTransfers(parent context.Context, client BalanceReader, ca
ranges = newRanges ranges = newRanges
} }
log.Debug("findBlocksWithEthTranfers end", "account", account, "minBlock", minBlock, "headers len", len(headers)) return minBlock, headers, resStartBlock, err
return minBlock, headers, err
} }

View File

@ -127,9 +127,9 @@ func TestConcurrentEthDownloader(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel() defer cancel()
concurrent := NewConcurrentDownloader(ctx, 0) concurrent := NewConcurrentDownloader(ctx, 0)
_, headers, _ := findBlocksWithEthTransfers( _, headers, _, _ := findBlocksWithEthTransfers(
ctx, tc.options.balances, newBalanceCache(), tc.options.batches, ctx, tc.options.balances, newBalanceCache(), tc.options.batches,
common.Address{}, zero, tc.options.last, false) common.Address{}, zero, tc.options.last, false, NoThreadLimit)
concurrent.Wait() concurrent.Wait()
require.NoError(t, concurrent.Error()) require.NoError(t, concurrent.Error())
rst := concurrent.Get() rst := concurrent.Get()

View File

@ -20,6 +20,7 @@ const (
ReactorNotStarted string = "reactor not started" ReactorNotStarted string = "reactor not started"
NonArchivalNodeBlockChunkSize = 100 NonArchivalNodeBlockChunkSize = 100
DefaultNodeBlockChunkSize = 10000
) )
var errAlreadyRunning = errors.New("already running") var errAlreadyRunning = errors.New("already running")
@ -133,10 +134,6 @@ func (s *OnDemandFetchStrategy) getTransfersByAddress(ctx context.Context, chain
} }
transfersCount := int64(len(rst)) transfersCount := int64(len(rst))
chainClient, err := getChainClientByID(s.chainClients, chainID)
if err != nil {
return nil, err
}
if fetchMore && limit > transfersCount { if fetchMore && limit > transfersCount {
@ -151,6 +148,11 @@ func (s *OnDemandFetchStrategy) getTransfersByAddress(ctx context.Context, chain
return rst, nil return rst, nil
} }
chainClient, err := getChainClientByID(s.chainClients, chainID)
if err != nil {
return nil, err
}
from, err := findFirstRange(ctx, address, block, chainClient) from, err := findFirstRange(ctx, address, block, chainClient)
if err != nil { if err != nil {
if nonArchivalNodeError(err) { if nonArchivalNodeError(err) {
@ -204,6 +206,7 @@ func (s *OnDemandFetchStrategy) getTransfersByAddress(ctx context.Context, chain
blockDAO: s.blockDAO, blockDAO: s.blockDAO,
chainClient: chainClient, chainClient: chainClient,
transactionManager: s.transactionManager, transactionManager: s.transactionManager,
blocksLimit: numberOfBlocksCheckedPerIteration,
} }
err = txCommand.Command()(ctx) err = txCommand.Command()(ctx)
@ -265,14 +268,14 @@ func (r *Reactor) createFetchStrategy(chainClients map[uint64]*chain.ClientWithF
accounts []common.Address, fetchType FetchStrategyType) HistoryFetcher { accounts []common.Address, fetchType FetchStrategyType) HistoryFetcher {
if fetchType == SequentialFetchStrategyType { if fetchType == SequentialFetchStrategyType {
return &SequentialFetchStrategy{ return NewSequentialFetchStrategy(
db: r.db, r.db,
feed: r.feed, r.blockDAO,
blockDAO: r.blockDAO, r.feed,
transactionManager: r.transactionManager, r.transactionManager,
chainClients: chainClients, chainClients,
accounts: accounts, accounts,
} )
} }
return &OnDemandFetchStrategy{ return &OnDemandFetchStrategy{

View File

@ -8,10 +8,26 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/rpc/chain"
"github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/async"
) )
func NewSequentialFetchStrategy(db *Database, blockDAO *BlockDAO, feed *event.Feed,
transactionManager *TransactionManager,
chainClients map[uint64]*chain.ClientWithFallback,
accounts []common.Address) *SequentialFetchStrategy {
return &SequentialFetchStrategy{
db: db,
blockDAO: blockDAO,
feed: feed,
transactionManager: transactionManager,
chainClients: chainClients,
accounts: accounts,
}
}
type SequentialFetchStrategy struct { type SequentialFetchStrategy struct {
db *Database db *Database
blockDAO *BlockDAO blockDAO *BlockDAO
@ -24,22 +40,15 @@ type SequentialFetchStrategy struct {
} }
func (s *SequentialFetchStrategy) newCommand(chainClient *chain.ClientWithFallback, func (s *SequentialFetchStrategy) newCommand(chainClient *chain.ClientWithFallback,
// accounts []common.Address) *loadAllTransfersCommand {
accounts []common.Address) async.Commander { accounts []common.Address) async.Commander {
signer := types.NewLondonSigner(chainClient.ToBigInt()) signer := types.NewLondonSigner(chainClient.ToBigInt())
// ctl := &loadAllTransfersCommand{ ctl := &loadBlocksAndTransfersCommand{
ctl := &controlCommand{ // TODO Will be replaced by loadAllTransfersCommand in upcoming commit
db: s.db, db: s.db,
chainClient: chainClient, chainClient: chainClient,
accounts: accounts, accounts: accounts,
blockRangeDAO: &BlockRangeSequentialDAO{s.db.client},
blockDAO: s.blockDAO, blockDAO: s.blockDAO,
eth: &ETHDownloader{
chainClient: chainClient,
accounts: accounts,
signer: signer,
db: s.db,
},
erc20: NewERC20TransfersDownloader(chainClient, accounts, signer), erc20: NewERC20TransfersDownloader(chainClient, accounts, signer),
feed: s.feed, feed: s.feed,
errorsCount: 0, errorsCount: 0,
@ -81,9 +90,18 @@ func (s *SequentialFetchStrategy) kind() FetchStrategyType {
return SequentialFetchStrategyType return SequentialFetchStrategyType
} }
// TODO: remove fetchMore parameter from here and interface, it is used by OnDemandFetchStrategy only
func (s *SequentialFetchStrategy) getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int, func (s *SequentialFetchStrategy) getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int,
limit int64, fetchMore bool) ([]Transfer, error) { limit int64, fetchMore bool) ([]Transfer, error) {
// TODO: implement - load from database log.Info("[WalletAPI:: GetTransfersByAddress] get transfers for an address", "address", address, "fetchMore", fetchMore,
return []Transfer{}, nil "chainID", chainID, "toBlock", toBlock, "limit", limit)
rst, err := s.db.GetTransfersByAddress(chainID, address, toBlock, limit)
if err != nil {
log.Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", "err", err)
return nil, err
}
return rst, nil
} }
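With the constructor in place, wiring the strategy from within the package and reading transfers straight from the database looks like this; a minimal usage sketch, assuming the dependency identifiers are already in scope:

strategy := NewSequentialFetchStrategy(db, blockDAO, feed, transactionManager, chainClients, accounts)
transfers, err := strategy.getTransfersByAddress(ctx, chainID, address, toBlock, limit, false)
if err != nil {
    // handle DB read failure
}
_ = transfers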