feat: backoff wallet community fetches after a failure

This commit is contained in:
Dario Gabriel Lipicar 2023-11-16 01:18:03 -03:00 committed by dlipicar
parent e17d4606b1
commit a51f8aa13c
8 changed files with 247 additions and 97 deletions

View File

@ -17,7 +17,7 @@ import (
const ( const (
fetchLimit = 50 // Limit number of collectibles we fetch per provider call fetchLimit = 50 // Limit number of collectibles we fetch per provider call
accountOwnershipUpdateInterval = 30 * time.Minute accountOwnershipUpdateInterval = 60 * time.Minute
accountOwnershipUpdateDelayInterval = 30 * time.Second accountOwnershipUpdateDelayInterval = 30 * time.Second
) )

View File

@ -175,7 +175,7 @@ func (c *Controller) stopPeriodicalOwnershipFetch() {
// Starts (or restarts) periodical fetching for the given account address for all chains // Starts (or restarts) periodical fetching for the given account address for all chains
func (c *Controller) startPeriodicalOwnershipFetchForAccount(address common.Address) error { func (c *Controller) startPeriodicalOwnershipFetchForAccount(address common.Address) error {
log.Debug("wallet.api.collectibles.Controller", "Start periodical fetching", "address", address) log.Debug("wallet.api.collectibles.Controller Start periodical fetching", "address", address)
networks, err := c.networkManager.Get(false) networks, err := c.networkManager.Get(false)
if err != nil { if err != nil {
@ -204,7 +204,7 @@ func (c *Controller) startPeriodicalOwnershipFetchForAccount(address common.Addr
// Starts (or restarts) periodical fetching for the given account address for all chains // Starts (or restarts) periodical fetching for the given account address for all chains
func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID walletCommon.ChainID, delayed bool) error { func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID walletCommon.ChainID, delayed bool) error {
log.Debug("wallet.api.collectibles.Controller", "Start periodical fetching", "address", address, "chainID", chainID, "delayed", delayed) log.Debug("wallet.api.collectibles.Controller Start periodical fetching", "address", address, "chainID", chainID, "delayed", delayed)
if !c.isPeriodicalOwnershipFetchRunning() { if !c.isPeriodicalOwnershipFetchRunning() {
return errors.New("periodical fetch not initialized") return errors.New("periodical fetch not initialized")
@ -240,7 +240,7 @@ func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address c
// Stop periodical fetching for the given account address for all chains // Stop periodical fetching for the given account address for all chains
func (c *Controller) stopPeriodicalOwnershipFetchForAccount(address common.Address) error { func (c *Controller) stopPeriodicalOwnershipFetchForAccount(address common.Address) error {
log.Debug("wallet.api.collectibles.Controller", "Stop periodical fetching", "address", address) log.Debug("wallet.api.collectibles.Controller Stop periodical fetching", "address", address)
if !c.isPeriodicalOwnershipFetchRunning() { if !c.isPeriodicalOwnershipFetchRunning() {
return errors.New("periodical fetch not initialized") return errors.New("periodical fetch not initialized")
@ -260,7 +260,7 @@ func (c *Controller) stopPeriodicalOwnershipFetchForAccount(address common.Addre
} }
func (c *Controller) stopPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID walletCommon.ChainID) error { func (c *Controller) stopPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID walletCommon.ChainID) error {
log.Debug("wallet.api.collectibles.Controller", "Stop periodical fetching", "address", address, "chainID", chainID) log.Debug("wallet.api.collectibles.Controller Stop periodical fetching", "address", address, "chainID", chainID)
if !c.isPeriodicalOwnershipFetchRunning() { if !c.isPeriodicalOwnershipFetchRunning() {
return errors.New("periodical fetch not initialized") return errors.New("periodical fetch not initialized")
@ -417,7 +417,7 @@ func (c *Controller) notifyCommunityCollectiblesReceived(ownedCollectibles Owned
continue continue
} }
communityInfo, err := c.communityDB.GetCommunityInfo(communityID) communityInfo, _, err := c.communityDB.GetCommunityInfo(communityID)
if err != nil { if err != nil {
log.Error("Error fetching community info", "error", err) log.Error("Error fetching community info", "error", err)

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"database/sql" "database/sql"
"errors" "errors"
"fmt"
"math/big" "math/big"
"net/http" "net/http"
"strings" "strings"
@ -26,6 +27,7 @@ import (
) )
const requestTimeout = 5 * time.Second const requestTimeout = 5 * time.Second
// failedCommunityFetchRetryDelay is how long we back off before retrying a
// community info fetch that previously failed.
const failedCommunityFetchRetryDelay = time.Hour
const hystrixContractOwnershipClientName = "contractOwnershipClient" const hystrixContractOwnershipClientName = "contractOwnershipClient"
@ -616,18 +618,56 @@ func (o *Manager) fillCommunityID(asset *thirdparty.FullCollectibleData) error {
return nil return nil
} }
func (o *Manager) fillCommunityInfo(communityID string, communityAssets []*thirdparty.FullCollectibleData) error { func (o *Manager) mustFetchCommunityInfo(communityID string) bool {
// See if we have cached data
_, state, err := o.communityDataDB.GetCommunityInfo(communityID)
if err != nil {
return true
}
// If we don't have a state, this community has never been fetched before
if state == nil {
return true
}
// If the last fetch was successful, we can safely refresh our cache
if state.LastUpdateSuccesful {
return true
}
// If the last fetch was not successful, we should only retry after a delay
if time.Unix(int64(state.LastUpdateTimestamp), 0).Add(failedCommunityFetchRetryDelay).Before(time.Now()) {
return true
}
return false
}
func (o *Manager) fetchCommunityInfo(communityID string) (*thirdparty.CommunityInfo, error) {
if !o.mustFetchCommunityInfo(communityID) {
return nil, fmt.Errorf("backing off fetchCommunityInfo for id: %s", communityID)
}
communityInfo, err := o.communityInfoProvider.FetchCommunityInfo(communityID) communityInfo, err := o.communityInfoProvider.FetchCommunityInfo(communityID)
if err != nil {
dbErr := o.communityDataDB.SetCommunityInfo(communityID, nil)
if dbErr != nil {
log.Error("SetCommunityInfo failed", "communityID", communityID, "err", dbErr)
}
return nil, err
}
err = o.communityDataDB.SetCommunityInfo(communityID, communityInfo)
return communityInfo, err
}
func (o *Manager) fillCommunityInfo(communityID string, communityAssets []*thirdparty.FullCollectibleData) error {
communityInfo, err := o.fetchCommunityInfo(communityID)
if err != nil { if err != nil {
return err return err
} }
if communityInfo != nil { if communityInfo != nil {
err := o.communityDataDB.SetCommunityInfo(communityID, *communityInfo)
if err != nil {
return err
}
for _, communityAsset := range communityAssets { for _, communityAsset := range communityAssets {
err := o.communityInfoProvider.FillCollectibleMetadata(communityAsset) err := o.communityInfoProvider.FillCollectibleMetadata(communityAsset)
if err != nil { if err != nil {

View File

@ -260,7 +260,7 @@ func (s *Service) fullCollectiblesDataToHeaders(data []thirdparty.FullCollectibl
header := fullCollectibleDataToHeader(c) header := fullCollectibleDataToHeader(c)
if c.CollectibleData.CommunityID != "" { if c.CollectibleData.CommunityID != "" {
communityInfo, err := s.communityDB.GetCommunityInfo(c.CollectibleData.CommunityID) communityInfo, _, err := s.communityDB.GetCommunityInfo(c.CollectibleData.CommunityID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -282,7 +282,7 @@ func (s *Service) fullCollectiblesDataToDetails(data []thirdparty.FullCollectibl
details := fullCollectibleDataToDetails(c) details := fullCollectibleDataToDetails(c)
if c.CollectibleData.CommunityID != "" { if c.CollectibleData.CommunityID != "" {
communityInfo, err := s.communityDB.GetCommunityInfo(c.CollectibleData.CommunityID) communityInfo, _, err := s.communityDB.GetCommunityInfo(c.CollectibleData.CommunityID)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -3,6 +3,7 @@ package community
import ( import (
"database/sql" "database/sql"
"fmt" "fmt"
"time"
"github.com/status-im/status-go/services/wallet/thirdparty" "github.com/status-im/status-go/services/wallet/thirdparty"
) )
@ -17,10 +18,18 @@ func NewDataDB(sqlDb *sql.DB) *DataDB {
} }
} }
// InfoState records when community info was last refreshed and whether that
// refresh succeeded, so failed fetches can be retried with a backoff.
type InfoState struct {
	LastUpdateTimestamp uint64
	// NOTE(review): "Succesful" is a typo, kept as-is for compatibility with
	// existing callers of this field.
	LastUpdateSuccesful bool
}

// Column lists for the community info cache tables.
const (
	communityInfoColumns            = "id, name, color, image"
	selectCommunityInfoColumns      = "name, color, image"
	communityInfoStateColumns       = "id, last_update_timestamp, last_update_successful"
	selectCommunityInfoStateColumns = "last_update_timestamp, last_update_successful"
)
func (o *DataDB) SetCommunityInfo(id string, c *thirdparty.CommunityInfo) (err error) {
tx, err := o.db.Begin() tx, err := o.db.Begin()
if err != nil { if err != nil {
return err return err
@ -33,45 +42,87 @@ func (o *DataDB) SetCommunityInfo(id string, c thirdparty.CommunityInfo) (err er
_ = tx.Rollback() _ = tx.Rollback()
}() }()
update, err := tx.Prepare(fmt.Sprintf(`INSERT OR REPLACE INTO community_data_cache (%s) setState, err := tx.Prepare(fmt.Sprintf(`INSERT OR REPLACE INTO community_data_cache_state (%s)
VALUES (?, ?, ?, ?)`, communityDataColumns)) VALUES (?, ?, ?)`, communityInfoStateColumns))
if err != nil { if err != nil {
return err return err
} }
_, err = update.Exec( valid := c != nil
_, err = setState.Exec(
id,
time.Now().Unix(),
valid,
)
if err != nil {
return err
}
if valid {
setInfo, err := tx.Prepare(fmt.Sprintf(`INSERT OR REPLACE INTO community_data_cache (%s)
VALUES (?, ?, ?, ?)`, communityInfoColumns))
if err != nil {
return err
}
_, err = setInfo.Exec(
id, id,
c.CommunityName, c.CommunityName,
c.CommunityColor, c.CommunityColor,
c.CommunityImage, c.CommunityImage,
) )
if err != nil {
return err return err
} }
func (o *DataDB) GetCommunityInfo(id string) (*thirdparty.CommunityInfo, error) {
var ret thirdparty.CommunityInfo
getData, err := o.db.Prepare(fmt.Sprintf(`SELECT %s
FROM community_data_cache
WHERE id=?`, selectCommunityDataColumns))
if err != nil {
return nil, err
} }
row := getData.QueryRow(id) return nil
}
func (o *DataDB) GetCommunityInfo(id string) (*thirdparty.CommunityInfo, *InfoState, error) {
var info thirdparty.CommunityInfo
var state InfoState
var row *sql.Row
getState, err := o.db.Prepare(fmt.Sprintf(`SELECT %s
FROM community_data_cache_state
WHERE id=?`, selectCommunityInfoStateColumns))
if err != nil {
return nil, nil, err
}
row = getState.QueryRow(id)
err = row.Scan( err = row.Scan(
&ret.CommunityName, &state.LastUpdateTimestamp,
&ret.CommunityColor, &state.LastUpdateSuccesful,
&ret.CommunityImage,
) )
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, nil return nil, nil, nil
} else if err != nil { } else if err != nil {
return nil, err return nil, nil, err
} }
return &ret, nil getInfo, err := o.db.Prepare(fmt.Sprintf(`SELECT %s
FROM community_data_cache
WHERE id=?`, selectCommunityInfoColumns))
if err != nil {
return nil, nil, err
}
row = getInfo.QueryRow(id)
err = row.Scan(
&info.CommunityName,
&info.CommunityColor,
&info.CommunityImage,
)
if err == sql.ErrNoRows {
return nil, &state, nil
} else if err != nil {
return nil, nil, err
}
return &info, &state, nil
} }

View File

@ -39,14 +39,30 @@ func TestUpdateCommunityInfo(t *testing.T) {
defer cleanup() defer cleanup()
communityData := generateTestCommunityInfo(10) communityData := generateTestCommunityInfo(10)
for communityID, communityInfo := range communityData { extraCommunityID := "extra-community-id"
err := db.SetCommunityInfo(communityID, communityInfo)
require.NoError(t, err)
}
for communityID, communityInfo := range communityData { for communityID, communityInfo := range communityData {
communityInfoFromDB, err := db.GetCommunityInfo(communityID) communityInfo := communityInfo // Prevent lint warning G601: Implicit memory aliasing in for loop.
err := db.SetCommunityInfo(communityID, &communityInfo)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, communityInfo, *communityInfoFromDB)
} }
err := db.SetCommunityInfo(extraCommunityID, nil)
require.NoError(t, err)
for communityID, communityInfo := range communityData {
info, state, err := db.GetCommunityInfo(communityID)
require.NoError(t, err)
require.Equal(t, communityInfo, *info)
require.True(t, state.LastUpdateSuccesful)
}
info, state, err := db.GetCommunityInfo(extraCommunityID)
require.NoError(t, err)
require.Empty(t, info)
require.False(t, state.LastUpdateSuccesful)
randomCommunityID := "random-community-id"
info, state, err = db.GetCommunityInfo(randomCommunityID)
require.NoError(t, err)
require.Empty(t, info)
require.Empty(t, state)
} }

View File

@ -10,6 +10,7 @@
// 1696853635_input_data.up.sql (23.14kB) // 1696853635_input_data.up.sql (23.14kB)
// 1698117918_add_community_id_to_tokens.up.sql (61B) // 1698117918_add_community_id_to_tokens.up.sql (61B)
// 1698257443_add_community_metadata_to_wallet_db.up.sql (323B) // 1698257443_add_community_metadata_to_wallet_db.up.sql (323B)
// 1699987075_add_timestamp_and_state_to_community_data_cache.up.sql (865B)
// doc.go (74B) // doc.go (74B)
package migrations package migrations
@ -20,7 +21,6 @@ import (
"crypto/sha256" "crypto/sha256"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -30,7 +30,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) { func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data)) gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil { if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err) return nil, fmt.Errorf("read %q: %w", name, err)
} }
var buf bytes.Buffer var buf bytes.Buffer
@ -38,7 +38,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close() clErr := gz.Close()
if err != nil { if err != nil {
return nil, fmt.Errorf("read %q: %v", name, err) return nil, fmt.Errorf("read %q: %w", name, err)
} }
if clErr != nil { if clErr != nil {
return nil, err return nil, err
@ -94,7 +94,7 @@ func _1691753758_initialUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}}
return a, nil return a, nil
} }
@ -114,7 +114,7 @@ func _1692701329_add_collectibles_and_collections_data_cacheUpSql() (*asset, err
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}}
return a, nil return a, nil
} }
@ -134,7 +134,7 @@ func _1692701339_add_scope_to_pendingUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x36, 0x8a, 0x5e, 0xe2, 0x63, 0x15, 0x37, 0xba, 0x55, 0x18, 0xf3, 0xcc, 0xe0, 0x5, 0x84, 0xe1, 0x5b, 0xe8, 0x1, 0x32, 0x6b, 0x9f, 0x7d, 0x9f, 0xd9, 0x23, 0x6c, 0xa9, 0xb5, 0xdc, 0xf4, 0x93}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x36, 0x8a, 0x5e, 0xe2, 0x63, 0x15, 0x37, 0xba, 0x55, 0x18, 0xf3, 0xcc, 0xe0, 0x5, 0x84, 0xe1, 0x5b, 0xe8, 0x1, 0x32, 0x6b, 0x9f, 0x7d, 0x9f, 0xd9, 0x23, 0x6c, 0xa9, 0xb5, 0xdc, 0xf4, 0x93}}
return a, nil return a, nil
} }
@ -154,7 +154,7 @@ func _1694540071_add_collectibles_ownership_update_timestampUpSql() (*asset, err
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x45, 0xc7, 0xce, 0x79, 0x63, 0xbc, 0x6f, 0x83, 0x5f, 0xe2, 0x3, 0x56, 0xcc, 0x5, 0x2f, 0x85, 0xda, 0x7e, 0xea, 0xf5, 0xd2, 0xac, 0x19, 0xd4, 0xd8, 0x5e, 0xdd, 0xed, 0xe2, 0xa9, 0x97}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x45, 0xc7, 0xce, 0x79, 0x63, 0xbc, 0x6f, 0x83, 0x5f, 0xe2, 0x3, 0x56, 0xcc, 0x5, 0x2f, 0x85, 0xda, 0x7e, 0xea, 0xf5, 0xd2, 0xac, 0x19, 0xd4, 0xd8, 0x5e, 0xdd, 0xed, 0xe2, 0xa9, 0x97}}
return a, nil return a, nil
} }
@ -174,7 +174,7 @@ func _1694692748_add_raw_balance_to_token_balancesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0xe0, 0x5b, 0x42, 0xf0, 0x96, 0xa5, 0xf5, 0xed, 0xc0, 0x97, 0x88, 0xb0, 0x6d, 0xfe, 0x7d, 0x97, 0x2e, 0x17, 0xd2, 0x16, 0xbc, 0x2a, 0xf2, 0xcc, 0x67, 0x9e, 0xc5, 0x47, 0xf6, 0x69, 0x1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0xe0, 0x5b, 0x42, 0xf0, 0x96, 0xa5, 0xf5, 0xed, 0xc0, 0x97, 0x88, 0xb0, 0x6d, 0xfe, 0x7d, 0x97, 0x2e, 0x17, 0xd2, 0x16, 0xbc, 0x2a, 0xf2, 0xcc, 0x67, 0x9e, 0xc5, 0x47, 0xf6, 0x69, 0x1}}
return a, nil return a, nil
} }
@ -194,7 +194,7 @@ func _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSq
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfa, 0x2, 0xa, 0x7f, 0x4b, 0xd1, 0x3, 0xd0, 0x3, 0x29, 0x84, 0x31, 0xed, 0x49, 0x4f, 0xb1, 0x2d, 0xd7, 0x80, 0x41, 0x5b, 0xfa, 0x6, 0xae, 0xb4, 0xf6, 0x6b, 0x49, 0xee, 0x57, 0x33, 0x76}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfa, 0x2, 0xa, 0x7f, 0x4b, 0xd1, 0x3, 0xd0, 0x3, 0x29, 0x84, 0x31, 0xed, 0x49, 0x4f, 0xb1, 0x2d, 0xd7, 0x80, 0x41, 0x5b, 0xfa, 0x6, 0xae, 0xb4, 0xf6, 0x6b, 0x49, 0xee, 0x57, 0x33, 0x76}}
return a, nil return a, nil
} }
@ -214,7 +214,7 @@ func _1695932536_balance_history_v2UpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xf4, 0x14, 0x91, 0xf6, 0x5f, 0xc4, 0x9b, 0xb7, 0x83, 0x32, 0x72, 0xbe, 0x82, 0x42, 0x39, 0xa4, 0x3b, 0xc9, 0x78, 0x3d, 0xca, 0xd4, 0xbf, 0xfc, 0x7a, 0x33, 0x1e, 0xcd, 0x9e, 0xe4, 0x85}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xf4, 0x14, 0x91, 0xf6, 0x5f, 0xc4, 0x9b, 0xb7, 0x83, 0x32, 0x72, 0xbe, 0x82, 0x42, 0x39, 0xa4, 0x3b, 0xc9, 0x78, 0x3d, 0xca, 0xd4, 0xbf, 0xfc, 0x7a, 0x33, 0x1e, 0xcd, 0x9e, 0xe4, 0x85}}
return a, nil return a, nil
} }
@ -234,7 +234,7 @@ func _1696853635_input_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1696853635_input_data.up.sql", size: 23140, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x30, 0x33, 0x33, 0x55, 0xc5, 0x57, 0x2b, 0xaf, 0xef, 0x3d, 0x8d, 0x2a, 0xaa, 0x5c, 0x32, 0xd1, 0xf4, 0xd, 0x4a, 0xd0, 0x33, 0x4a, 0xe8, 0xf6, 0x8, 0x6b, 0x65, 0xcc, 0xba, 0xed, 0x42}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x30, 0x33, 0x33, 0x55, 0xc5, 0x57, 0x2b, 0xaf, 0xef, 0x3d, 0x8d, 0x2a, 0xaa, 0x5c, 0x32, 0xd1, 0xf4, 0xd, 0x4a, 0xd0, 0x33, 0x4a, 0xe8, 0xf6, 0x8, 0x6b, 0x65, 0xcc, 0xba, 0xed, 0x42}}
return a, nil return a, nil
} }
@ -254,7 +254,7 @@ func _1698117918_add_community_id_to_tokensUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1698117918_add_community_id_to_tokens.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb3, 0x82, 0xdb, 0xde, 0x3, 0x3, 0xc, 0x67, 0xf3, 0x54, 0xc4, 0xad, 0xd6, 0xce, 0x56, 0xfb, 0xc1, 0x87, 0xd7, 0xda, 0xab, 0xec, 0x1, 0xe1, 0x7d, 0xb3, 0x63, 0xd6, 0xe5, 0x5d, 0x1c, 0x15}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb3, 0x82, 0xdb, 0xde, 0x3, 0x3, 0xc, 0x67, 0xf3, 0x54, 0xc4, 0xad, 0xd6, 0xce, 0x56, 0xfb, 0xc1, 0x87, 0xd7, 0xda, 0xab, 0xec, 0x1, 0xe1, 0x7d, 0xb3, 0x63, 0xd6, 0xe5, 0x5d, 0x1c, 0x15}}
return a, nil return a, nil
} }
@ -274,11 +274,31 @@ func _1698257443_add_community_metadata_to_wallet_dbUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "1698257443_add_community_metadata_to_wallet_db.up.sql", size: 323, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xd3, 0x4, 0x25, 0xfa, 0x23, 0x1, 0x48, 0x83, 0x26, 0x20, 0xf2, 0x3d, 0xbc, 0xc1, 0xa7, 0x7c, 0x27, 0x7c, 0x1d, 0x63, 0x3, 0xa, 0xd0, 0xce, 0x47, 0x86, 0xdc, 0xa1, 0x3c, 0x2, 0x1c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x22, 0xd3, 0x4, 0x25, 0xfa, 0x23, 0x1, 0x48, 0x83, 0x26, 0x20, 0xf2, 0x3d, 0xbc, 0xc1, 0xa7, 0x7c, 0x27, 0x7c, 0x1d, 0x63, 0x3, 0xa, 0xd0, 0xce, 0x47, 0x86, 0xdc, 0xa1, 0x3c, 0x2, 0x1c}}
return a, nil return a, nil
} }
var __1699987075_add_timestamp_and_state_to_community_data_cacheUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\xcd\x6a\xdc\x30\x14\x85\xf7\x7a\x8a\xb3\xcc\x80\x67\x91\xf5\xac\x34\xf6\xf5\x54\xd4\x23\x05\x59\x81\x64\x65\x84\xac\x36\x02\xff\x84\x91\x4c\xe9\xdb\x17\xdb\xa1\x25\xe0\x31\xe9\xd6\xfe\x3e\xee\xb9\xf7\xe8\x78\xc4\xb7\xb1\x6b\x23\xdc\xd8\xf7\xd3\x10\xd2\x6f\xf4\x3e\xd9\xd6\x26\x8b\x98\x6c\xf2\x2c\xd7\xc4\x0d\xc1\xf0\x73\x45\x10\x25\xa4\x32\xa0\x17\x51\x9b\xfa\x9f\xd3\xcc\x7c\xe3\xac\x7b\xf3\xcd\x62\xe1\x81\x01\x40\x68\x61\xe8\xc5\xe0\x49\x8b\x2b\xd7\xaf\xf8\x4e\xaf\x8b\x2f\x9f\xab\x2a\x5b\x88\xce\xc6\xd4\x4c\xef\xad\x4d\xbe\x49\xa1\xf7\x31\xd9\xfe\x1d\xcf\xb2\x16\x17\x49\x05\xce\xe2\x22\xa4\xd9\x71\xe2\xe4\x9c\x8f\xf1\xc7\xd4\xe1\xac\x54\x45\x5c\xfe\x85\xd9\xe1\xc4\x98\x90\x35\x69\x03\x21\x8d\xda\x8d\x1b\xda\x6c\x3b\x4b\x76\x67\xdc\x81\x01\x35\x55\x94\x1b\xcc\xee\x63\x86\x47\x94\x5a\x5d\x37\xa7\x9c\x18\x3b\x1e\xa1\xbd\xbb\xf9\x79\xda\x16\x82\x5f\x21\xbd\xad\x27\x87\x1b\x87\x98\x6e\x36\x0c\x29\x32\x5e\x19\xd2\x1f\xd7\xdf\xf4\x34\x49\x7e\x25\xdc\xdb\x6f\xec\xda\x13\xfb\xdf\x12\xbf\x5c\xdf\x60\x7b\xbf\x32\x9f\xbf\xbb\xb1\x1b\x6f\x5b\x3f\x42\x6f\x7f\x6e\x1a\xa5\xd2\x24\x2e\x72\x9e\xf1\x10\xda\x03\x34\x95\xa4\x49\xe6\xb4\xf7\xcc\x16\x52\x49\x14\x54\x91\x21\xe4\xbc\xce\x79\x41\x5f\x2a\xfe\x73\x7d\xf3\x1e\xd9\x9a\x3a\x5b\x33\x32\xdc\xaf\xf3\xe3\xa8\x85\x56\x4f\x3b\xcd\xac\xd4\x9f\x00\x00\x00\xff\xff\x44\x65\xb5\xf4\x61\x03\x00\x00")
// _1699987075_add_timestamp_and_state_to_community_data_cacheUpSqlBytes
// decompresses the embedded migration asset and returns its raw bytes.
func _1699987075_add_timestamp_and_state_to_community_data_cacheUpSqlBytes() ([]byte, error) {
	const name = "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql"
	return bindataRead(__1699987075_add_timestamp_and_state_to_community_data_cacheUpSql, name)
}
// _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql wraps the
// decompressed migration bytes in an asset carrying file metadata and the
// integrity digest.
func _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql() (*asset, error) {
	data, err := _1699987075_add_timestamp_and_state_to_community_data_cacheUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql", size: 865, mode: os.FileMode(0644), modTime: time.Unix(1700084408, 0)}
	return &asset{bytes: data, info: info, digest: [32]uint8{0xc3, 0xee, 0x37, 0xf9, 0x7f, 0x9e, 0xfe, 0x93, 0x66, 0x2b, 0xd, 0x57, 0xf4, 0x89, 0x6c, 0x51, 0xfd, 0x14, 0xe9, 0xcd, 0xab, 0x65, 0xe7, 0xa7, 0x83, 0x7e, 0xe0, 0x5c, 0x14, 0x49, 0xf3, 0xe5}}, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00") var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
func docGoBytes() ([]byte, error) { func docGoBytes() ([]byte, error) {
@ -294,7 +314,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1699005553, 0)} info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1698840720, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil return a, nil
} }
@ -391,37 +411,33 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name. // _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){ var _bindata = map[string]func() (*asset, error){
"1691753758_initial.up.sql": _1691753758_initialUpSql, "1691753758_initial.up.sql": _1691753758_initialUpSql,
"1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql, "1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql,
"1692701339_add_scope_to_pending.up.sql": _1692701339_add_scope_to_pendingUpSql, "1692701339_add_scope_to_pending.up.sql": _1692701339_add_scope_to_pendingUpSql,
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": _1694540071_add_collectibles_ownership_update_timestampUpSql, "1694540071_add_collectibles_ownership_update_timestamp.up.sql": _1694540071_add_collectibles_ownership_update_timestampUpSql,
"1694692748_add_raw_balance_to_token_balances.up.sql": _1694692748_add_raw_balance_to_token_balancesUpSql, "1694692748_add_raw_balance_to_token_balances.up.sql": _1694692748_add_raw_balance_to_token_balancesUpSql,
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql,
"1695932536_balance_history_v2.up.sql": _1695932536_balance_history_v2UpSql, "1695932536_balance_history_v2.up.sql": _1695932536_balance_history_v2UpSql,
"1696853635_input_data.up.sql": _1696853635_input_dataUpSql, "1696853635_input_data.up.sql": _1696853635_input_dataUpSql,
"1698117918_add_community_id_to_tokens.up.sql": _1698117918_add_community_id_to_tokensUpSql, "1698117918_add_community_id_to_tokens.up.sql": _1698117918_add_community_id_to_tokensUpSql,
"1698257443_add_community_metadata_to_wallet_db.up.sql": _1698257443_add_community_metadata_to_wallet_dbUpSql, "1698257443_add_community_metadata_to_wallet_db.up.sql": _1698257443_add_community_metadata_to_wallet_dbUpSql,
"1699987075_add_timestamp_and_state_to_community_data_cache.up.sql": _1699987075_add_timestamp_and_state_to_community_data_cacheUpSql,
"doc.go": docGo, "doc.go": docGo,
} }
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain // AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata. // directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the // For example if you run go-bindata on data/... and data contains the
// following hierarchy: // following hierarchy:
//
// data/ // data/
// foo.txt // foo.txt
// img/ // img/
// a.png // a.png
// b.png // b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"}, // then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"}, // AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and // AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
@ -454,17 +470,18 @@ type bintree struct {
} }
var _bintree = &bintree{nil, map[string]*bintree{ var _bintree = &bintree{nil, map[string]*bintree{
"1691753758_initial.up.sql": &bintree{_1691753758_initialUpSql, map[string]*bintree{}}, "1691753758_initial.up.sql": {_1691753758_initialUpSql, map[string]*bintree{}},
"1692701329_add_collectibles_and_collections_data_cache.up.sql": &bintree{_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}}, "1692701329_add_collectibles_and_collections_data_cache.up.sql": {_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1692701339_add_scope_to_pending.up.sql": &bintree{_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}}, "1692701339_add_scope_to_pending.up.sql": {_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}},
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": &bintree{_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}}, "1694540071_add_collectibles_ownership_update_timestamp.up.sql": {_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}},
"1694692748_add_raw_balance_to_token_balances.up.sql": &bintree{_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}}, "1694692748_add_raw_balance_to_token_balances.up.sql": {_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}},
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": &bintree{_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}}, "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": {_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1695932536_balance_history_v2.up.sql": &bintree{_1695932536_balance_history_v2UpSql, map[string]*bintree{}}, "1695932536_balance_history_v2.up.sql": {_1695932536_balance_history_v2UpSql, map[string]*bintree{}},
"1696853635_input_data.up.sql": &bintree{_1696853635_input_dataUpSql, map[string]*bintree{}}, "1696853635_input_data.up.sql": {_1696853635_input_dataUpSql, map[string]*bintree{}},
"1698117918_add_community_id_to_tokens.up.sql": &bintree{_1698117918_add_community_id_to_tokensUpSql, map[string]*bintree{}}, "1698117918_add_community_id_to_tokens.up.sql": {_1698117918_add_community_id_to_tokensUpSql, map[string]*bintree{}},
"1698257443_add_community_metadata_to_wallet_db.up.sql": &bintree{_1698257443_add_community_metadata_to_wallet_dbUpSql, map[string]*bintree{}}, "1698257443_add_community_metadata_to_wallet_db.up.sql": {_1698257443_add_community_metadata_to_wallet_dbUpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}}, "1699987075_add_timestamp_and_state_to_community_data_cache.up.sql": {_1699987075_add_timestamp_and_state_to_community_data_cacheUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
}} }}
// RestoreAsset restores an asset under the given directory. // RestoreAsset restores an asset under the given directory.
@ -481,7 +498,7 @@ func RestoreAsset(dir, name string) error {
if err != nil { if err != nil {
return err return err
} }
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil { if err != nil {
return err return err
} }

View File

@ -0,0 +1,26 @@
-- Holds community metadata state
-- One row per community id: the unix timestamp of the last fetch attempt and
-- whether that attempt succeeded (used to back off retries after failures).
CREATE TABLE IF NOT EXISTS community_data_cache_state (
id TEXT PRIMARY KEY NOT NULL,
last_update_timestamp UNSIGNED BIGINT NOT NULL,
last_update_successful BOOLEAN NOT NULL
);
-- Seed a state row for every community already present in the cache, marking
-- it as successfully updated (timestamp 1 is a placeholder value).
INSERT INTO community_data_cache_state (id, last_update_timestamp, last_update_successful)
SELECT id, 1, 1 FROM community_data_cache;
-- Recreate community_data_cache with state constraints
-- (rename-copy-drop pattern: the table is rebuilt so its id can reference
-- community_data_cache_state with ON DELETE CASCADE)
ALTER TABLE community_data_cache RENAME TO community_data_cache_old;
CREATE TABLE IF NOT EXISTS community_data_cache (
id TEXT PRIMARY KEY NOT NULL,
name TEXT NOT NULL,
color TEXT NOT NULL,
image TEXT NOT NULL,
FOREIGN KEY(id) REFERENCES community_data_cache_state(id) ON DELETE CASCADE
);
-- Copy the existing cached rows into the rebuilt table, then drop the old one.
INSERT INTO community_data_cache
SELECT id, name, color, image
FROM community_data_cache_old;
DROP TABLE community_data_cache_old;