Implemented balance history based on transfers (#4022)

* feat(wallet): implement balance history based on fetched transfers
* Added vendor 'ttlcache'
IvanBelyakoff authored 2023-10-04 14:00:12 +02:00, committed by GitHub
parent c85a110a31
commit 9d6577049f
35 changed files with 3327 additions and 2307 deletions
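As background for the new vendor, here is a minimal sketch of the jellydator/ttlcache v3 API as exercised by this change (the key/value types and TTL are arbitrary; only calls that appear in the diff below are used):

package main

import (
	"fmt"
	"time"

	"github.com/jellydator/ttlcache/v3"
)

func main() {
	// Cache whose items expire five minutes after insertion.
	cache := ttlcache.New[uint64, string](
		ttlcache.WithTTL[uint64, string](5 * time.Minute),
	)
	go cache.Start() // background loop that deletes expired items

	cache.Set(42, "hello", ttlcache.DefaultTTL) // use the cache-wide TTL
	if item := cache.Get(42); item != nil {
		fmt.Println(item.Value(), cache.Len(), cache.Keys())
	}
	cache.DeleteAll() // manual clear, as done in cacheWithTTL.Clear below
}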

go.mod (1 change)

@@ -78,6 +78,7 @@ require (
github.com/andybalholm/brotli v1.0.5
github.com/gorilla/sessions v1.2.1
github.com/ipfs/go-log/v2 v2.5.1
github.com/jellydator/ttlcache/v3 v3.1.0
github.com/jmoiron/sqlx v1.3.5
github.com/ladydascalie/currency v1.6.0
github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8

go.sum (4 changes)

@@ -1217,6 +1217,8 @@ github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABo
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g=
github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
@@ -2221,8 +2223,8 @@ go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ=
go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=


@@ -149,15 +149,35 @@ func (api *API) GetTokensBalancesForChainIDs(ctx context.Context, chainIDs []uin
return api.s.tokenManager.GetBalances(ctx, clients, accounts, addresses)
}
func (api *API) UpdateVisibleTokens(ctx context.Context, symbols []string) error {
api.s.history.UpdateVisibleTokens(symbols)
return nil
}
// GetBalanceHistory retrieves token balance history for token identity on multiple chains
func (api *API) GetBalanceHistory(ctx context.Context, chainIDs []uint64, address common.Address, tokenSymbol string, currencySymbol string, timeInterval history.TimeInterval) ([]*history.ValuePoint, error) {
endTimestamp := time.Now().UTC().Unix()
return api.s.history.GetBalanceHistory(ctx, chainIDs, address, tokenSymbol, currencySymbol, endTimestamp, timeInterval)
log.Debug("wallet.api.GetBalanceHistory", "chainIDs", chainIDs, "address", address, "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "timeInterval", timeInterval)
var fromTimestamp uint64
now := uint64(time.Now().UTC().Unix())
switch timeInterval {
case history.BalanceHistoryAllTime:
fromTimestamp = 0
case history.BalanceHistory1Year:
fallthrough
case history.BalanceHistory6Months:
fallthrough
case history.BalanceHistory1Month:
fallthrough
case history.BalanceHistory7Days:
fromTimestamp = now - history.TimeIntervalDurationSecs(timeInterval)
default:
return nil, fmt.Errorf("unknown time interval: %v", timeInterval)
}
return api.GetBalanceHistoryRange(ctx, chainIDs, address, tokenSymbol, currencySymbol, fromTimestamp, now)
}
// GetBalanceHistoryRange retrieves token balance history for token identity on multiple chains for a time range
// 'toTimestamp' is ignored for now, but will be used in the future to limit the range of the history
func (api *API) GetBalanceHistoryRange(ctx context.Context, chainIDs []uint64, address common.Address, tokenSymbol string, currencySymbol string, fromTimestamp uint64, _ uint64) ([]*history.ValuePoint, error) {
log.Debug("wallet.api.GetBalanceHistoryRange", "chainIDs", chainIDs, "address", address, "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "fromTimestamp", fromTimestamp)
return api.s.history.GetBalanceHistory(ctx, chainIDs, address, tokenSymbol, currencySymbol, fromTimestamp)
}
func (api *API) GetTokens(ctx context.Context, chainID uint64) ([]*token.Token, error) {

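A rough caller-side sketch of the new API (hypothetical values; assumes an initialized API value and the imports used above):

	points, err := api.GetBalanceHistory(
		context.Background(),
		[]uint64{1},                 // chain IDs, e.g. Ethereum mainnet
		common.HexToAddress("0x01"), // placeholder account
		"ETH", "USD",
		history.BalanceHistory7Days,
	)
	if err != nil {
		log.Error("balance history failed", "err", err)
	}
	_ = points // one ValuePoint per sample, ready for plotting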

@@ -2,22 +2,16 @@ package balance
import (
"context"
"math"
"math/big"
"sort"
"reflect"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/status-im/status-go/rpc/chain"
)
type nonceRange struct {
nonce int64
max *big.Int
min *big.Int
}
// Reader interface for reading balance at a specified address.
type Reader interface {
BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
@@ -41,253 +35,187 @@ type CacheIface interface {
GetNonce(account common.Address, chainID uint64, blockNumber *big.Int) *int64
AddBalance(account common.Address, chainID uint64, blockNumber *big.Int, balance *big.Int)
AddNonce(account common.Address, chainID uint64, blockNumber *big.Int, nonce *int64)
BalanceSize(account common.Address, chainID uint64) int
NonceSize(account common.Address, chainID uint64) int
Clear()
}
type balanceCacheType map[common.Address]map[uint64]map[uint64]*big.Int // address->chainID->blockNumber->balance
type nonceCacheType map[common.Address]map[uint64]map[uint64]*int64 // address->chainID->blockNumber->nonce
type nonceRangesCacheType map[common.Address]map[uint64]map[int64]nonceRange // address->chainID->blockNumber->nonceRange
type sortedNonceRangesCacheType map[common.Address]map[uint64][]nonceRange // address->chainID->[]nonceRange
type addressChainMap[T any] map[common.Address]map[uint64]T // address->chainID
type Cache struct {
// balances maps an address to a map of a block number and the balance of this particular address
balances balanceCacheType
nonces nonceCacheType
nonceRanges nonceRangesCacheType
sortedRanges sortedNonceRangesCacheType
rw sync.RWMutex
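// cacheIface abstracts the per-key storage so genericCache can be specialized
// with either a plain map (mapCache) or a TTL-based cache (ttlCache).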
type cacheIface[K comparable, V any] interface {
get(K) V
set(K, V)
len() int
keys() []K
clear()
init()
}
func NewCache() *Cache {
return &Cache{
balances: make(balanceCacheType),
nonces: make(nonceCacheType),
nonceRanges: make(nonceRangesCacheType),
sortedRanges: make(sortedNonceRangesCacheType),
}
// genericCache is a generic implementation of CacheIface
type genericCache[B cacheIface[uint64, *big.Int], N cacheIface[uint64, *int64], NR cacheIface[int64, nonceRange]] struct {
nonceRangeCache[NR]
// balances maps an address and chain to a cache of a block number and the balance of this particular address on the chain
balances addressChainMap[B]
nonces addressChainMap[N]
rw sync.RWMutex
}
func (b *Cache) Clear() {
b.rw.Lock()
defer b.rw.Unlock()
for address, chainCache := range b.balances {
if len(chainCache) == 0 {
continue
}
for chainID, cache := range chainCache {
if len(cache) == 0 {
continue
}
var maxBlock uint64 = 0
var minBlock uint64 = math.MaxUint64
for key := range cache {
if key > maxBlock {
maxBlock = key
}
if key < minBlock {
minBlock = key
}
}
newCache := make(map[uint64]*big.Int)
newCache[maxBlock] = cache[maxBlock]
newCache[minBlock] = cache[minBlock]
b.balances[address][chainID] = newCache
}
}
for address, chainCache := range b.nonces {
if len(chainCache) == 0 {
continue
}
for chainID, cache := range chainCache {
var maxBlock uint64 = 0
var minBlock uint64 = math.MaxUint64
for key := range cache {
if key > maxBlock {
maxBlock = key
}
if key < minBlock {
minBlock = key
}
}
newCache := make(map[uint64]*int64)
newCache[maxBlock] = cache[maxBlock]
newCache[minBlock] = cache[minBlock]
b.nonces[address][chainID] = newCache
}
}
b.nonceRanges = make(nonceRangesCacheType)
b.sortedRanges = make(sortedNonceRangesCacheType)
}
func (b *Cache) GetBalance(account common.Address, chainID uint64, blockNumber *big.Int) *big.Int {
func (b *genericCache[_, _, _]) GetBalance(account common.Address, chainID uint64, blockNumber *big.Int) *big.Int {
b.rw.RLock()
defer b.rw.RUnlock()
if b.balances[account] == nil || b.balances[account][chainID] == nil {
_, exists := b.balances[account]
if !exists {
return nil
}
return b.balances[account][chainID][blockNumber.Uint64()]
_, exists = b.balances[account][chainID]
if !exists {
return nil
}
return b.balances[account][chainID].get(blockNumber.Uint64())
}
func (b *Cache) AddBalance(account common.Address, chainID uint64, blockNumber *big.Int, balance *big.Int) {
func (b *genericCache[B, _, _]) AddBalance(account common.Address, chainID uint64, blockNumber *big.Int, balance *big.Int) {
b.rw.Lock()
defer b.rw.Unlock()
_, exists := b.balances[account]
if !exists {
b.balances[account] = make(map[uint64]map[uint64]*big.Int)
b.balances[account] = make(map[uint64]B)
}
_, exists = b.balances[account][chainID]
if !exists {
b.balances[account][chainID] = make(map[uint64]*big.Int)
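// B is itself a pointer type (e.g. *mapCache[...]); reflect allocates a fresh
// value of the pointed-to type, since Go generics cannot construct a value for
// a pointer type parameter directly.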
b.balances[account][chainID] = reflect.New(reflect.TypeOf(b.balances[account][chainID]).Elem()).Interface().(B)
b.balances[account][chainID].init()
}
b.balances[account][chainID][blockNumber.Uint64()] = balance
b.balances[account][chainID].set(blockNumber.Uint64(), balance)
}
func (b *Cache) BalanceAt(ctx context.Context, client Reader, account common.Address, blockNumber *big.Int) (*big.Int, error) {
cachedBalance := b.GetBalance(account, client.NetworkID(), blockNumber)
if cachedBalance != nil {
return cachedBalance, nil
}
balance, err := client.BalanceAt(ctx, account, blockNumber)
if err != nil {
return nil, err
}
b.AddBalance(account, client.NetworkID(), blockNumber, balance)
return balance, nil
}
func (b *Cache) GetNonce(account common.Address, chainID uint64, blockNumber *big.Int) *int64 {
func (b *genericCache[_, _, _]) GetNonce(account common.Address, chainID uint64, blockNumber *big.Int) *int64 {
b.rw.RLock()
defer b.rw.RUnlock()
if b.nonces[account] == nil || b.nonces[account][chainID] == nil {
_, exists := b.nonces[account]
if !exists {
return nil
}
return b.nonces[account][chainID][blockNumber.Uint64()]
}
func (b *Cache) Cache() CacheIface {
return b
}
func (b *Cache) sortRanges(account common.Address, chainID uint64) {
keys := make([]int, 0, len(b.nonceRanges[account][chainID]))
for k := range b.nonceRanges[account][chainID] {
keys = append(keys, int(k))
}
sort.Ints(keys) // This will not work for keys > 2^31
ranges := []nonceRange{}
for _, k := range keys {
r := b.nonceRanges[account][chainID][int64(k)]
ranges = append(ranges, r)
}
_, exists := b.sortedRanges[account]
_, exists = b.nonces[account][chainID]
if !exists {
b.sortedRanges[account] = make(map[uint64][]nonceRange)
return nil
}
b.sortedRanges[account][chainID] = ranges
nonce := b.nonces[account][chainID].get(blockNumber.Uint64())
if nonce != nil {
return nonce
}
return b.findNonceInRange(account, chainID, blockNumber)
}
func (b *Cache) findNonceInRange(account common.Address, chainID uint64, block *big.Int) *int64 {
b.rw.RLock()
defer b.rw.RUnlock()
for k := range b.sortedRanges[account][chainID] {
nr := b.sortedRanges[account][chainID][k]
cmpMin := nr.min.Cmp(block)
if cmpMin == 1 {
return nil
} else if cmpMin == 0 {
return &nr.nonce
} else {
cmpMax := nr.max.Cmp(block)
if cmpMax >= 0 {
return &nr.nonce
}
}
}
return nil
}
func (b *Cache) updateNonceRange(account common.Address, chainID uint64, blockNumber *big.Int, nonce *int64) {
_, exists := b.nonceRanges[account]
if !exists {
b.nonceRanges[account] = make(map[uint64]map[int64]nonceRange)
}
_, exists = b.nonceRanges[account][chainID]
if !exists {
b.nonceRanges[account][chainID] = make(map[int64]nonceRange)
}
nr, exists := b.nonceRanges[account][chainID][*nonce]
if !exists {
r := nonceRange{
max: big.NewInt(0).Set(blockNumber),
min: big.NewInt(0).Set(blockNumber),
nonce: *nonce,
}
b.nonceRanges[account][chainID][*nonce] = r
} else {
if nr.max.Cmp(blockNumber) == -1 {
nr.max.Set(blockNumber)
}
if nr.min.Cmp(blockNumber) == 1 {
nr.min.Set(blockNumber)
}
b.nonceRanges[account][chainID][*nonce] = nr
b.sortRanges(account, chainID)
}
}
func (b *Cache) AddNonce(account common.Address, chainID uint64, blockNumber *big.Int, nonce *int64) {
func (b *genericCache[_, N, _]) AddNonce(account common.Address, chainID uint64, blockNumber *big.Int, nonce *int64) {
b.rw.Lock()
defer b.rw.Unlock()
_, exists := b.nonces[account]
if !exists {
b.nonces[account] = make(map[uint64]map[uint64]*int64)
b.nonces[account] = make(map[uint64]N)
}
_, exists = b.nonces[account][chainID]
if !exists {
b.nonces[account][chainID] = make(map[uint64]*int64)
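// same reflect-based allocation as in AddBalance: N is a pointer type parameter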
b.nonces[account][chainID] = reflect.New(reflect.TypeOf(b.nonces[account][chainID]).Elem()).Interface().(N)
b.nonces[account][chainID].init()
}
b.nonces[account][chainID][blockNumber.Uint64()] = nonce
b.nonces[account][chainID].set(blockNumber.Uint64(), nonce)
b.updateNonceRange(account, chainID, blockNumber, nonce)
}
func (b *Cache) NonceAt(ctx context.Context, client Reader, account common.Address, blockNumber *big.Int) (*int64, error) {
cachedNonce := b.GetNonce(account, client.NetworkID(), blockNumber)
func (b *genericCache[_, _, _]) BalanceSize(account common.Address, chainID uint64) int {
b.rw.RLock()
defer b.rw.RUnlock()
_, exists := b.balances[account]
if !exists {
return 0
}
_, exists = b.balances[account][chainID]
if !exists {
return 0
}
return b.balances[account][chainID].len()
}
func (b *genericCache[_, N, _]) NonceSize(account common.Address, chainID uint64) int {
b.rw.RLock()
defer b.rw.RUnlock()
_, exists := b.nonces[account]
if !exists {
return 0
}
_, exists = b.nonces[account][chainID]
if !exists {
return 0
}
return b.nonces[account][chainID].len()
}
// cacherImpl implements the Cacher interface, caching balances and nonces in memory.
type cacherImpl struct {
cache CacheIface
}
func newCacherImpl(cache CacheIface) *cacherImpl {
return &cacherImpl{
cache: cache,
}
}
func (b *cacherImpl) BalanceAt(ctx context.Context, client Reader, account common.Address, blockNumber *big.Int) (*big.Int, error) {
cachedBalance := b.cache.GetBalance(account, client.NetworkID(), blockNumber)
if cachedBalance != nil {
return cachedBalance, nil
}
balance, err := client.BalanceAt(ctx, account, blockNumber)
if err != nil {
return nil, err
}
b.cache.AddBalance(account, client.NetworkID(), blockNumber, balance)
return balance, nil
}
func (b *cacherImpl) NonceAt(ctx context.Context, client Reader, account common.Address, blockNumber *big.Int) (*int64, error) {
cachedNonce := b.cache.GetNonce(account, client.NetworkID(), blockNumber)
if cachedNonce != nil {
return cachedNonce, nil
}
rangeNonce := b.findNonceInRange(account, client.NetworkID(), blockNumber)
if rangeNonce != nil {
return rangeNonce, nil
}
nonce, err := client.NonceAt(ctx, account, blockNumber)
if err != nil {
return nil, err
}
int64Nonce := int64(nonce)
b.AddNonce(account, client.NetworkID(), blockNumber, &int64Nonce)
b.cache.AddNonce(account, client.NetworkID(), blockNumber, &int64Nonce)
return &int64Nonce, nil
}
func (b *cacherImpl) Clear() {
b.cache.Clear()
}
func (b *cacherImpl) Cache() CacheIface {
return b.cache
}


@@ -0,0 +1,134 @@
package balance
import (
"math/big"
"reflect"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
)
type nonceRange struct {
nonce int64
max *big.Int
min *big.Int
}
type sortedNonceRangesCacheType addressChainMap[[]nonceRange] // address->chainID->[]nonceRange
type nonceRangeCache[T cacheIface[int64, nonceRange]] struct {
nonceRanges addressChainMap[T]
sortedRanges sortedNonceRangesCacheType
rw sync.RWMutex
}
func newNonceRangeCache[T cacheIface[int64, nonceRange]]() *nonceRangeCache[T] {
return &nonceRangeCache[T]{
nonceRanges: make(addressChainMap[T]),
sortedRanges: make(sortedNonceRangesCacheType),
}
}
func (b *nonceRangeCache[T]) updateNonceRange(account common.Address, chainID uint64, blockNumber *big.Int, nonce *int64) {
b.rw.Lock()
defer b.rw.Unlock()
_, exists := b.nonceRanges[account]
if !exists {
b.nonceRanges[account] = make(map[uint64]T)
}
_, exists = b.nonceRanges[account][chainID]
if !exists {
b.nonceRanges[account][chainID] = reflect.New(reflect.TypeOf(b.nonceRanges[account][chainID]).Elem()).Interface().(T)
b.nonceRanges[account][chainID].init()
}
nr := b.nonceRanges[account][chainID].get(*nonce)
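// get returns the zero value when the key is missing, so compare against the
// zero nonceRange (via reflect) to detect absence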
if nr == reflect.Zero(reflect.TypeOf(nr)).Interface() {
nr = nonceRange{
max: big.NewInt(0).Set(blockNumber),
min: big.NewInt(0).Set(blockNumber),
nonce: *nonce,
}
} else {
if nr.max.Cmp(blockNumber) == -1 {
nr.max.Set(blockNumber)
}
if nr.min.Cmp(blockNumber) == 1 {
nr.min.Set(blockNumber)
}
}
b.nonceRanges[account][chainID].set(*nonce, nr)
b.sortRanges(account, chainID)
}
func (b *nonceRangeCache[_]) findNonceInRange(account common.Address, chainID uint64, block *big.Int) *int64 {
b.rw.RLock()
defer b.rw.RUnlock()
for k := range b.sortedRanges[account][chainID] {
nr := b.sortedRanges[account][chainID][k]
cmpMin := nr.min.Cmp(block)
if cmpMin == 1 {
return nil
} else if cmpMin == 0 {
return &nr.nonce
} else {
cmpMax := nr.max.Cmp(block)
if cmpMax >= 0 {
return &nr.nonce
}
}
}
return nil
}
func (b *nonceRangeCache[T]) sortRanges(account common.Address, chainID uint64) {
// DO NOT LOCK HERE - this function is called from a locked function
keys := b.nonceRanges[account][chainID].keys()
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
ranges := []nonceRange{}
for _, k := range keys {
r := b.nonceRanges[account][chainID].get(k)
ranges = append(ranges, r)
}
_, exists := b.sortedRanges[account]
if !exists {
b.sortedRanges[account] = make(map[uint64][]nonceRange)
}
b.sortedRanges[account][chainID] = ranges
}
func (b *nonceRangeCache[T]) clear() {
b.rw.Lock()
defer b.rw.Unlock()
b.nonceRanges = make(addressChainMap[T])
b.sortedRanges = make(sortedNonceRangesCacheType)
}
func (b *nonceRangeCache[T]) size(account common.Address, chainID uint64) int {
b.rw.RLock()
defer b.rw.RUnlock()
_, exists := b.nonceRanges[account]
if !exists {
return 0
}
_, exists = b.nonceRanges[account][chainID]
if !exists {
return 0
}
return b.nonceRanges[account][chainID].len()
}
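A small sketch of how the nonce-range cache avoids RPC lookups, using the map-backed specialization defined later in this commit (the account and chain ID are placeholders):

	nrc := newNonceRangeCache[*mapCache[int64, nonceRange]]()
	account, chainID := common.Address{1}, uint64(1)

	// Nonce 5 was observed at blocks 100 and 120, so its range becomes [100, 120].
	n := int64(5)
	nrc.updateNonceRange(account, chainID, big.NewInt(100), &n)
	nrc.updateNonceRange(account, chainID, big.NewInt(120), &n)

	// Any block inside the range now resolves without a network call.
	got := nrc.findNonceInRange(account, chainID, big.NewInt(110)) // *got == 5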


@@ -0,0 +1,127 @@
package balance
import (
"math"
"math/big"
)
func NewSimpleCacher() Cacher {
return newCacherImpl(newSimpleCache())
}
// mapCache implements cacheIface with a plain map as internal storage
type mapCache[K comparable, V any] struct {
cache map[K]V
}
func (c *mapCache[K, V]) get(key K) V {
return c.cache[key]
}
func (c *mapCache[K, V]) set(key K, value V) {
c.cache[key] = value
}
func (c *mapCache[K, V]) len() int {
return len(c.cache)
}
func (c *mapCache[K, V]) keys() []K {
keys := make([]K, 0, len(c.cache))
for k := range c.cache {
keys = append(keys, k)
}
return keys
}
func (c *mapCache[K, V]) init() {
c.cache = make(map[K]V)
}
func (c *mapCache[K, V]) clear() {
c.cache = make(map[K]V)
}
// simpleCache specializes the generic cache with map-backed storage
type simpleCache struct {
genericCache[*mapCache[uint64, *big.Int], *mapCache[uint64, *int64], *mapCache[int64, nonceRange]]
}
func newSimpleCache() *simpleCache {
return &simpleCache{
genericCache: genericCache[*mapCache[uint64, *big.Int], *mapCache[uint64, *int64], *mapCache[int64, nonceRange]]{
balances: make(addressChainMap[*mapCache[uint64, *big.Int]]),
nonces: make(addressChainMap[*mapCache[uint64, *int64]]),
nonceRangeCache: *newNonceRangeCache[*mapCache[int64, nonceRange]](),
},
}
}
// Clear doesn't remove all entries; it keeps the max and min blocks for use in subsequent iterations of the transfer-block search
func (c *simpleCache) Clear() {
c.rw.Lock()
defer c.rw.Unlock()
for _, chainCache := range c.balances {
for _, cache := range chainCache {
if cache.len() == 0 {
continue
}
var maxBlock uint64 = 0
var minBlock uint64 = math.MaxUint64
for _, key := range cache.keys() {
if key > maxBlock {
maxBlock = key
}
if key < minBlock {
minBlock = key
}
}
maxBlockValue := cache.get(maxBlock)
minBlockValue := cache.get(minBlock)
cache.clear()
if maxBlockValue != nil {
cache.set(maxBlock, maxBlockValue)
}
if minBlockValue != nil {
cache.set(minBlock, minBlockValue)
}
}
}
for _, chainCache := range c.nonces {
for _, cache := range chainCache {
if cache.len() == 0 {
continue
}
var maxBlock uint64 = 0
var minBlock uint64 = math.MaxUint64
for _, key := range cache.keys() {
if key > maxBlock {
maxBlock = key
}
if key < minBlock {
minBlock = key
}
}
maxBlockValue := cache.get(maxBlock)
minBlockValue := cache.get(minBlock)
cache.clear()
if maxBlockValue != nil {
cache.set(maxBlock, maxBlockValue)
}
if minBlockValue != nil {
cache.set(minBlock, minBlockValue)
}
}
}
c.nonceRangeCache.clear()
}


@@ -0,0 +1,44 @@
package balance
import (
"math/big"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
)
func Test_simpleCacheAll(t *testing.T) {
cache := newSimpleCache()
// init args
block := big.NewInt(1)
chainID := uint64(1)
account := common.Address{1}
balance := big.NewInt(1)
// Test balance
cache.AddBalance(account, chainID, block, balance)
require.Equal(t, 1, cache.BalanceSize(account, chainID))
require.Equal(t, 0, cache.NonceSize(account, chainID))
balRes := cache.GetBalance(account, chainID, block)
require.Equal(t, balance, balRes)
// Test nonce
cache = newSimpleCache()
nonce := int64(2)
cache.AddNonce(account, chainID, block, &nonce)
require.Equal(t, 1, cache.NonceSize(account, chainID))
require.Equal(t, 0, cache.BalanceSize(account, chainID))
nonceRes := cache.GetNonce(account, chainID, block)
require.Equal(t, nonce, *nonceRes)
// Test nonceRange size after adding nonce
cache = newSimpleCache()
cache.AddNonce(account, chainID, block, &nonce)
require.Equal(t, 1, cache.nonceRangeCache.size(account, chainID))
require.Equal(t, 1, len(cache.nonceRangeCache.sortedRanges))
}


@@ -0,0 +1,105 @@
package balance
import (
"context"
"math/big"
"time"
"github.com/jellydator/ttlcache/v3"
"github.com/ethereum/go-ethereum/log"
)
var (
defaultTTLValue = 5 * time.Minute
)
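// Note: newCacheWithTTL below overwrites defaultTTLValue, so ttlCache
// instances initialized afterwards pick up the most recently requested TTL.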
func NewCacherWithTTL(ttl time.Duration) Cacher {
return newCacherImpl(newCacheWithTTL(ttl))
}
// TTL cache implementation of cacheIface
type ttlCache[K comparable, V any] struct {
cache *ttlcache.Cache[K, V]
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) get(key K) V {
item := c.cache.Get(key)
if item == nil {
var v V
return v
}
return item.Value()
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) set(key K, value V) {
_ = c.cache.Set(key, value, ttlcache.DefaultTTL)
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) len() int {
return c.cache.Len()
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) keys() []K {
return c.cache.Keys()
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) init() {
c.cache = ttlcache.New[K, V](
ttlcache.WithTTL[K, V](defaultTTLValue),
)
c.cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[K, V]) {
log.Debug("Evicting item from balance/nonce cache", "reason", reason, "key", item.Key, "value", item.Value)
})
go c.cache.Start() // starts automatic expired item deletion
}
//nolint:golint,unused // linter does not detect using it via reflect
func (c *ttlCache[K, V]) clear() {
c.cache.DeleteAll()
}
// cacheWithTTL specializes the generic cache with TTL-backed balance and nonce storage
type cacheWithTTL struct {
// TODO: use ttlCache instead of mapCache for nonceRangeCache. For that we need to update sortedRanges on item eviction
// For now, the nonceRanges cache is not updated when nonce items are evicted, but it should not grow as large as the nonce cache
genericCache[*ttlCache[uint64, *big.Int], *ttlCache[uint64, *int64], *mapCache[int64, nonceRange]]
}
func newCacheWithTTL(ttl time.Duration) *cacheWithTTL {
defaultTTLValue = ttl
return &cacheWithTTL{
genericCache: genericCache[*ttlCache[uint64, *big.Int], *ttlCache[uint64, *int64], *mapCache[int64, nonceRange]]{
balances: make(addressChainMap[*ttlCache[uint64, *big.Int]]),
nonces: make(addressChainMap[*ttlCache[uint64, *int64]]),
nonceRangeCache: *newNonceRangeCache[*mapCache[int64, nonceRange]](),
},
}
}
func (c *cacheWithTTL) Clear() {
c.rw.Lock()
defer c.rw.Unlock()
// TTL cache removes expired items automatically
// but in case we want to clear it manually we can do it here
for _, chainCache := range c.balances {
for _, cache := range chainCache {
cache.clear()
}
}
for _, chainCache := range c.nonces {
for _, cache := range chainCache {
cache.clear()
}
}
c.nonceRangeCache.clear()
}
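A minimal usage sketch for the TTL-backed cacher (assuming the Cacher interface exposes Cache() as cacherImpl does; the account, block, and values are placeholders):

	cacher := NewCacherWithTTL(5 * time.Minute)
	account, chainID := common.Address{1}, uint64(1)

	// Seed and read entries directly through the CacheIface.
	cacher.Cache().AddBalance(account, chainID, big.NewInt(100), big.NewInt(42))
	bal := cacher.Cache().GetBalance(account, chainID, big.NewInt(100)) // 42 until the TTL expires
	_ = bal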


@@ -0,0 +1,67 @@
package balance
import (
"math/big"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
)
func Test_ttlCacheAll(t *testing.T) {
const ttl = 10 * time.Millisecond
cache := newCacheWithTTL(ttl)
// init args
block := big.NewInt(1)
chainID := uint64(1)
account := common.Address{1}
balance := big.NewInt(1)
// Test balance
cache.AddBalance(account, chainID, block, balance)
require.Equal(t, 1, cache.BalanceSize(account, chainID))
require.Equal(t, 0, cache.NonceSize(account, chainID))
balRes := cache.GetBalance(account, chainID, block)
require.Equal(t, balance, balRes)
cache.Clear()
require.Equal(t, 0, cache.BalanceSize(account, chainID))
require.Equal(t, 0, cache.NonceSize(account, chainID))
// Test nonce
nonce := int64(2)
cache.AddNonce(account, chainID, block, &nonce)
require.Equal(t, 1, cache.NonceSize(account, chainID))
require.Equal(t, 0, cache.BalanceSize(account, chainID))
nonceRes := cache.GetNonce(account, chainID, block)
require.Equal(t, nonce, *nonceRes)
cache.Clear()
require.Equal(t, 0, cache.BalanceSize(account, chainID))
require.Equal(t, 0, cache.NonceSize(account, chainID))
// Test cache expiration
cache.Clear()
cache.AddBalance(account, chainID, block, balance)
cache.AddNonce(account, chainID, block, &nonce)
time.Sleep(ttl * 2) // wait for cache to expire
require.Equal(t, 0, cache.BalanceSize(account, chainID))
require.Equal(t, 0, cache.NonceSize(account, chainID))
require.Equal(t, 1, cache.nonceRangeCache.size(account, chainID)) // not updated by ttlCache for now
cache.Clear()
// Test nonceRange size after adding nonce
cache.Clear()
cache.AddNonce(account, chainID, block, &nonce)
require.Equal(t, 1, cache.nonceRangeCache.size(account, chainID))
require.Equal(t, 1, len(cache.nonceRangeCache.sortedRanges))
// Test nonceRange size after clearing
cache.nonceRangeCache.clear()
require.Equal(t, 0, cache.nonceRangeCache.size(account, chainID))
require.Equal(t, 0, len(cache.nonceRangeCache.sortedRanges))
}


@@ -3,42 +3,20 @@ package history
import (
"context"
"errors"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
w_common "github.com/status-im/status-go/services/wallet/common"
"github.com/ethereum/go-ethereum/log"
)
type Balance struct {
db *BalanceDB
}
const (
aDay = time.Duration(24) * time.Hour
)
// Must have a common divisor to share common blocks and increase the cache hit rate
const (
twiceADayStride time.Duration = time.Duration(12) * time.Hour
weekStride = 14 * twiceADayStride
monthsStride = 1 /*months*/ * 4 * weekStride
)
// bitsetFilter values are used to fetch relevant data points in one batch and to increase the cache hit rate
const (
filterAllTime bitsetFilter = 1
filterWeekly bitsetFilter = 1 << 3
filterTwiceADay bitsetFilter = 1 << 5
)
type TimeInterval int
const genesisTimestamp = 1438269988
// Specific time intervals for which balance history can be fetched
type TimeInterval int
const (
BalanceHistory7Days TimeInterval = iota + 1
BalanceHistory1Month
@@ -47,6 +25,8 @@ const (
BalanceHistoryAllTime
)
const aDay = time.Duration(24) * time.Hour
var timeIntervalDuration = map[TimeInterval]time.Duration{
BalanceHistory7Days: time.Duration(7) * aDay,
BalanceHistory1Month: time.Duration(30) * aDay,
@@ -54,44 +34,8 @@ var timeIntervalDuration = map[TimeInterval]time.Duration{
BalanceHistory1Year: time.Duration(365) * aDay,
}
var timeIntervalToBitsetFilter = map[TimeInterval]bitsetFilter{
BalanceHistory7Days: filterTwiceADay,
BalanceHistory1Month: filterTwiceADay,
BalanceHistory6Months: filterWeekly,
BalanceHistory1Year: filterWeekly,
BalanceHistoryAllTime: filterAllTime,
}
var timeIntervalToStrideDuration = map[TimeInterval]time.Duration{
BalanceHistory7Days: twiceADayStride,
BalanceHistory1Month: twiceADayStride,
BalanceHistory6Months: weekStride,
BalanceHistory1Year: weekStride,
BalanceHistoryAllTime: monthsStride,
}
func strideBlockCount(timeInterval TimeInterval, chainID uint64) int {
blockDuration, found := w_common.AverageBlockDurationForChain[w_common.ChainID(chainID)]
if !found {
blockDuration = w_common.AverageBlockDurationForChain[w_common.ChainID(w_common.UnknownChainID)]
}
return int(timeIntervalToStrideDuration[timeInterval] / blockDuration)
}
func NewBalance(db *BalanceDB) *Balance {
return &Balance{
db: db,
}
}
// DataSource used as an abstraction to fetch required data from a specific blockchain
type DataSource interface {
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error)
ChainID() uint64
Currency() string
TimeNow() int64
func TimeIntervalDurationSecs(timeInterval TimeInterval) uint64 {
return uint64(timeIntervalDuration[timeInterval].Seconds())
}
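// For instance, TimeIntervalDurationSecs(BalanceHistory7Days) == 7*24*3600 == 604800.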
type DataPoint struct {
@@ -100,256 +44,150 @@ type DataPoint struct {
BlockNumber *hexutil.Big
}
// fetchAndCache will process the last available block if blockNo is nil
// reuses previously fetched block timestamps to avoid fetching block headers again
func (b *Balance) fetchAndCache(ctx context.Context, source DataSource, address common.Address, blockNo *big.Int, bitset bitsetFilter) (*DataPoint, *big.Int, error) {
var outEntry *entry
var err error
if blockNo != nil {
cached, bitsetList, err := b.db.get(&assetIdentity{source.ChainID(), address, source.Currency()}, blockNo, 1, asc)
if err != nil {
return nil, nil, err
}
if len(cached) > 0 && cached[0].block.Cmp(blockNo) == 0 {
// found a match, update the bitset
err := b.db.updateBitset(&assetIdentity{source.ChainID(), address, source.Currency()}, blockNo, bitset|bitsetList[0])
if err != nil {
return nil, nil, err
}
return &DataPoint{
Balance: (*hexutil.Big)(cached[0].balance),
Timestamp: uint64(cached[0].timestamp),
BlockNumber: (*hexutil.Big)(cached[0].block),
}, blockNo, nil
}
// otherwise try fetching any entry to get the timestamp info
outEntry, _, err = b.db.getFirst(source.ChainID(), blockNo)
if err != nil {
return nil, nil, err
}
}
var timestamp int64
if outEntry != nil {
timestamp = outEntry.timestamp
} else {
header, err := source.HeaderByNumber(ctx, blockNo)
if err != nil {
return nil, nil, err
}
blockNo = new(big.Int).Set(header.Number)
timestamp = int64(header.Time)
}
currentBalance, err := source.BalanceAt(ctx, address, blockNo)
if err != nil {
return nil, nil, err
}
entry := entry{
chainID: source.ChainID(),
address: address,
tokenSymbol: source.Currency(),
block: new(big.Int).Set(blockNo),
balance: currentBalance,
timestamp: timestamp,
}
err = b.db.add(&entry, bitset)
if err != nil {
return nil, nil, err
}
var dataPoint DataPoint
dataPoint.Balance = (*hexutil.Big)(currentBalance)
dataPoint.Timestamp = uint64(timestamp)
return &dataPoint, blockNo, nil
// String returns a string representation of the data point
func (d *DataPoint) String() string {
return fmt.Sprintf("timestamp: %d balance: %v block: %v", d.Timestamp, d.Balance.ToInt(), d.BlockNumber.ToInt())
}
// update retrieves the balance history for a specified asset from the database initially
// and supplements any missing information from the blockchain to minimize the number of RPC calls.
// if the context is cancelled it returns an error
func (b *Balance) update(ctx context.Context, source DataSource, address common.Address, timeInterval TimeInterval) error {
startTimestamp := int64(0)
fetchTimestamp := int64(0)
endTime := source.TimeNow()
if timeInterval != BalanceHistoryAllTime {
// Ensure we always get the complete range by fetching the next block also
startTimestamp = endTime - int64(timeIntervalDuration[timeInterval].Seconds())
fetchTimestamp = startTimestamp - int64(timeIntervalToStrideDuration[timeInterval].Seconds())
}
identity := &assetIdentity{source.ChainID(), address, source.Currency()}
firstCached, err := b.firstCachedStartingAt(identity, fetchTimestamp, timeInterval)
if err != nil {
return err
}
var oldestCached *big.Int
var oldestTimestamp int64
var newestCached *big.Int
if firstCached != nil {
oldestCached = new(big.Int).Set(firstCached.block)
oldestTimestamp = firstCached.timestamp
lastCached, err := b.lastCached(identity, timeInterval)
if err != nil {
return err
}
newestCached = new(big.Int).Set(lastCached.block)
} else {
var fetchBlock *big.Int
lastEntry, _, err := b.db.getLastEntryForChain(source.ChainID())
if err != nil {
return err
}
if lastEntry != nil {
fetchBlock = new(big.Int).Set(lastEntry.block)
}
mostRecentDataPoint, mostRecentBlock, err := b.fetchAndCache(ctx, source, address, fetchBlock, timeIntervalToBitsetFilter[timeInterval])
if err != nil {
return err
}
oldestCached = new(big.Int).Set(mostRecentBlock)
oldestTimestamp = int64(mostRecentDataPoint.Timestamp)
newestCached = new(big.Int).Set(mostRecentBlock)
}
if oldestTimestamp > startTimestamp {
err := b.fetchBackwardAndCache(ctx, source, address, oldestCached, startTimestamp, timeInterval)
if err != nil {
return err
}
}
// Fetch forward if we haven't updated within one stride duration
err = b.fetchForwardAndCache(ctx, source, address, newestCached, timeInterval)
if err != nil {
return err
}
return nil
type Balance struct {
db *BalanceDB
}
// get returns the balance history for the given address and time interval until endTimestamp
func (b *Balance) get(ctx context.Context, chainID uint64, currency string, address common.Address, endTimestamp int64, timeInterval TimeInterval) ([]*DataPoint, error) {
startTimestamp := int64(0)
fetchTimestamp := int64(0)
if timeInterval != BalanceHistoryAllTime {
// Ensure we always get the complete range by fetching the next block also
startTimestamp = endTimestamp - int64(timeIntervalDuration[timeInterval].Seconds())
fetchTimestamp = startTimestamp - int64(timeIntervalToStrideDuration[timeInterval].Seconds())
}
cached, _, err := b.db.filter(&assetIdentity{chainID, address, currency}, nil, &balanceFilter{fetchTimestamp, endTimestamp, expandFlag(timeIntervalToBitsetFilter[timeInterval])}, 800, asc)
func NewBalance(db *BalanceDB) *Balance {
return &Balance{db}
}
// get returns the balance history for the given address from the given timestamp till now
func (b *Balance) get(ctx context.Context, chainID uint64, currency string, address common.Address, fromTimestamp uint64) ([]*entry, error) {
log.Debug("Getting balance history", "chainID", chainID, "currency", currency, "address", address, "fromTimestamp", fromTimestamp)
cached, err := b.db.getNewerThan(&assetIdentity{chainID, address, currency}, fromTimestamp)
if err != nil {
return nil, err
}
points := make([]*DataPoint, 0, len(cached)+1)
for _, entry := range cached {
dataPoint := DataPoint{
Balance: (*hexutil.Big)(entry.balance),
Timestamp: uint64(entry.timestamp),
BlockNumber: (*hexutil.Big)(entry.block),
return cached, nil
}
func (b *Balance) addEdgePoints(chainID uint64, currency string, address common.Address, fromTimestamp, toTimestamp uint64, data []*entry) (res []*entry, err error) {
log.Debug("Adding edge points", "chainID", chainID, "currency", currency, "address", address, "fromTimestamp", fromTimestamp)
var firstEntry *entry
if len(data) > 0 {
firstEntry = data[0]
} else {
firstEntry = &entry{
chainID: chainID,
address: address,
tokenSymbol: currency,
timestamp: int64(fromTimestamp),
}
points = append(points, &dataPoint)
}
lastCached, _, err := b.db.get(&assetIdentity{chainID, address, currency}, nil, 1, desc)
previous, err := b.db.getEntryPreviousTo(firstEntry)
if err != nil {
return nil, err
}
if len(lastCached) > 0 && len(cached) > 0 && lastCached[0].block.Cmp(cached[len(cached)-1].block) > 0 {
points = append(points, &DataPoint{
Balance: (*hexutil.Big)(lastCached[0].balance),
Timestamp: uint64(lastCached[0].timestamp),
BlockNumber: (*hexutil.Big)(lastCached[0].block),
firstTimestamp, lastTimestamp := timestampBoundaries(fromTimestamp, toTimestamp, data)
if previous != nil {
previous.timestamp = int64(firstTimestamp) // We might need to use another minimal offset respecting the time interval
previous.block = nil
res = append([]*entry{previous}, data...)
} else {
// Add a zero point at the beginning to draw a line from
res = append([]*entry{
{
chainID: chainID,
address: address,
tokenSymbol: currency,
timestamp: int64(firstTimestamp),
balance: big.NewInt(0),
},
}, data...)
}
if res[len(res)-1].timestamp < int64(lastTimestamp) {
// Add a last point to draw a line to
res = append(res, &entry{
chainID: chainID,
address: address,
tokenSymbol: currency,
timestamp: int64(lastTimestamp),
balance: res[len(res)-1].balance,
})
}
return points, nil
return res, nil
}
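// Illustration of the edge points (hypothetical numbers): for a requested range
// t=1000..2000 with cached data starting at t=1500:
//   - if an older entry exists (say balance 7 at t=900), it is re-timestamped to
//     t=1000 and prepended, so the series starts at the correct balance;
//   - otherwise a zero-balance point at t=1000 is prepended;
//   - if the last entry is older than t=2000, a closing point at t=2000 carrying
//     the last known balance is appended.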
// fetchBackwardAndCache fetches balance entries, adds them to the DB, starting one stride before the endBlock, and stops
// when reaching a block timestamp older than startTimestamp or the genesis block
// relies on block duration approximating AverageBlockDurationForChain for sampling the data
func (b *Balance) fetchBackwardAndCache(ctx context.Context, source DataSource, address common.Address, endBlock *big.Int, startTimestamp int64, timeInterval TimeInterval) error {
stride := strideBlockCount(timeInterval, source.ChainID())
nextBlock := new(big.Int).Set(endBlock)
for nextBlock.Cmp(big.NewInt(1)) > 0 {
if shouldCancel(ctx) {
return errors.New("context cancelled")
}
nextBlock.Sub(nextBlock, big.NewInt(int64(stride)))
if nextBlock.Cmp(big.NewInt(0)) <= 0 {
// we reached the genesis block which doesn't have a usable timestamp, fetch next
nextBlock.SetUint64(1)
}
dataPoint, _, err := b.fetchAndCache(ctx, source, address, nextBlock, timeIntervalToBitsetFilter[timeInterval])
if err != nil {
return err
}
// Allow going back one stride to cover the requested interval
if int64(dataPoint.Timestamp) < startTimestamp {
return nil
}
}
return nil
}
// fetchForwardAndCache fetches balance entries, adds them to the DB, starting one stride after the startBlock, and stops
// when a block is not found
// relies on block duration approximating AverageBlockDurationForChain
func (b *Balance) fetchForwardAndCache(ctx context.Context, source DataSource, address common.Address, startBlock *big.Int, timeInterval TimeInterval) error {
stride := strideBlockCount(timeInterval, source.ChainID())
nextBlock := new(big.Int).Set(startBlock)
for {
if shouldCancel(ctx) {
return errors.New("context cancelled")
}
nextBlock.Add(nextBlock, big.NewInt(int64(stride)))
_, _, err := b.fetchAndCache(ctx, source, address, nextBlock, timeIntervalToBitsetFilter[timeInterval])
if err != nil {
if err == ethereum.NotFound {
// We overshoot, stop and return what we have
return nil
func timestampBoundaries(fromTimestamp, toTimestamp uint64, data []*entry) (firstTimestamp, lastTimestamp uint64) {
firstTimestamp = fromTimestamp
if fromTimestamp == 0 {
if len(data) > 0 {
if data[0].timestamp == 0 {
panic("data[0].timestamp must never be 0")
}
return err
firstTimestamp = uint64(data[0].timestamp) - 1
} else {
firstTimestamp = genesisTimestamp
}
}
if toTimestamp < firstTimestamp {
panic("toTimestamp < fromTimestamp")
}
lastTimestamp = toTimestamp
return firstTimestamp, lastTimestamp
}
// firstCachedStartingAt returns the first cached entry for the given identity and time interval starting at startTimestamp, or nil if none is found
func (b *Balance) firstCachedStartingAt(identity *assetIdentity, startTimestamp int64, timeInterval TimeInterval) (first *entry, err error) {
entries, _, err := b.db.filter(identity, nil, &balanceFilter{startTimestamp, maxAllRangeTimestamp, expandFlag(timeIntervalToBitsetFilter[timeInterval])}, 1, desc)
if err != nil {
return nil, err
} else if len(entries) == 0 {
return nil, nil
}
return entries[0], nil
}
func addPaddingPoints(currency string, address common.Address, toTimestamp uint64, data []*entry, limit int) (res []*entry, err error) {
log.Debug("addPaddingPoints start", "currency", currency, "address", address, "len(data)", len(data), "data", data, "limit", limit)
// lastCached returns last cached entry for the given identity and time interval or nil if none found
func (b *Balance) lastCached(identity *assetIdentity, timeInterval TimeInterval) (first *entry, err error) {
entries, _, err := b.db.filter(identity, nil, &balanceFilter{minAllRangeTimestamp, maxAllRangeTimestamp, expandFlag(timeIntervalToBitsetFilter[timeInterval])}, 1, desc)
if err != nil {
return nil, err
} else if len(entries) == 0 {
return nil, nil
if len(data) < 2 { // Edge points must be added separately during the previous step
return nil, errors.New("slice is empty")
}
return entries[0], nil
}
// shouldCancel returns true if the context has been cancelled and task should be aborted
func shouldCancel(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
if limit <= len(data) {
return data, nil
}
return false
fromTimestamp := uint64(data[0].timestamp)
delta := (toTimestamp - fromTimestamp) / uint64(limit-1)
res = make([]*entry, len(data))
copy(res, data)
for i, j, index := 1, 0, 0; len(res) < limit; index++ {
// Add a last point to draw a line to. For some cases we might not need it,
// but when merging with points from other chains, we might get wrong balance if we don't have it.
paddingTimestamp := int64(fromTimestamp + delta*uint64(i))
if paddingTimestamp < data[j].timestamp {
// make room for a new point
res = append(res[:index+1], res[index:]...)
// insert a new point
entry := &entry{
address: address,
tokenSymbol: currency,
timestamp: paddingTimestamp,
balance: data[j-1].balance, // take the previous balance
}
res[index] = entry
log.Debug("Added padding point", "entry", entry, "timestamp", paddingTimestamp, "i", i, "j", j, "index", index)
i++
} else if paddingTimestamp >= data[j].timestamp {
log.Debug("Kept real point", "entry", data[j], "timestamp", paddingTimestamp, "i", i, "j", j, "index", index)
j++
}
}
log.Debug("addPaddingPoints end", "len(res)", len(res))
return res, nil
}
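// Worked example (hypothetical values): entries at t=100 (balance 1) and
// t=300 (balance 2), toTimestamp=300, limit=5 give delta=(300-100)/4=50 and
// the padded series:
//   t=100 balance=1 (real)
//   t=150 balance=1 (padding, carries previous balance)
//   t=200 balance=1 (padding)
//   t=250 balance=1 (padding)
//   t=300 balance=2 (real)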


@@ -3,10 +3,10 @@ package history
import (
"database/sql"
"fmt"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/services/wallet/bigint"
)
@@ -22,154 +22,117 @@ func NewBalanceDB(sqlDb *sql.DB) *BalanceDB {
// entry represents a single row in the balance_history table
type entry struct {
chainID uint64
address common.Address
tokenSymbol string
block *big.Int
timestamp int64
balance *big.Int
chainID uint64
address common.Address
tokenSymbol string
tokenAddress common.Address
block *big.Int
timestamp int64
balance *big.Int
}
// bitsetFilter stores the time interval for which the data points are matching
type bitsetFilter int
const (
minAllRangeTimestamp = 0
maxAllRangeTimestamp = math.MaxInt64
bitsetFilterFlagCount = 30
)
// expandFlag will generate a bitset that matches all lower value flags (fills the less significant bits of the flag with 1; e.g. 0b1000 -> 0b1111)
func expandFlag(flag bitsetFilter) bitsetFilter {
return (flag << 1) - 1
}
func (b *BalanceDB) add(entry *entry, bitset bitsetFilter) error {
_, err := b.db.Exec("INSERT INTO balance_history (chain_id, address, currency, block, timestamp, bitset, balance) VALUES (?, ?, ?, ?, ?, ?, ?)", entry.chainID, entry.address, entry.tokenSymbol, (*bigint.SQLBigInt)(entry.block), entry.timestamp, int(bitset), (*bigint.SQLBigIntBytes)(entry.balance))
return err
}
type sortDirection = int
const (
asc sortDirection = 0
desc sortDirection = 1
)
type assetIdentity struct {
ChainID uint64
Address common.Address
TokenSymbol string
}
// bitset is used so that higher values include lower ones, letting high-granularity intervals also match data points stored for lower-granularity ones
// minTimestamp and maxTimestamp filter the results by timestamp.
type balanceFilter struct {
minTimestamp int64
maxTimestamp int64
bitset bitsetFilter
func (e *entry) String() string {
return fmt.Sprintf("chainID: %v, address: %v, tokenSymbol: %v, tokenAddress: %v, block: %v, timestamp: %v, balance: %v",
e.chainID, e.address, e.tokenSymbol, e.tokenAddress, e.block, e.timestamp, e.balance)
}
// filter returns a sorted list of entries: an empty array if none is found for the given input, or nil on error
// if startingAtBlock is provided, the result will start with the provided block number or the next available one
// if startingAtBlock is NOT provided the result will begin from the first available block that matches filter.minTimestamp
// sort defines the order of the result by block number (which correlates also with timestamp)
func (b *BalanceDB) filter(identity *assetIdentity, startingAtBlock *big.Int, filter *balanceFilter, maxEntries int, sort sortDirection) (entries []*entry, bitsetList []bitsetFilter, err error) {
// Start from the first block in case a specific one was not provided
if startingAtBlock == nil {
startingAtBlock = big.NewInt(0)
func (b *BalanceDB) add(entry *entry) error {
log.Debug("Adding entry to balance_history", "entry", entry)
_, err := b.db.Exec("INSERT OR IGNORE INTO balance_history (chain_id, address, currency, block, timestamp, balance) VALUES (?, ?, ?, ?, ?, ?)", entry.chainID, entry.address, entry.tokenSymbol, (*bigint.SQLBigInt)(entry.block), entry.timestamp, (*bigint.SQLBigIntBytes)(entry.balance))
return err
}
func (b *BalanceDB) getEntriesWithoutBalances(chainID uint64, address common.Address) (entries []*entry, err error) {
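// Select transfer blocks that have no matching balance_history row yet
// (LEFT JOIN ... WHERE bh.block IS NULL); ERC-721 transfers are excluded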
rows, err := b.db.Query("SELECT blk_number, tr.timestamp, token_address from transfers tr LEFT JOIN balance_history bh ON bh.block = tr.blk_number WHERE tr.network_id = ? AND tr.address = ? AND tr.type != 'erc721' AND bh.block IS NULL",
chainID, address)
if err == sql.ErrNoRows {
return nil, nil
}
// We are interested in ordering by timestamp, but we query by block number, which correlates with timestamp order and is indexed
var queryStr string
rawQueryStr := "SELECT block, timestamp, balance, bitset FROM balance_history WHERE chain_id = ? AND address = ? AND currency = ? AND block >= ? AND timestamp BETWEEN ? AND ? AND (bitset & ?) > 0 ORDER BY block %s LIMIT ?"
if sort == asc {
queryStr = fmt.Sprintf(rawQueryStr, "ASC")
} else {
queryStr = fmt.Sprintf(rawQueryStr, "DESC")
}
rows, err := b.db.Query(queryStr, identity.ChainID, identity.Address, identity.TokenSymbol, (*bigint.SQLBigInt)(startingAtBlock), filter.minTimestamp, filter.maxTimestamp, filter.bitset, maxEntries)
if err != nil {
return nil, nil, err
return nil, err
}
defer rows.Close()
entries = make([]*entry, 0)
for rows.Next() {
entry := &entry{
chainID: chainID,
address: address,
block: new(big.Int),
}
// tokenAddress can be NULL and cannot be scanned directly into common.Address
tokenHexAddress := make([]byte, common.AddressLength)
err := rows.Scan((*bigint.SQLBigInt)(entry.block), &entry.timestamp, &tokenHexAddress)
if err != nil {
return nil, err
}
tokenAddress := common.BytesToAddress(tokenHexAddress)
if tokenAddress != (common.Address{}) {
entry.tokenAddress = tokenAddress
}
entries = append(entries, entry)
}
return entries, nil
}
func (b *BalanceDB) getNewerThan(identity *assetIdentity, timestamp uint64) (entries []*entry, err error) {
// DISTINCT removes duplicates that can happen when a block has multiple transfers of same token
rawQueryStr := "SELECT DISTINCT block, timestamp, balance FROM balance_history WHERE chain_id = ? AND address = ? AND currency = ? AND timestamp > ? ORDER BY timestamp"
rows, err := b.db.Query(rawQueryStr, identity.ChainID, identity.Address, identity.TokenSymbol, timestamp)
if err == sql.ErrNoRows {
return nil, nil
} else if err != nil {
return nil, err
}
defer rows.Close()
result := make([]*entry, 0)
for rows.Next() {
entry := &entry{
chainID: 0,
chainID: identity.ChainID,
address: identity.Address,
tokenSymbol: identity.TokenSymbol,
block: new(big.Int),
balance: new(big.Int),
}
var bitset int
err := rows.Scan((*bigint.SQLBigInt)(entry.block), &entry.timestamp, (*bigint.SQLBigIntBytes)(entry.balance), &bitset)
err := rows.Scan((*bigint.SQLBigInt)(entry.block), &entry.timestamp, (*bigint.SQLBigIntBytes)(entry.balance))
if err != nil {
return nil, nil, err
return nil, err
}
entry.chainID = identity.ChainID
result = append(result, entry)
bitsetList = append(bitsetList, bitsetFilter(bitset))
}
return result, bitsetList, nil
return result, nil
}
// get calls filter that matches all entries
func (b *BalanceDB) get(identity *assetIdentity, startingAtBlock *big.Int, maxEntries int, sort sortDirection) (entries []*entry, bitsetList []bitsetFilter, err error) {
return b.filter(identity, startingAtBlock, &balanceFilter{
minTimestamp: minAllRangeTimestamp,
maxTimestamp: maxAllRangeTimestamp,
bitset: expandFlag(1 << bitsetFilterFlagCount),
}, maxEntries, sort)
}
// getFirst returns the first entry for the block or nil if no entry is found
func (b *BalanceDB) getFirst(chainID uint64, block *big.Int) (res *entry, bitset bitsetFilter, err error) {
func (b *BalanceDB) getEntryPreviousTo(item *entry) (res *entry, err error) {
res = &entry{
chainID: chainID,
block: new(big.Int).Set(block),
balance: new(big.Int),
chainID: item.chainID,
address: item.address,
block: new(big.Int),
balance: new(big.Int),
tokenSymbol: item.tokenSymbol,
}
queryStr := "SELECT address, currency, timestamp, balance, bitset FROM balance_history WHERE chain_id = ? AND block = ?"
row := b.db.QueryRow(queryStr, chainID, (*bigint.SQLBigInt)(block))
var bitsetRaw int
queryStr := "SELECT block, timestamp, balance FROM balance_history WHERE chain_id = ? AND address = ? AND currency = ? AND timestamp < ? ORDER BY timestamp DESC LIMIT 1"
row := b.db.QueryRow(queryStr, item.chainID, item.address, item.tokenSymbol, item.timestamp)
err = row.Scan(&res.address, &res.tokenSymbol, &res.timestamp, (*bigint.SQLBigIntBytes)(res.balance), &bitsetRaw)
err = row.Scan((*bigint.SQLBigInt)(res.block), &res.timestamp, (*bigint.SQLBigIntBytes)(res.balance))
if err == sql.ErrNoRows {
return nil, 0, nil
return nil, nil
} else if err != nil {
return nil, 0, err
return nil, err
}
return res, bitsetFilter(bitsetRaw), nil
}
// getLastEntryForChain returns the last entry for the chainID or nil if no entry is found
func (b *BalanceDB) getLastEntryForChain(chainID uint64) (res *entry, bitset bitsetFilter, err error) {
res = &entry{
chainID: chainID,
block: new(big.Int),
balance: new(big.Int),
}
queryStr := "SELECT address, currency, timestamp, block, balance, bitset FROM balance_history WHERE chain_id = ? ORDER BY block DESC"
row := b.db.QueryRow(queryStr, chainID)
var bitsetRaw int
err = row.Scan(&res.address, &res.tokenSymbol, &res.timestamp, (*bigint.SQLBigInt)(res.block), (*bigint.SQLBigIntBytes)(res.balance), &bitsetRaw)
if err == sql.ErrNoRows {
return nil, 0, nil
} else if err != nil {
return nil, 0, err
}
return res, bitsetFilter(bitsetRaw), nil
}
func (b *BalanceDB) updateBitset(asset *assetIdentity, block *big.Int, newBitset bitsetFilter) error {
// Updating bitset value in place doesn't work.
// Tried "INSERT INTO balance_history ... ON CONFLICT(chain_id, address, currency, block) DO UPDATE SET timestamp=excluded.timestamp, bitset=(bitset | excluded.bitset), balance=excluded.balance"
_, err := b.db.Exec("UPDATE balance_history SET bitset = ? WHERE chain_id = ? AND address = ? AND currency = ? AND block = ?", int(newBitset), asset.ChainID, asset.Address, asset.TokenSymbol, (*bigint.SQLBigInt)(block))
return err
return res, nil
}


@@ -1,329 +0,0 @@
package history
import (
"database/sql"
"math/big"
"testing"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/status-im/status-go/common/dbsetup"
"github.com/status-im/status-go/services/wallet/bigint"
"github.com/status-im/status-go/walletdatabase"
)
func setupBalanceDBTest(t *testing.T) (*BalanceDB, func()) {
db, err := walletdatabase.InitializeDB(dbsetup.InMemoryPath, "wallet-history-balance_db-tests", 1)
require.NoError(t, err)
return NewBalanceDB(db), func() {
require.NoError(t, db.Close())
}
}
// generateTestDataForElementCount generates dummy consecutive blocks of data for the same chain_id, address and currency
func generateTestDataForElementCount(count int) (result []*entry) {
baseDataPoint := entry{
chainID: 777,
address: common.Address{7},
tokenSymbol: "ETH",
block: big.NewInt(11),
balance: big.NewInt(101),
timestamp: 11,
}
result = make([]*entry, 0, count)
for i := 0; i < count; i++ {
newDataPoint := baseDataPoint
newDataPoint.block = new(big.Int).Add(baseDataPoint.block, big.NewInt(int64(i)))
newDataPoint.balance = new(big.Int).Add(baseDataPoint.balance, big.NewInt(int64(i)))
newDataPoint.timestamp += int64(i)
result = append(result, &newDataPoint)
}
return result
}
func TestBalanceDBAddDataPoint(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoint := generateTestDataForElementCount(1)[0]
err := bDB.add(testDataPoint, filterWeekly)
require.NoError(t, err)
outDataPoint := entry{
chainID: 0,
block: big.NewInt(0),
balance: big.NewInt(0),
}
rows, err := bDB.db.Query("SELECT * FROM balance_history")
require.NoError(t, err)
ok := rows.Next()
require.True(t, ok)
bitset := 0
err = rows.Scan(&outDataPoint.chainID, &outDataPoint.address, &outDataPoint.tokenSymbol, (*bigint.SQLBigInt)(outDataPoint.block), &outDataPoint.timestamp, &bitset, (*bigint.SQLBigIntBytes)(outDataPoint.balance))
require.NoError(t, err)
require.NotEqual(t, err, sql.ErrNoRows)
require.Equal(t, testDataPoint, &outDataPoint)
ok = rows.Next()
require.False(t, ok)
}
func TestBalanceDBGetOldestDataPoint(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := len(testDataPoints) - 1; i >= 0; i-- {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
outDataPoints, _, err := bDB.get(&assetIdentity{testDataPoints[0].chainID, testDataPoints[0].address, testDataPoints[0].tokenSymbol}, nil, 1, asc)
require.NoError(t, err)
require.NotEqual(t, outDataPoints, nil)
require.Equal(t, outDataPoints[0], testDataPoints[0])
}
func TestBalanceDBGetLatestDataPoint(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := 0; i < len(testDataPoints); i++ {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
outDataPoints, _, err := bDB.get(&assetIdentity{testDataPoints[0].chainID, testDataPoints[0].address, testDataPoints[0].tokenSymbol}, nil, 1, desc)
require.NoError(t, err)
require.NotEqual(t, outDataPoints, nil)
require.Equal(t, outDataPoints[0], testDataPoints[len(testDataPoints)-1])
}
func TestBalanceDBGetFirst(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := 0; i < len(testDataPoints); i++ {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
duplicateIndex := 2
newDataPoint := entry{
chainID: testDataPoints[duplicateIndex].chainID,
address: common.Address{77},
tokenSymbol: testDataPoints[duplicateIndex].tokenSymbol,
block: new(big.Int).Set(testDataPoints[duplicateIndex].block),
balance: big.NewInt(102),
timestamp: testDataPoints[duplicateIndex].timestamp,
}
err := bDB.add(&newDataPoint, 2)
require.NoError(t, err)
outDataPoint, _, err := bDB.getFirst(testDataPoints[duplicateIndex].chainID, testDataPoints[duplicateIndex].block)
require.NoError(t, err)
require.NotEqual(t, nil, outDataPoint)
require.Equal(t, testDataPoints[duplicateIndex], outDataPoint)
}
func TestBalanceDBGetLastEntryForChain(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := 0; i < len(testDataPoints); i++ {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
// Same data with different addresses
for i := 0; i < len(testDataPoints); i++ {
newDataPoint := testDataPoints[i]
newDataPoint.address = common.Address{77}
err := bDB.add(newDataPoint, 1)
require.NoError(t, err)
}
outDataPoint, _, err := bDB.getLastEntryForChain(testDataPoints[0].chainID)
require.NoError(t, err)
require.NotEqual(t, nil, outDataPoint)
expectedDataPoint := testDataPoints[len(testDataPoints)-1]
require.Equal(t, expectedDataPoint.chainID, outDataPoint.chainID)
require.Equal(t, expectedDataPoint.tokenSymbol, outDataPoint.tokenSymbol)
require.Equal(t, expectedDataPoint.block, outDataPoint.block)
require.Equal(t, expectedDataPoint.timestamp, outDataPoint.timestamp)
require.Equal(t, expectedDataPoint.balance, outDataPoint.balance)
}
func TestBalanceDBGetDataPointsInTimeRange(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := 0; i < len(testDataPoints); i++ {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
startIndex := 1
endIndex := 3
outDataPoints, _, err := bDB.filter(&assetIdentity{testDataPoints[0].chainID, testDataPoints[0].address, testDataPoints[0].tokenSymbol}, nil, &balanceFilter{testDataPoints[startIndex].timestamp, testDataPoints[endIndex].timestamp, 1}, 100, asc)
require.NoError(t, err)
require.NotEqual(t, outDataPoints, nil)
require.Equal(t, len(outDataPoints), endIndex-startIndex+1)
for i := startIndex; i <= endIndex; i++ {
require.Equal(t, outDataPoints[i-startIndex], testDataPoints[i])
}
}
func TestBalanceDBGetClosestDataPointToTimestamp(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(5)
for i := 0; i < len(testDataPoints); i++ {
err := bDB.add(testDataPoints[i], 1)
require.NoError(t, err)
}
itemToGetIndex := 2
outDataPoints, _, err := bDB.filter(&assetIdentity{testDataPoints[0].chainID, testDataPoints[0].address, testDataPoints[0].tokenSymbol}, nil, &balanceFilter{testDataPoints[itemToGetIndex].timestamp, maxAllRangeTimestamp, 1}, 1, asc)
require.NoError(t, err)
require.NotEqual(t, outDataPoints, nil)
require.Equal(t, len(outDataPoints), 1)
require.Equal(t, outDataPoints[0], testDataPoints[itemToGetIndex])
}
func TestBalanceDBUpdateBitset(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoints := generateTestDataForElementCount(1)
err := bDB.add(testDataPoints[0], 1)
require.NoError(t, err)
err = bDB.add(testDataPoints[0], 2)
require.Error(t, err, "Expected \"UNIQUE constraint failed: ...\"")
err = bDB.updateBitset(&assetIdentity{testDataPoints[0].chainID, testDataPoints[0].address, testDataPoints[0].tokenSymbol}, testDataPoints[0].block, 2)
require.NoError(t, err)
outDataPoint := entry{
chainID: 0,
block: big.NewInt(0),
balance: big.NewInt(0),
}
rows, err := bDB.db.Query("SELECT * FROM balance_history")
require.NoError(t, err)
ok := rows.Next()
require.True(t, ok)
bitset := 0
err = rows.Scan(&outDataPoint.chainID, &outDataPoint.address, &outDataPoint.tokenSymbol, (*bigint.SQLBigInt)(outDataPoint.block), &outDataPoint.timestamp, &bitset, (*bigint.SQLBigIntBytes)(outDataPoint.balance))
require.NoError(t, err)
require.NotEqual(t, err, sql.ErrNoRows)
require.Equal(t, testDataPoints[0], &outDataPoint)
require.Equal(t, 2, bitset)
ok = rows.Next()
require.False(t, ok)
}
func TestBalanceDBCheckMissingDataPoint(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
testDataPoint := generateTestDataForElementCount(1)[0]
err := bDB.add(testDataPoint, 1)
require.NoError(t, err)
missingDataPoint := testDataPoint
missingDataPoint.block = big.NewInt(12)
outDataPoints, bitset, err := bDB.get(&assetIdentity{missingDataPoint.chainID, missingDataPoint.address, missingDataPoint.tokenSymbol}, missingDataPoint.block, 1, asc)
require.NoError(t, err)
require.Equal(t, 0, len(outDataPoints))
require.Equal(t, 0, len(bitset))
}
func TestBalanceDBBitsetFilter(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
data := generateTestDataForElementCount(3)
for i := 0; i < len(data); i++ {
err := bDB.add(data[i], 1<<i)
require.NoError(t, err)
}
for i := 0; i < len(data); i++ {
outDataPoints, bitset, err := bDB.filter(&assetIdentity{data[0].chainID, data[0].address, data[0].tokenSymbol}, nil, &balanceFilter{
minTimestamp: minAllRangeTimestamp,
maxTimestamp: maxAllRangeTimestamp,
bitset: expandFlag(1 << i),
}, 10, asc)
require.NoError(t, err)
require.Equal(t, i+1, len(outDataPoints))
require.Equal(t, bitsetFilter(1<<i), bitset[i])
}
}
func TestBalanceDBDataPointUniquenessConstraint(t *testing.T) {
bDB, cleanDB := setupBalanceDBTest(t)
defer cleanDB()
dataPoint := generateTestDataForElementCount(1)[0]
err := bDB.add(dataPoint, 1)
require.NoError(t, err)
testDataPointSame := dataPoint
testDataPointSame.balance = big.NewInt(102)
testDataPointSame.timestamp = 12
err = bDB.add(testDataPointSame, 1)
require.ErrorContains(t, err, "UNIQUE constraint failed", "should fail because of uniqueness constraint")
rows, err := bDB.db.Query("SELECT * FROM balance_history")
require.NoError(t, err)
ok := rows.Next()
require.True(t, ok)
ok = rows.Next()
require.False(t, ok)
testDataPointNew := testDataPointSame
testDataPointNew.block = big.NewInt(21)
err = bDB.add(testDataPointNew, 277)
require.NoError(t, err)
rows, err = bDB.db.Query("SELECT * FROM balance_history")
require.NoError(t, err)
ok = rows.Next()
require.True(t, ok)
ok = rows.Next()
require.True(t, ok)
ok = rows.Next()
require.False(t, ok)
outDataPoints, bitsets, err := bDB.get(&assetIdentity{testDataPointNew.chainID, testDataPointNew.address, testDataPointNew.tokenSymbol}, testDataPointNew.block, 10, asc)
require.NoError(t, err)
require.NotEqual(t, outDataPoints, nil)
require.Equal(t, 1, len(outDataPoints))
require.Equal(t, 1, len(bitsets))
require.Equal(t, testDataPointNew, outDataPoints[0])
require.Equal(t, bitsetFilter(277), bitsets[0])
}

File diff suppressed because it is too large

View File

@ -6,58 +6,59 @@ import (
"errors"
"math"
"math/big"
"reflect"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
statustypes "github.com/status-im/status-go/eth-node/types"
"github.com/status-im/status-go/multiaccounts/accounts"
"github.com/status-im/status-go/params"
statusrpc "github.com/status-im/status-go/rpc"
"github.com/status-im/status-go/rpc/chain"
"github.com/status-im/status-go/rpc/network"
"github.com/status-im/status-go/rpc/chain"
"github.com/status-im/status-go/services/wallet/balance"
"github.com/status-im/status-go/services/wallet/market"
"github.com/status-im/status-go/services/wallet/token"
"github.com/status-im/status-go/services/wallet/transfer"
"github.com/status-im/status-go/services/wallet/walletevent"
)
const minPointsForGraph = 14 // for minimal time frame - 7 days, twice a day
// EventBalanceHistoryUpdateStarted and EventBalanceHistoryUpdateDone are used to notify the UI that balance history is being updated
const (
EventBalanceHistoryUpdateStarted walletevent.EventType = "wallet-balance-history-update-started"
EventBalanceHistoryUpdateFinished walletevent.EventType = "wallet-balance-history-update-finished"
EventBalanceHistoryUpdateFinishedWithError walletevent.EventType = "wallet-balance-history-update-finished-with-error"
balanceHistoryUpdateInterval = 12 * time.Hour
)
type Service struct {
balance *Balance
db *sql.DB
accountsDB *accounts.Database
eventFeed *event.Feed
rpcClient *statusrpc.Client
networkManager *network.Manager
tokenManager *token.Manager
serviceContext context.Context
cancelFn context.CancelFunc
exchange *Exchange
timer *time.Timer
visibleTokenSymbols []string
visibleTokenSymbolsMutex sync.Mutex
type ValuePoint struct {
Value float64 `json:"value"`
Timestamp uint64 `json:"time"`
}
type chainIdentity uint64
type Service struct {
balance *Balance
db *sql.DB
accountsDB *accounts.Database
eventFeed *event.Feed
rpcClient *statusrpc.Client
networkManager *network.Manager
tokenManager *token.Manager
serviceContext context.Context
cancelFn context.CancelFunc
transferWatcher *Watcher
exchange *Exchange
balanceCache balance.CacheIface
}
func NewService(db *sql.DB, accountsDB *accounts.Database, eventFeed *event.Feed, rpcClient *statusrpc.Client, tokenManager *token.Manager, marketManager *market.Manager) *Service {
func NewService(db *sql.DB, accountsDB *accounts.Database, eventFeed *event.Feed, rpcClient *statusrpc.Client, tokenManager *token.Manager, marketManager *market.Manager, balanceCache balance.CacheIface) *Service {
return &Service{
balance: NewBalance(NewBalanceDB(db)),
db: db,
@ -67,6 +68,7 @@ func NewService(db *sql.DB, accountsDB *accounts.Database, eventFeed *event.Feed
networkManager: rpcClient.NetworkManager,
tokenManager: tokenManager,
exchange: NewExchange(marketManager),
balanceCache: balanceCache,
}
}
@ -74,6 +76,8 @@ func (s *Service) Stop() {
if s.cancelFn != nil {
s.cancelFn()
}
s.stopTransfersWatcher()
}
func (s *Service) triggerEvent(eventType walletevent.EventType, account statustypes.Address, message string) {
@ -87,153 +91,158 @@ func (s *Service) triggerEvent(eventType walletevent.EventType, account statusty
}
func (s *Service) Start() {
log.Debug("Starting balance history service")
s.startTransfersWatcher()
go func() {
s.serviceContext, s.cancelFn = context.WithCancel(context.Background())
s.timer = time.NewTimer(balanceHistoryUpdateInterval)
update := func() (exit bool) {
err := s.updateBalanceHistory(s.serviceContext)
if s.serviceContext.Err() != nil {
s.triggerEvent(EventBalanceHistoryUpdateFinished, statustypes.Address{}, "Service canceled")
s.timer.Stop()
return true
}
if err != nil {
s.triggerEvent(EventBalanceHistoryUpdateFinishedWithError, statustypes.Address{}, err.Error())
}
return false
err := s.updateBalanceHistory(s.serviceContext)
if s.serviceContext.Err() != nil {
s.triggerEvent(EventBalanceHistoryUpdateFinished, statustypes.Address{}, "Service canceled")
}
if update() {
return
}
for range s.timer.C {
s.resetTimer(balanceHistoryUpdateInterval)
if update() {
return
}
if err != nil {
s.triggerEvent(EventBalanceHistoryUpdateFinishedWithError, statustypes.Address{}, err.Error())
}
}()
}
func (s *Service) resetTimer(interval time.Duration) {
if s.timer != nil {
s.timer.Stop()
s.timer.Reset(interval)
}
}
func (s *Service) mergeChainsBalances(chainIDs []uint64, address common.Address, tokenSymbol string, fromTimestamp uint64, data map[uint64][]*entry) ([]*DataPoint, error) {
log.Debug("Merging balances", "address", address, "tokenSymbol", tokenSymbol, "fromTimestamp", fromTimestamp, "len(data)", len(data))
func (s *Service) UpdateVisibleTokens(symbols []string) {
s.visibleTokenSymbolsMutex.Lock()
defer s.visibleTokenSymbolsMutex.Unlock()
toTimestamp := uint64(time.Now().UTC().Unix())
allData := make([]*entry, 0)
startUpdate := len(s.visibleTokenSymbols) == 0 && len(symbols) > 0
s.visibleTokenSymbols = symbols
if startUpdate {
s.resetTimer(0)
}
}
func (s *Service) isTokenVisible(tokenSymbol string) bool {
s.visibleTokenSymbolsMutex.Lock()
defer s.visibleTokenSymbolsMutex.Unlock()
for _, visibleSymbol := range s.visibleTokenSymbols {
if visibleSymbol == tokenSymbol {
return true
}
}
return false
}
// Native token implementation of DataSource interface
type chainClientSource struct {
chainClient chain.ClientInterface
currency string
}
func (src *chainClientSource) HeaderByNumber(ctx context.Context, blockNo *big.Int) (*types.Header, error) {
return src.chainClient.HeaderByNumber(ctx, blockNo)
}
func (src *chainClientSource) BalanceAt(ctx context.Context, account common.Address, blockNo *big.Int) (*big.Int, error) {
return src.chainClient.BalanceAt(ctx, account, blockNo)
}
func (src *chainClientSource) ChainID() uint64 {
return src.chainClient.NetworkID()
}
func (src *chainClientSource) Currency() string {
return src.currency
}
func (src *chainClientSource) TimeNow() int64 {
return time.Now().UTC().Unix()
}
// ERC20 token implementation of DataSource interface
type tokenChainClientSource struct {
chainClientSource
TokenManager *token.Manager
NetworkManager *network.Manager
firstUnavailableBlockNo *big.Int
}
func (src *tokenChainClientSource) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) {
network := src.NetworkManager.Find(src.chainClient.NetworkID())
if network == nil {
return nil, errors.New("network not found")
}
token := src.TokenManager.FindToken(network, src.currency)
if token == nil {
return nil, errors.New("token not found")
}
if src.firstUnavailableBlockNo != nil && blockNumber.Cmp(src.firstUnavailableBlockNo) < 0 {
return big.NewInt(0), nil
}
balance, err := src.TokenManager.GetTokenBalanceAt(ctx, src.chainClient, account, token.Address, blockNumber)
if err != nil {
if err == bind.ErrNoCode {
// Ignore requests before contract deployment and mark this state for future requests
src.firstUnavailableBlockNo = new(big.Int).Set(blockNumber)
return big.NewInt(0), nil
}
return nil, err
}
return balance, err
}
type ValuePoint struct {
Value float64 `json:"value"`
Timestamp uint64 `json:"time"`
BlockNumber *hexutil.Big `json:"blockNumber"`
}
// GetBalanceHistory returns the token balance history as value points in the requested currency
func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, address common.Address, tokenSymbol string, currencySymbol string, endTimestamp int64, timeInterval TimeInterval) ([]*ValuePoint, error) {
// Retrieve cached data for all chains
allData := make(map[chainIdentity][]*DataPoint)
// Add edge points per chain
// Iterate over chainIDs param, not data keys, because data may not contain all the chains, but we need edge points for all of them
for _, chainID := range chainIDs {
data, err := s.balance.get(ctx, chainID, tokenSymbol, address, endTimestamp, timeInterval)
// edge points are needed to properly calculate total balance, as they contain the balance for the first and last timestamp
chainData, err := s.balance.addEdgePoints(chainID, tokenSymbol, address, fromTimestamp, toTimestamp, data[chainID])
if err != nil {
return nil, err
}
if len(data) > 0 {
allData[chainIdentity(chainID)] = data
allData = append(allData, chainData...)
}
// Sort by timestamp
sort.Slice(allData, func(i, j int) bool {
return allData[i].timestamp < allData[j].timestamp
})
log.Debug("Sorted balances", "len", len(allData))
for _, entry := range allData {
log.Debug("Sorted balances", "entry", entry)
}
// Add padding points to make chart look nice
if len(allData) < minPointsForGraph {
allData, _ = addPaddingPoints(tokenSymbol, address, toTimestamp, allData, minPointsForGraph)
}
return entriesToDataPoints(chainIDs, allData)
}
// Expects sorted data
func entriesToDataPoints(chainIDs []uint64, data []*entry) ([]*DataPoint, error) {
var resSlice []*DataPoint
var groupedEntries []*entry // Entries with the same timestamp
sumBalances := func(entries []*entry) *big.Int {
sum := big.NewInt(0)
for _, entry := range entries {
sum.Add(sum, entry.balance)
}
return sum
}
// calculate the balance for entries with the same timestamp and add a single point for them
for _, entry := range data {
if len(groupedEntries) > 0 {
if entry.timestamp == groupedEntries[0].timestamp {
groupedEntries = append(groupedEntries, entry)
continue
} else {
// Calculate balance for the grouped entries
cumulativeBalance := sumBalances(groupedEntries)
// Points in slice contain balances for all chains
resSlice = appendPointToSlice(resSlice, &DataPoint{
Timestamp: uint64(groupedEntries[0].timestamp),
Balance: (*hexutil.Big)(cumulativeBalance),
})
// Reset grouped entries
groupedEntries = nil
groupedEntries = append(groupedEntries, entry)
}
} else {
groupedEntries = append(groupedEntries, entry)
}
}
data, err := mergeDataPoints(allData, timeIntervalToStrideDuration[timeInterval])
// If only edge points are present, groupedEntries will be non-empty
if len(groupedEntries) > 0 {
cumulativeBalance := sumBalances(groupedEntries)
resSlice = appendPointToSlice(resSlice, &DataPoint{
Timestamp: uint64(groupedEntries[0].timestamp),
Balance: (*hexutil.Big)(cumulativeBalance),
})
}
return resSlice, nil
}
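As a hedged illustration of the grouping above (hypothetical values; entry and DataPoint are the package types exercised by the tests further down):

```go
// Two chains share timestamp 1, so their balances are summed into a single
// point; the entry at timestamp 2 becomes its own point.
entries := []*entry{
	{chainID: 1, balance: big.NewInt(1), timestamp: 1},
	{chainID: 2, balance: big.NewInt(2), timestamp: 1},
	{chainID: 1, balance: big.NewInt(5), timestamp: 2},
}
points, _ := entriesToDataPoints([]uint64{1, 2}, entries)
// points: {Timestamp: 1, Balance: 3}, {Timestamp: 2, Balance: 5}
```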
func appendPointToSlice(slice []*DataPoint, point *DataPoint) []*DataPoint {
// Replace the last point in slice if it has the same timestamp or add a new one if different
if len(slice) > 0 {
if slice[len(slice)-1].Timestamp != point.Timestamp {
// Timestamps are different, appending to slice
slice = append(slice, point)
} else {
// Replace last item in slice because timestamps are the same
slice[len(slice)-1] = point
}
} else {
slice = append(slice, point)
}
return slice
}
// GetBalanceHistory returns the token balance history as value points in the requested currency
func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, address common.Address, tokenSymbol string, currencySymbol string, fromTimestamp uint64) ([]*ValuePoint, error) {
log.Debug("GetBalanceHistory", "chainIDs", chainIDs, "address", address.String(), "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "fromTimestamp", fromTimestamp)
chainDataMap := make(map[uint64][]*entry)
for _, chainID := range chainIDs {
chainData, err := s.balance.get(ctx, chainID, tokenSymbol, address, fromTimestamp) // TODO Make chainID a slice?
if err != nil {
return nil, err
}
if len(chainData) == 0 {
continue
}
chainDataMap[chainID] = chainData
}
// Need to get balance for all the chains for the first timestamp, otherwise total values will be incorrect
data, err := s.mergeChainsBalances(chainIDs, address, tokenSymbol, fromTimestamp, chainDataMap)
if err != nil {
return nil, err
} else if len(data) == 0 {
return make([]*ValuePoint, 0), nil
}
return s.dataPointsToValuePoints(chainIDs, tokenSymbol, currencySymbol, data)
}
func (s *Service) dataPointsToValuePoints(chainIDs []uint64, tokenSymbol string, currencySymbol string, data []*DataPoint) ([]*ValuePoint, error) {
if len(data) == 0 {
return make([]*ValuePoint, 0), nil
}
// Check if historical exchange rate for data point is present and fetch remaining if not
lastDayTime := time.Unix(int64(data[len(data)-1].Timestamp), 0).UTC()
currentTime := time.Now().UTC()
@ -243,10 +252,17 @@ func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addr
lastDayTime = lastDayTime.AddDate(0, 0, -1)
}
_, err = s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, lastDayTime)
lastDayValue, err := s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, lastDayTime)
if err != nil {
err := s.exchange.FetchAndCacheMissingRates(tokenSymbol, currencySymbol)
if err != nil {
log.Error("Error fetching exchange rates", "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "err", err)
return nil, err
}
lastDayValue, err = s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, lastDayTime)
if err != nil {
log.Error("Exchange rate missing for", "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "lastDayTime", lastDayTime, "err", err)
return nil, err
}
}
@ -259,24 +275,31 @@ func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addr
var res []*ValuePoint
for _, d := range data {
var dayValue float32
dayTime := time.Unix(int64(d.Timestamp), 0).UTC()
if dayTime.After(currentDayStart) {
// Today's rate is not available yet, so use the previous day's value for the last data point
dayTime = lastDayTime
}
dayValue, err := s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, dayTime)
if err != nil {
log.Warn("Echange rate missing for", dayTime, "- err", err)
continue
if lastDayValue > 0 {
dayValue = lastDayValue
} else {
log.Warn("Exchange rate missing for", "dayTime", dayTime, "err", err)
continue
}
} else {
dayValue, err = s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, dayTime)
if err != nil {
log.Warn("Exchange rate missing for", "dayTime", dayTime, "err", err)
continue
}
}
// The big.Int values are discarded, hence copy the original values
res = append(res, &ValuePoint{
Timestamp: d.Timestamp,
Value: tokenToValue((*big.Int)(d.Balance), dayValue, weisInOneMain),
BlockNumber: d.BlockNumber,
Timestamp: d.Timestamp,
Value: tokenToValue((*big.Int)(d.Balance), dayValue, weisInOneMain),
})
}
return res, nil
}
@ -306,176 +329,12 @@ func tokenToValue(tokenCount *big.Int, mainDenominationValue float32, weisInOneM
return res
}
// mergeDataPoints merges data points that are close in time and drops those that do not fall within a full stride window.
// This should improve merging balance data from different chains, which are otherwise incompatible due to differing
// timelines and block intervals
func mergeDataPoints(data map[chainIdentity][]*DataPoint, stride time.Duration) ([]*DataPoint, error) {
// Special cases
if len(data) == 0 {
return make([]*DataPoint, 0), nil
} else if len(data) == 1 {
for k := range data {
return data[k], nil
}
}
res := make([]*DataPoint, 0)
strideStart, pos := findFirstStrideWindow(data, stride)
for {
strideEnd := strideStart + int64(stride.Seconds())
// - Gather all points in the stride window starting with current pos
var strideIdentities map[chainIdentity][]timeIdentity
strideIdentities, pos = dataInStrideWindowAndNextPos(data, pos, strideEnd)
// Check if all chains have data
strideComplete := true
for k := range data {
_, strideComplete = strideIdentities[k]
if !strideComplete {
break
}
}
if strideComplete {
chainMaxBalance := make(map[chainIdentity]*DataPoint)
for chainID, identities := range strideIdentities {
for _, identity := range identities {
_, exists := chainMaxBalance[chainID]
if exists && (*big.Int)(identity.dataPoint(data).Balance).Cmp((*big.Int)(chainMaxBalance[chainID].Balance)) <= 0 {
continue
}
chainMaxBalance[chainID] = identity.dataPoint(data)
}
}
balance := big.NewInt(0)
for _, chainBalance := range chainMaxBalance {
balance.Add(balance, (*big.Int)(chainBalance.Balance))
}
// if last stride, the timestamp might be in the future
if strideEnd > time.Now().UTC().Unix() {
strideEnd = time.Now().UTC().Unix()
}
res = append(res, &DataPoint{
Timestamp: uint64(strideEnd),
Balance: (*hexutil.Big)(balance),
BlockNumber: (*hexutil.Big)(getBlockID(chainMaxBalance)),
})
}
if allPastEnd(data, pos) {
return res, nil
}
strideStart = strideEnd
}
}
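A minimal usage sketch of this merge logic, mirroring TestServiceMergeDataPointsOneChain below; with a single chain the special case returns the data unchanged:

```go
data := map[chainIdentity][]*DataPoint{
	1: {
		{Timestamp: 105, Balance: (*hexutil.Big)(big.NewInt(1)), BlockNumber: (*hexutil.Big)(big.NewInt(105))},
		{Timestamp: 115, Balance: (*hexutil.Big)(big.NewInt(2)), BlockNumber: (*hexutil.Big)(big.NewInt(115))},
	},
}
merged, err := mergeDataPoints(data, 10*time.Second)
// err == nil, len(merged) == 2: one chain means nothing to merge or drop.
```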
func getBlockID(chainBalance map[chainIdentity]*DataPoint) *big.Int {
var res *big.Int
for _, balance := range chainBalance {
if res == nil {
res = new(big.Int).Set(balance.BlockNumber.ToInt())
} else if res.Cmp(balance.BlockNumber.ToInt()) != 0 {
return nil
}
}
return res
}
type timeIdentity struct {
chain chainIdentity
index int
}
func (i timeIdentity) dataPoint(data map[chainIdentity][]*DataPoint) *DataPoint {
return data[i.chain][i.index]
}
func (i timeIdentity) atEnd(data map[chainIdentity][]*DataPoint) bool {
return (i.index + 1) == len(data[i.chain])
}
func (i timeIdentity) pastEnd(data map[chainIdentity][]*DataPoint) bool {
return i.index >= len(data[i.chain])
}
func allPastEnd(data map[chainIdentity][]*DataPoint, pos map[chainIdentity]int) bool {
for chainID := range pos {
if !(timeIdentity{chainID, pos[chainID]}).pastEnd(data) {
return false
}
}
return true
}
// findFirstStrideWindow returns the start of the first stride window (timestamp and all positions)
//
// Note: tried to implement finding an optimal stride window, but it was becoming too complicated and not worth it: it would potentially save the first and last stride, and even that is not guaranteed. The current implementation should give good results as long as the DataPoints are regular enough
func findFirstStrideWindow(data map[chainIdentity][]*DataPoint, stride time.Duration) (firstTimestamp int64, pos map[chainIdentity]int) {
pos = make(map[chainIdentity]int)
for k := range data {
pos[k] = 0
}
cur := sortTimeAsc(data, pos)
return int64(cur[0].dataPoint(data).Timestamp), pos
}
func copyMap[K comparable, V any](original map[K]V) map[K]V {
copy := make(map[K]V, len(original))
for key, value := range original {
copy[key] = value
}
return copy
}
// startPos might have indexes past the end of the data for a chain
func dataInStrideWindowAndNextPos(data map[chainIdentity][]*DataPoint, startPos map[chainIdentity]int, endT int64) (identities map[chainIdentity][]timeIdentity, nextPos map[chainIdentity]int) {
pos := copyMap(startPos)
identities = make(map[chainIdentity][]timeIdentity)
// Identify the current oldest and newest block
lastLen := int(-1)
for lastLen < len(identities) {
lastLen = len(identities)
sorted := sortTimeAsc(data, pos)
for _, identity := range sorted {
if identity.dataPoint(data).Timestamp < uint64(endT) {
identities[identity.chain] = append(identities[identity.chain], identity)
pos[identity.chain]++
}
}
}
return identities, pos
}
// sortTimeAsc expects that indexes in pos may point past the end of the data for a chain
func sortTimeAsc(data map[chainIdentity][]*DataPoint, pos map[chainIdentity]int) []timeIdentity {
res := make([]timeIdentity, 0, len(data))
for k := range data {
identity := timeIdentity{
chain: k,
index: pos[k],
}
if !identity.pastEnd(data) {
res = append(res, identity)
}
}
sort.Slice(res, func(i, j int) bool {
return res[i].dataPoint(data).Timestamp < res[j].dataPoint(data).Timestamp
})
return res
}
// updateBalanceHistory iterates over all networks depending on test/prod for the s.visibleTokenSymbol
// and updates the balance history for the given address
//
// expects ctx to have cancellation support and processing to be cancelled by the caller
func (s *Service) updateBalanceHistory(ctx context.Context) error {
log.Debug("updateBalanceHistory started")
addresses, err := s.accountsDB.GetWalletAddresses()
if err != nil {
@ -487,7 +346,8 @@ func (s *Service) updateBalanceHistory(ctx context.Context) error {
return err
}
networks, err := s.networkManager.Get(false)
onlyEnabledNetworks := false
networks, err := s.networkManager.Get(onlyEnabledNetworks)
if err != nil {
return err
}
@ -499,49 +359,177 @@ func (s *Service) updateBalanceHistory(ctx context.Context) error {
if network.IsTest != areTestNetworksEnabled {
continue
}
tokensForChain, err := s.tokenManager.GetTokens(network.ChainID)
entries, err := s.balance.db.getEntriesWithoutBalances(network.ChainID, common.Address(address))
if err != nil {
tokensForChain = make([]*token.Token, 0)
log.Error("Error getting blocks without balances", "chainID", network.ChainID, "address", address.String(), "err", err)
return err
}
tokensForChain = append(tokensForChain, s.tokenManager.ToToken(network))
for _, token := range tokensForChain {
if !s.isTokenVisible(token.Symbol) {
continue
}
log.Debug("Blocks without balances", "chainID", network.ChainID, "address", address.String(), "entries", entries)
var dataSource DataSource
chainClient, err := s.rpcClient.EthClient(network.ChainID)
if err != nil {
return err
}
if token.IsNative() {
dataSource = &chainClientSource{chainClient, token.Symbol}
} else {
dataSource = &tokenChainClientSource{
chainClientSource: chainClientSource{
chainClient: chainClient,
currency: token.Symbol,
},
TokenManager: s.tokenManager,
NetworkManager: s.networkManager,
}
}
client, err := s.rpcClient.EthClient(network.ChainID)
if err != nil {
log.Error("Error getting client", "chainID", network.ChainID, "address", address.String(), "err", err)
return err
}
for currentInterval := int(BalanceHistoryAllTime); currentInterval >= int(BalanceHistory7Days); currentInterval-- {
select {
case <-ctx.Done():
return errors.New("context cancelled")
default:
}
err = s.balance.update(ctx, dataSource, common.Address(address), TimeInterval(currentInterval))
if err != nil {
log.Warn("Error updating balance history", "chainID", dataSource.ChainID(), "currency", dataSource.Currency(), "address", address.String(), "interval", currentInterval, "err", err)
}
}
err = s.addEntriesToDB(ctx, client, network, address, entries)
if err != nil {
return err
}
}
s.triggerEvent(EventBalanceHistoryUpdateFinished, address, "")
}
log.Debug("updateBalanceHistory finished")
return nil
}
func (s *Service) addEntriesToDB(ctx context.Context, client chain.ClientInterface, network *params.Network, address statustypes.Address, entries []*entry) (err error) {
for _, entry := range entries {
var balance *big.Int
// tokenAddress is zero for the native currency
if (entry.tokenAddress == common.Address{}) {
// Check in cache
balance = s.balanceCache.GetBalance(common.Address(address), network.ChainID, entry.block)
log.Debug("Balance from cache", "chainID", network.ChainID, "address", address.String(), "block", entry.block, "balance", balance)
if balance == nil {
balance, err = client.BalanceAt(ctx, common.Address(address), entry.block)
if balance == nil {
log.Error("Error getting balance", "chainID", network.ChainID, "address", address.String(), "err", err, "unwrapped", errors.Unwrap(err))
return err
}
time.Sleep(50 * time.Millisecond) // TODO Remove this sleep once exceeding the rate limit is fixed
}
entry.tokenSymbol = network.NativeCurrencySymbol
} else {
// Check first whether the token is supported
token := s.tokenManager.FindTokenByAddress(network.ChainID, entry.tokenAddress)
if token == nil {
log.Warn("Token not found", "chainID", network.ChainID, "address", address.String(), "tokenAddress", entry.tokenAddress.String())
// TODO Add "supported=false" flag to such tokens to avoid checking them again and again
continue // Skip token that we don't have symbol for. For example we don't have tokens in store for goerli optimism
} else {
entry.tokenSymbol = token.Symbol
}
// Check balance for token
balance, err = s.tokenManager.GetTokenBalanceAt(ctx, client, common.Address(address), entry.tokenAddress, entry.block)
log.Debug("Balance from token manager", "chainID", network.ChainID, "address", address.String(), "block", entry.block, "balance", balance)
if err != nil {
log.Error("Error getting token balance", "chainID", network.ChainID, "address", address.String(), "tokenAddress", entry.tokenAddress.String(), "err", err)
return err
}
}
entry.balance = balance
err = s.balance.db.add(entry)
if err != nil {
log.Error("Error adding balance", "chainID", network.ChainID, "address", address.String(), "err", err)
return err
}
}
return nil
}
func (s *Service) startTransfersWatcher() {
if s.transferWatcher != nil {
return
}
transferLoadedCb := func(chainID uint64, addresses []common.Address, block *big.Int) {
log.Debug("Balance history watcher: transfer loaded:", "chainID", chainID, "addresses", addresses, "block", block.Uint64())
client, err := s.rpcClient.EthClient(chainID)
if err != nil {
log.Error("Error getting client", "chainID", chainID, "err", err)
return
}
transferDB := transfer.NewDB(s.db)
for _, address := range addresses {
network := s.networkManager.Find(chainID)
transfers, err := transferDB.GetTransfersByAddressAndBlock(chainID, address, block, 1500) // 1500 is quite arbitrary and far from real, but should be enough to cover all transfers in a block
if err != nil {
log.Error("Error getting transfers", "chainID", chainID, "address", address.String(), "err", err)
continue
}
if len(transfers) == 0 {
log.Debug("No transfers found", "chainID", chainID, "address", address.String(), "block", block.Uint64())
continue
}
entries := transfersToEntries(address, block, transfers) // TODO Remove address and block after testing that they match
unique := removeDuplicates(entries)
log.Debug("Entries after filtering", "entries", entries, "unique", unique)
err = s.addEntriesToDB(s.serviceContext, client, network, statustypes.Address(address), unique)
if err != nil {
log.Error("Error adding entries to DB", "chainID", chainID, "address", address.String(), "err", err)
continue
}
// No event triggering here, because no one cares about balance history updates yet
}
}
s.transferWatcher = NewWatcher(s.eventFeed, transferLoadedCb)
s.transferWatcher.Start()
}
func removeDuplicates(entries []*entry) []*entry {
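// Quadratic scan using reflect.DeepEqual; assumed acceptable for the few entries a single block produces.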
unique := make([]*entry, 0, len(entries))
for _, entry := range entries {
found := false
for _, u := range unique {
if reflect.DeepEqual(entry, u) {
found = true
break
}
}
if !found {
unique = append(unique, entry)
}
}
return unique
}
func transfersToEntries(address common.Address, block *big.Int, transfers []transfer.Transfer) []*entry {
entries := make([]*entry, 0)
for _, transfer := range transfers {
if transfer.Address != address {
panic("Address mismatch") // coding error
}
if transfer.BlockNumber.Cmp(block) != 0 {
panic("Block number mismatch") // coding error
}
entry := &entry{
chainID: transfer.NetworkID,
address: transfer.Address,
tokenAddress: transfer.Receipt.ContractAddress,
block: transfer.BlockNumber,
timestamp: (int64)(transfer.Timestamp),
}
entries = append(entries, entry)
}
return entries
}
func (s *Service) stopTransfersWatcher() {
if s.transferWatcher != nil {
s.transferWatcher.Stop()
s.transferWatcher = nil
}
}

View File

@ -1,284 +1,288 @@
package history
import (
"context"
"math"
"math/big"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/event"
gethrpc "github.com/ethereum/go-ethereum/rpc"
"github.com/golang/mock/gomock"
"github.com/status-im/status-go/params"
statusRPC "github.com/status-im/status-go/rpc"
"github.com/status-im/status-go/services/wallet/market"
"github.com/status-im/status-go/services/wallet/thirdparty/cryptocompare"
"github.com/status-im/status-go/t/helpers"
"github.com/status-im/status-go/transactions/fake"
"github.com/status-im/status-go/walletdatabase"
"github.com/stretchr/testify/require"
)
func setupDummyServiceNoDependencies(t *testing.T) (service *Service, closeFn func()) {
db, err := helpers.SetupTestMemorySQLDB(walletdatabase.DbInitializer{})
require.NoError(t, err)
cryptoCompare := cryptocompare.NewClient()
// Creating a dummy status node to simulate what is done in get_status_node.go
upstreamConfig := params.UpstreamRPCConfig{
URL: "https://mainnet.infura.io/v3/800c641949d64d768a5070a1b0511938",
Enabled: true,
func Test_entriesToDataPoints(t *testing.T) {
type args struct {
chainIDs []uint64
data []*entry
}
tests := []struct {
name string
args args
want []*DataPoint
wantErr bool
}{
{
name: "zeroAllChainsSameTimestamp",
args: args{
chainIDs: []uint64{1, 2},
data: []*entry{
{
chainID: 1,
balance: big.NewInt(0),
timestamp: 1,
block: big.NewInt(1),
},
{
chainID: 2,
balance: big.NewInt(0),
timestamp: 1,
block: big.NewInt(5),
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(0)),
Timestamp: 1,
},
},
wantErr: false,
},
{
name: "oneZeroAllChainsDifferentTimestamp",
args: args{
chainIDs: []uint64{1, 2},
data: []*entry{
{
chainID: 2,
balance: big.NewInt(0),
timestamp: 1,
block: big.NewInt(1),
},
{
chainID: 1,
balance: big.NewInt(2),
timestamp: 2,
block: big.NewInt(2),
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(0)),
Timestamp: 1,
},
{
Balance: (*hexutil.Big)(big.NewInt(2)),
Timestamp: 2,
},
},
wantErr: false,
},
{
name: "nonZeroAllChainsDifferentTimestamp",
args: args{
chainIDs: []uint64{1, 2},
data: []*entry{
{
chainID: 2,
balance: big.NewInt(1),
timestamp: 1,
},
{
chainID: 1,
balance: big.NewInt(2),
timestamp: 2,
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(1)),
Timestamp: 1,
},
{
Balance: (*hexutil.Big)(big.NewInt(2)),
Timestamp: 2,
},
},
wantErr: false,
},
{
name: "sameChainDifferentTimestamp",
args: args{
chainIDs: []uint64{1, 2},
data: []*entry{
{
chainID: 1,
balance: big.NewInt(1),
timestamp: 1,
block: big.NewInt(1),
},
{
chainID: 1,
balance: big.NewInt(2),
timestamp: 2,
block: big.NewInt(2),
},
{
chainID: 1,
balance: big.NewInt(0),
timestamp: 3,
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(1)),
Timestamp: 1,
},
{
Balance: (*hexutil.Big)(big.NewInt(2)),
Timestamp: 2,
},
{
Balance: (*hexutil.Big)(big.NewInt(0)),
Timestamp: 3,
},
},
wantErr: false,
},
{
name: "sameChainDifferentTimestampOtherChainsEmpty",
args: args{
chainIDs: []uint64{1, 2},
data: []*entry{
{
chainID: 1,
balance: big.NewInt(1),
timestamp: 1,
block: big.NewInt(1),
},
{
chainID: 1,
balance: big.NewInt(2),
timestamp: 2,
block: big.NewInt(2),
},
{
chainID: 2,
balance: big.NewInt(0),
timestamp: 2,
block: big.NewInt(2),
},
{
chainID: 1,
balance: big.NewInt(2),
timestamp: 3,
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(1)),
Timestamp: 1,
},
{
Balance: (*hexutil.Big)(big.NewInt(2)),
Timestamp: 2,
},
{
Balance: (*hexutil.Big)(big.NewInt(2)),
Timestamp: 3,
},
},
wantErr: false,
},
{
name: "onlyEdgePointsOnManyChainsWithPadding",
args: args{
chainIDs: []uint64{1, 2, 3},
data: []*entry{
// Left edge - same timestamp
{
chainID: 1,
balance: big.NewInt(1),
timestamp: 1,
},
{
chainID: 2,
balance: big.NewInt(2),
timestamp: 1,
},
{
chainID: 3,
balance: big.NewInt(3),
timestamp: 1,
},
// Padding
{
chainID: 0,
balance: big.NewInt(6),
timestamp: 2,
},
{
chainID: 0,
balance: big.NewInt(6),
timestamp: 3,
},
{
chainID: 0,
balance: big.NewInt(6),
timestamp: 4,
},
// Right edge - same timestamp
{
chainID: 1,
balance: big.NewInt(1),
timestamp: 5,
},
{
chainID: 2,
balance: big.NewInt(2),
timestamp: 5,
},
{
chainID: 3,
balance: big.NewInt(3),
timestamp: 5,
},
},
},
want: []*DataPoint{
{
Balance: (*hexutil.Big)(big.NewInt(6)),
Timestamp: 1,
},
{
Balance: (*hexutil.Big)(big.NewInt(6)),
Timestamp: 2,
},
{
Balance: (*hexutil.Big)(big.NewInt(6)),
Timestamp: 3,
},
{
Balance: (*hexutil.Big)(big.NewInt(6)),
Timestamp: 4,
},
{
Balance: (*hexutil.Big)(big.NewInt(6)),
Timestamp: 5,
},
},
wantErr: false,
},
}
txServiceMockCtrl := gomock.NewController(t)
server, _ := fake.NewTestServer(txServiceMockCtrl)
client := gethrpc.DialInProc(server)
rpcClient, err := statusRPC.NewClient(client, 1, upstreamConfig, nil, db)
require.NoError(t, err)
return NewService(db, nil, nil, rpcClient, nil, market.NewManager(cryptoCompare, cryptoCompare, &event.Feed{})), func() {
require.NoError(t, db.Close())
}
}
type TestDataPoint struct {
value int64
timestamp uint64
blockNumber int64
chainID chainIdentity
}
// prepareTestData generates dummy consecutive blocks of data for the same chain_id, address and currency
func prepareTestData(data []TestDataPoint) map[chainIdentity][]*DataPoint {
res := make(map[chainIdentity][]*DataPoint)
for i := 0; i < len(data); i++ {
entry := data[i]
_, found := res[entry.chainID]
if !found {
res[entry.chainID] = make([]*DataPoint, 0)
}
res[entry.chainID] = append(res[entry.chainID], &DataPoint{
BlockNumber: (*hexutil.Big)(big.NewInt(data[i].blockNumber)),
Timestamp: data[i].timestamp,
Balance: (*hexutil.Big)(big.NewInt(data[i].value)),
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := entriesToDataPoints(tt.args.chainIDs, tt.args.data)
if (err != nil) != tt.wantErr {
t.Errorf("entriesToDataPoints() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("entriesToDataPoints() = %v, want %v", got, tt.want)
}
})
}
return res
}
// getBlockNumbers returns -1 if block number is nil
func getBlockNumbers(data []*DataPoint) []int64 {
res := make([]int64, 0)
for _, entry := range data {
if entry.BlockNumber == nil {
res = append(res, -1)
} else {
res = append(res, entry.BlockNumber.ToInt().Int64())
}
}
return res
}
func getValues(data []*DataPoint) []int64 {
res := make([]int64, 0)
for _, entry := range data {
res = append(res, entry.Balance.ToInt().Int64())
}
return res
}
func getTimestamps(data []*DataPoint) []int64 {
res := make([]int64, 0)
for _, entry := range data {
res = append(res, int64(entry.Timestamp))
}
return res
}
func TestServiceMergeDataPoints(t *testing.T) {
strideDuration := 5 * time.Second
testData := prepareTestData([]TestDataPoint{
// Drop 100
{value: 1, timestamp: 100, blockNumber: 100, chainID: 1},
{value: 1, timestamp: 100, blockNumber: 100, chainID: 2},
// Keep 105
{value: 1, timestamp: 105, blockNumber: 105, chainID: 1},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 2},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 3},
// Drop 110
{value: 1, timestamp: 105, blockNumber: 105, chainID: 2},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 3},
// Keep 115
{value: 2, timestamp: 115, blockNumber: 115, chainID: 1},
{value: 2, timestamp: 115, blockNumber: 115, chainID: 2},
{value: 2, timestamp: 115, blockNumber: 115, chainID: 3},
// Drop 120
{value: 1, timestamp: 120, blockNumber: 120, chainID: 3},
// Keep 125
{value: 3, timestamp: 125, blockNumber: 125, chainID: 1},
{value: 3, timestamp: 125, blockNumber: 125, chainID: 2},
{value: 3, timestamp: 125, blockNumber: 125, chainID: 3},
// Keep 130
{value: 4, timestamp: 130, blockNumber: 130, chainID: 1},
{value: 4, timestamp: 130, blockNumber: 130, chainID: 2},
{value: 4, timestamp: 130, blockNumber: 130, chainID: 3},
// Drop 135
{value: 1, timestamp: 135, blockNumber: 135, chainID: 1},
})
res, err := mergeDataPoints(testData, strideDuration)
require.NoError(t, err)
require.Equal(t, 4, len(res))
require.Equal(t, []int64{105, 115, 125, 130}, getBlockNumbers(res))
require.Equal(t, []int64{3, 3 * 2, 3 * 3, 3 * 4}, getValues(res))
require.Equal(t, []int64{110, 120, 130, 135}, getTimestamps(res))
}
func TestServiceMergeDataPointsAllMatch(t *testing.T) {
strideDuration := 10 * time.Second
testData := prepareTestData([]TestDataPoint{
// Keep 105
{value: 1, timestamp: 105, blockNumber: 105, chainID: 1},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 2},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 3},
// Keep 115
{value: 2, timestamp: 115, blockNumber: 115, chainID: 1},
{value: 2, timestamp: 115, blockNumber: 115, chainID: 2},
{value: 2, timestamp: 115, blockNumber: 115, chainID: 3},
// Keep 125
{value: 3, timestamp: 125, blockNumber: 125, chainID: 1},
{value: 3, timestamp: 125, blockNumber: 125, chainID: 2},
{value: 3, timestamp: 125, blockNumber: 125, chainID: 3},
// Keep 135
{value: 4, timestamp: 135, blockNumber: 135, chainID: 1},
{value: 4, timestamp: 135, blockNumber: 135, chainID: 2},
{value: 4, timestamp: 135, blockNumber: 135, chainID: 3},
})
res, err := mergeDataPoints(testData, strideDuration)
require.NoError(t, err)
require.Equal(t, 4, len(res))
require.Equal(t, []int64{105, 115, 125, 135}, getBlockNumbers(res))
require.Equal(t, []int64{3, 3 * 2, 3 * 3, 3 * 4}, getValues(res))
require.Equal(t, []int64{115, 125, 135, 145}, getTimestamps(res))
}
func TestServiceMergeDataPointsOneChain(t *testing.T) {
strideDuration := 10 * time.Second
testData := prepareTestData([]TestDataPoint{
// Keep 105
{value: 1, timestamp: 105, blockNumber: 105, chainID: 1},
// Keep 115
{value: 2, timestamp: 115, blockNumber: 115, chainID: 1},
// Keep 125
{value: 3, timestamp: 125, blockNumber: 125, chainID: 1},
})
res, err := mergeDataPoints(testData, strideDuration)
require.NoError(t, err)
require.Equal(t, 3, len(res))
require.Equal(t, []int64{105, 115, 125}, getBlockNumbers(res))
require.Equal(t, []int64{1, 2, 3}, getValues(res))
require.Equal(t, []int64{105, 115, 125}, getTimestamps(res), "Expect no merging for one chain")
}
func TestServiceMergeDataPointsDropAll(t *testing.T) {
strideDuration := 10 * time.Second
testData := prepareTestData([]TestDataPoint{
{value: 1, timestamp: 100, blockNumber: 100, chainID: 1},
{value: 1, timestamp: 110, blockNumber: 110, chainID: 2},
{value: 1, timestamp: 120, blockNumber: 120, chainID: 3},
{value: 1, timestamp: 130, blockNumber: 130, chainID: 4},
})
res, err := mergeDataPoints(testData, strideDuration)
require.NoError(t, err)
require.Equal(t, 0, len(res))
}
func TestServiceMergeDataPointsEmptyDB(t *testing.T) {
testData := prepareTestData([]TestDataPoint{})
strideDuration := 10 * time.Second
res, err := mergeDataPoints(testData, strideDuration)
require.NoError(t, err)
require.Equal(t, 0, len(res))
}
func TestServiceFindFirstStrideWindowFirstForAllChainInOneStride(t *testing.T) {
strideDuration := 10 * time.Second
testData := prepareTestData([]TestDataPoint{
{value: 1, timestamp: 103, blockNumber: 101, chainID: 2},
{value: 1, timestamp: 106, blockNumber: 102, chainID: 3},
{value: 1, timestamp: 100, blockNumber: 100, chainID: 1},
{value: 1, timestamp: 110, blockNumber: 103, chainID: 1},
{value: 1, timestamp: 110, blockNumber: 103, chainID: 2},
})
startTimestamp, pos := findFirstStrideWindow(testData, strideDuration)
require.Equal(t, testData[1][0].Timestamp, uint64(startTimestamp))
require.Equal(t, map[chainIdentity]int{1: 0, 2: 0, 3: 0}, pos)
}
func TestServiceSortTimeAsc(t *testing.T) {
testData := prepareTestData([]TestDataPoint{
{value: 3, timestamp: 103, blockNumber: 103, chainID: 3},
{value: 4, timestamp: 104, blockNumber: 104, chainID: 4},
{value: 2, timestamp: 102, blockNumber: 102, chainID: 2},
{value: 1, timestamp: 101, blockNumber: 101, chainID: 1},
})
sorted := sortTimeAsc(testData, map[chainIdentity]int{4: 0, 3: 0, 2: 0, 1: 0})
require.Equal(t, []timeIdentity{{1, 0}, {2, 0}, {3, 0}, {4, 0}}, sorted)
}
func TestServiceAtEnd(t *testing.T) {
testData := prepareTestData([]TestDataPoint{
{value: 1, timestamp: 101, blockNumber: 101, chainID: 1},
{value: 1, timestamp: 103, blockNumber: 103, chainID: 2},
{value: 1, timestamp: 105, blockNumber: 105, chainID: 1},
})
sorted := sortTimeAsc(testData, map[chainIdentity]int{1: 0, 2: 0})
require.False(t, sorted[0].atEnd(testData))
require.True(t, sorted[1].atEnd(testData))
sorted = sortTimeAsc(testData, map[chainIdentity]int{1: 1, 2: 0})
require.True(t, sorted[1].atEnd(testData))
}
func TestServiceTokenToValue(t *testing.T) {
weisInOneMain := big.NewFloat(math.Pow(10, 18.0))
res := tokenToValue(big.NewInt(12345), 1000, weisInOneMain)
require.Equal(t, 0.000000000012345, res)
in, ok := new(big.Int).SetString("1234567890000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 10)
require.True(t, ok)
res = tokenToValue(in, 10000, weisInOneMain)
require.Equal(t, 1.23456789e+112, res)
res = tokenToValue(big.NewInt(1000000000000000000), 1.0, weisInOneMain)
require.Equal(t, 1.0, res)
res = tokenToValue(big.NewInt(1), 1.23456789, weisInOneMain)
require.InEpsilonf(t, 1.23456789e-18, res, 1.0e-8, "Expect precision error when handling such low values")
res = tokenToValue(new(big.Int).Exp(big.NewInt(10), big.NewInt(254), nil), 100000, weisInOneMain)
require.Equal(t, 1e+241, res, "Expect exponent 254-18+5")
}
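The conversion these assertions exercise is effectively value = tokenCount / weisInOneMain * rate, carried out in big.Float so that huge token counts survive. A hypothetical sketch consistent with the expectations above (the actual tokenToValue implementation lives in the suppressed part of this diff and may differ):

```go
// Sketch only: divide the wei count by 10^18 and scale by the fiat rate,
// e.g. tokenToValueSketch(big.NewInt(12345), 1000, weisInOneMain) == 1.2345e-11.
func tokenToValueSketch(tokenCount *big.Int, rate float32, weisInOneMain *big.Float) float64 {
	res := new(big.Float).SetInt(tokenCount)
	res.Quo(res, weisInOneMain)
	res.Mul(res, big.NewFloat(float64(rate)))
	val, _ := res.Float64()
	return val
}
```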
func TestServiceGetBalanceHistoryNoData(t *testing.T) {
service, closeFn := setupDummyServiceNoDependencies(t)
defer closeFn()
res, err := service.GetBalanceHistory(context.Background(), []uint64{777}, common.HexToAddress(`0x1`), "ETH", "EUR", time.Now().Unix(), BalanceHistory1Year)
require.NoError(t, err)
require.Equal(t, 0, len(res))
}

View File

@ -0,0 +1,75 @@
package history
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/services/wallet/async"
"github.com/status-im/status-go/services/wallet/transfer"
"github.com/status-im/status-go/services/wallet/walletevent"
)
type TransfersLoadedCb func(chainID uint64, addresses []common.Address, block *big.Int)
// Watcher executes a given callback whenever new transfers are loaded for watched accounts
type Watcher struct {
feed *event.Feed
group *async.Group
callback TransfersLoadedCb
}
func NewWatcher(feed *event.Feed, callback TransfersLoadedCb) *Watcher {
return &Watcher{
feed: feed,
callback: callback,
}
}
func (w *Watcher) Start() {
if w.group != nil {
return
}
w.group = async.NewGroup(context.Background())
w.group.Add(func(ctx context.Context) error {
return watch(ctx, w.feed, w.callback)
})
}
func (w *Watcher) Stop() {
if w.group != nil {
w.group.Stop()
w.group.Wait()
w.group = nil
}
}
func onTransfersLoaded(callback TransfersLoadedCb, chainID uint64, addresses []common.Address, blockNum *big.Int) {
if callback != nil {
callback(chainID, addresses, blockNum)
}
}
func watch(ctx context.Context, feed *event.Feed, callback TransfersLoadedCb) error {
ch := make(chan walletevent.Event, 100)
sub := feed.Subscribe(ch)
defer sub.Unsubscribe()
for {
select {
case <-ctx.Done():
return nil
case err := <-sub.Err():
if err != nil {
log.Error("history: transfers watcher subscription failed", "error", err)
}
case ev := <-ch:
if ev.Type == transfer.EventNewTransfers {
onTransfersLoaded(callback, ev.ChainID, ev.Accounts, ev.BlockNumber)
}
}
}
}
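A minimal wiring sketch (the feed and callback here are hypothetical placeholders; compare startTransfersWatcher in service.go above):

```go
feed := &event.Feed{}
w := NewWatcher(feed, func(chainID uint64, addresses []common.Address, block *big.Int) {
	log.Debug("transfers loaded", "chainID", chainID, "addresses", addresses, "block", block)
})
w.Start()
defer w.Stop()
// Any walletevent.Event with Type == transfer.EventNewTransfers sent on the
// feed now invokes the callback with the event's ChainID, Accounts and BlockNumber.
```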

View File

@ -93,18 +93,17 @@ func NewService(
})
})
balanceCache := balance.NewCache()
balanceCacher := balance.NewCacherWithTTL(5 * time.Minute)
tokenManager := token.NewTokenManager(db, rpcClient, rpcClient.NetworkManager)
savedAddressesManager := &SavedAddressesManager{db: db}
transactionManager := transfer.NewTransactionManager(db, gethManager, transactor, config, accountsDB, pendingTxManager, feed)
transferController := transfer.NewTransferController(db, rpcClient, accountFeed, feed, transactionManager, pendingTxManager,
tokenManager, balanceCache, config.WalletConfig.LoadAllTransfers)
tokenManager, balanceCacher, config.WalletConfig.LoadAllTransfers)
cryptoCompare := cryptocompare.NewClient()
coingecko := coingecko.NewClient()
marketManager := market.NewManager(cryptoCompare, coingecko, feed)
reader := NewReader(rpcClient, tokenManager, marketManager, accountsDB, NewPersistence(db), feed)
history := history.NewService(db, accountsDB, feed, rpcClient, tokenManager, marketManager)
history := history.NewService(db, accountsDB, feed, rpcClient, tokenManager, marketManager, balanceCacher.Cache())
currency := currency.NewService(db, feed, tokenManager, marketManager)
blockChainState := NewBlockChainState(rpcClient, accountsDB)

View File

@ -22,3 +22,12 @@ func StructExistsInSlice[T any](target T, slice []T) bool {
}
return false
}
func Filter[T any](ss []T, test func(T) bool) (ret []T) {
for _, s := range ss {
if test(s) {
ret = append(ret, s)
}
}
return
}
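For example (a hypothetical call; the predicate keeps matching elements):

```go
even := Filter([]int{1, 2, 3, 4}, func(n int) bool { return n%2 == 0 })
// even == []int{2, 4}
```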

View File

@ -182,3 +182,27 @@ func TestGetNewRanges(t *testing.T) {
require.Equal(t, int64(50), newRange.to.Int64())
require.Equal(t, 4, len(d))
}
func TestInsertZeroBalance(t *testing.T) {
db, _, err := helpers.SetupTestSQLDB(walletdatabase.DbInitializer{}, "zero-balance")
require.NoError(t, err)
b := &BlockDAO{db}
r := &BlocksRange{
from: big.NewInt(0),
to: big.NewInt(10),
}
nonce := uint64(199)
balance := big.NewInt(0)
account := common.Address{2}
err = b.insertRange(777, account, r.from, r.to, balance, nonce)
require.NoError(t, err)
block, err := b.GetLastKnownBlockByAddress(777, account)
require.NoError(t, err)
require.Equal(t, 0, block.Number.Cmp(r.to))
require.Equal(t, big.NewInt(0).Int64(), block.Balance.Int64())
require.Equal(t, nonce, uint64(*block.Nonce))
}

View File

@ -286,7 +286,6 @@ func (c *controlCommand) Run(parent context.Context) error {
return cmnd.error
}
c.balanceCacher.Clear()
err = c.LoadTransfers(parent, numberOfBlocksCheckedPerIteration)
if err != nil {
if c.NewError(err) {
@ -432,7 +431,7 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) {
c.fetchedTransfers = append(c.fetchedTransfers, allTransfers...)
c.notifyOfNewTransfers(allTransfers)
c.notifyOfNewTransfers(blockNum, allTransfers)
log.Debug("transfersCommand block end", "chain", c.chainClient.NetworkID(), "address", c.address,
"block", blockNum, "tranfers.len", len(allTransfers), "fetchedTransfers.len", len(c.fetchedTransfers))
@ -609,13 +608,14 @@ func (c *transfersCommand) processMultiTransactions(ctx context.Context, allTran
return nil
}
func (c *transfersCommand) notifyOfNewTransfers(transfers []Transfer) {
func (c *transfersCommand) notifyOfNewTransfers(blockNum *big.Int, transfers []Transfer) {
if c.feed != nil {
if len(transfers) > 0 {
c.feed.Send(walletevent.Event{
Type: EventNewTransfers,
Accounts: []common.Address{c.address},
ChainID: c.chainClient.NetworkID(),
Type: EventNewTransfers,
Accounts: []common.Address{c.address},
ChainID: c.chainClient.NetworkID(),
BlockNumber: blockNum,
})
}
}

View File

@ -279,7 +279,7 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) {
}
func (c *findBlocksCommand) blocksFound(headers []*DBHeader) {
c.blocksLoadedCh <- headers // TODO Use notifyOfNewBlocksLoaded instead ??
c.blocksLoadedCh <- headers
}
func (c *findBlocksCommand) upsertBlockRange(blockRange *BlockRange) error {
@ -543,7 +543,6 @@ func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) error {
select {
case <-ctx.Done():
c.balanceCacher.Clear()
return ctx.Err()
case <-group.WaitAsync():
log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "account", c.account)

View File

@ -6,6 +6,7 @@ import (
"sort"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
@ -632,11 +633,11 @@ func TestFindBlocksCommand(t *testing.T) {
},
})
fbc := &findBlocksCommand{
account: common.HexToAddress("0x12345"),
account: common.HexToAddress("0x1234"),
db: wdb,
blockRangeDAO: &BlockRangeSequentialDAO{wdb.client},
chainClient: tc,
balanceCacher: balance.NewCache(),
balanceCacher: balance.NewCacherWithTTL(5 * time.Minute),
feed: &event.Feed{},
noLimit: false,
fromBlockNumber: big.NewInt(testCase.fromBlock),

View File

@ -98,7 +98,7 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca
account common.Address, ranges [][]*big.Int, threadLimit uint32, startBlock *big.Int) (
resRanges [][]*big.Int, headers []*DBHeader, newStartBlock *big.Int, err error) {
log.Debug("start checkRanges", "account", account.Hex(), "ranges len", len(ranges))
log.Debug("start checkRanges", "account", account.Hex(), "ranges len", len(ranges), "startBlock", startBlock)
ctx, cancel := context.WithTimeout(parent, 30*time.Second)
defer cancel()
@ -111,6 +111,8 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca
from := blocksRange[0]
to := blocksRange[1]
log.Debug("check block range", "from", from, "to", to)
if startBlock != nil {
if to.Cmp(newStartBlock) <= 0 {
log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock)
@ -120,6 +122,7 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca
c.Add(func(ctx context.Context) error {
if from.Cmp(to) >= 0 {
log.Debug("'from' block is greater than or equal to 'to' block", "from", from, "to", to)
return nil
}
log.Debug("eth transfers comparing blocks", "from", from, "to", to)

View File

@ -147,7 +147,7 @@ func TestConcurrentEthDownloader(t *testing.T) {
defer cancel()
concurrent := NewConcurrentDownloader(ctx, 0)
_, headers, _, _ := findBlocksWithEthTransfers(
ctx, tc.options.balances, balance.NewCache(),
ctx, tc.options.balances, balance.NewCacherWithTTL(5*time.Minute),
common.Address{}, zero, tc.options.last, false, NoThreadLimit)
concurrent.Wait()
require.NoError(t, concurrent.Error())

View File

@ -216,7 +216,6 @@ func (s *OnDemandFetchStrategy) getTransfersByAddress(ctx context.Context, chain
if err = blocksCommand.Command()(ctx); err != nil {
return nil, err
}
s.balanceCacher.Clear()
blocks, err := s.blockDAO.GetBlocksToLoadByAddress(chainID, address, numberOfBlocksCheckedPerIteration)
if err != nil {

21
vendor/github.com/jellydator/ttlcache/v3/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2022 Jellydator
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

142
vendor/github.com/jellydator/ttlcache/v3/README.md generated vendored Normal file
View File

@ -0,0 +1,142 @@
## TTLCache - an in-memory cache with item expiration and generics
[![Go Reference](https://pkg.go.dev/badge/github.com/jellydator/ttlcache/v3.svg)](https://pkg.go.dev/github.com/jellydator/ttlcache/v3)
[![Build Status](https://github.com/jellydator/ttlcache/actions/workflows/go.yml/badge.svg)](https://github.com/jellydator/ttlcache/actions/workflows/go.yml)
[![Coverage Status](https://coveralls.io/repos/github/jellydator/ttlcache/badge.svg?branch=master)](https://coveralls.io/github/jellydator/ttlcache?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/jellydator/ttlcache/v3)](https://goreportcard.com/report/github.com/jellydator/ttlcache/v3)
## Features
- Simple API
- Type parameters
- Item expiration and automatic deletion
- Automatic expiration time extension on each `Get` call
- `Loader` interface that may be used to load/lazily initialize missing cache
items
- Event handlers (insertion and eviction)
- Metrics
## Installation
```
go get github.com/jellydator/ttlcache/v3
```
## Usage
The main type of `ttlcache` is `Cache`. It represents a single
in-memory data store.
To create a new instance of `ttlcache.Cache`, the `ttlcache.New()` function
should be called:
```go
func main() {
cache := ttlcache.New[string, string]()
}
```
Note that by default, a new cache instance does not let any of its
items expire or be automatically deleted. However, this feature
can be activated by passing a few additional options into the
`ttlcache.New()` function and calling the `cache.Start()` method:
```go
func main() {
cache := ttlcache.New[string, string](
ttlcache.WithTTL[string, string](30 * time.Minute),
)
go cache.Start() // starts automatic expired item deletion
}
```
Even though the `cache.Start()` method handles expired item deletion well,
there may be times when the system that uses `ttlcache` needs to determine
when to delete the expired items itself. For example, it may need to
delete them only when the resource load is at its lowest (e.g., after
midnight, when the number of users/HTTP requests drops). So, in situations
like these, instead of calling `cache.Start()`, the system could
periodically call `cache.DeleteExpired()`:
```go
func main() {
cache := ttlcache.New[string, string](
ttlcache.WithTTL[string, string](30 * time.Minute),
)
for {
time.Sleep(4 * time.Hour)
cache.DeleteExpired()
}
}
```
The data stored in `ttlcache.Cache` can be retrieved, checked, and updated
with methods such as `Set`, `Get`, `Delete`, and `Has`:
```go
func main() {
cache := ttlcache.New[string, string](
ttlcache.WithTTL[string, string](30 * time.Minute),
)
// insert data
cache.Set("first", "value1", ttlcache.DefaultTTL)
cache.Set("second", "value2", ttlcache.NoTTL)
cache.Set("third", "value3", ttlcache.DefaultTTL)
// retrieve data
item := cache.Get("first")
fmt.Println(item.Value(), item.ExpiresAt())
// check key
ok := cache.Has("third")
// delete data
cache.Delete("second")
cache.DeleteExpired()
cache.DeleteAll()
// retrieve the value if cached, otherwise insert it
item, retrieved := cache.GetOrSet("fourth", "value4", ttlcache.WithTTL[string, string](ttlcache.DefaultTTL))
// retrieve and delete data
item, present := cache.GetAndDelete("fourth")
}
```
To subscribe to insertion and eviction events, `cache.OnInsertion()` and
`cache.OnEviction()` methods should be used:
```go
func main() {
cache := ttlcache.New[string, string](
ttlcache.WithTTL[string, string](30 * time.Minute),
ttlcache.WithCapacity[string, string](300),
)
cache.OnInsertion(func(ctx context.Context, item *ttlcache.Item[string, string]) {
fmt.Println(item.Value(), item.ExpiresAt())
})
cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[string, string]) {
if reason == ttlcache.EvictionReasonCapacityReached {
fmt.Println(item.Key(), item.Value())
}
})
cache.Set("first", "value1", ttlcache.DefaultTTL)
cache.DeleteAll()
}
```
To load data when the cache does not have it, a custom or
existing implementation of `ttlcache.Loader` can be used:
```go
func main() {
loader := ttlcache.LoaderFunc[string, string](
func(c *ttlcache.Cache[string, string], key string) *ttlcache.Item[string, string] {
// load from file/make an HTTP request
item := c.Set("key from file", "value from file")
return item
},
)
cache := ttlcache.New[string, string](
ttlcache.WithLoader[string, string](loader),
)
item := cache.Get("key from file")
}
```
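The package also ships a `SuppressedLoader` (defined in cache.go below) that wraps another loader with `singleflight`, so concurrent misses for the same key trigger only one load. A brief sketch of wiring it in, following the README's example style (the load body is illustrative):

```go
func main() {
	loader := ttlcache.LoaderFunc[string, string](
		func(c *ttlcache.Cache[string, string], key string) *ttlcache.Item[string, string] {
			// expensive lookup; runs at most once per key at a time
			return c.Set(key, "loaded value", ttlcache.DefaultTTL)
		},
	)
	// passing a nil group makes NewSuppressedLoader create its own singleflight.Group
	suppressed := ttlcache.NewSuppressedLoader[string, string](loader, nil)
	cache := ttlcache.New[string, string](
		ttlcache.WithLoader[string, string](suppressed),
	)
	item := cache.Get("key") // concurrent Gets for "key" share a single load
	fmt.Println(item.Value())
}
```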

709
vendor/github.com/jellydator/ttlcache/v3/cache.go generated vendored Normal file
View File

@ -0,0 +1,709 @@
package ttlcache
import (
"container/list"
"context"
"fmt"
"sync"
"time"
"golang.org/x/sync/singleflight"
)
// Available eviction reasons.
const (
EvictionReasonDeleted EvictionReason = iota + 1
EvictionReasonCapacityReached
EvictionReasonExpired
)
// EvictionReason is used to specify why a certain item was
// evicted/deleted.
type EvictionReason int
// Cache is a synchronised map of items that are automatically removed
// when they expire or the capacity is reached.
type Cache[K comparable, V any] struct {
items struct {
mu sync.RWMutex
values map[K]*list.Element
// a generic doubly linked list would be more convenient
// (and more performant?). It's possible that this
// will be introduced with/in go1.19+
lru *list.List
expQueue expirationQueue[K, V]
timerCh chan time.Duration
}
metricsMu sync.RWMutex
metrics Metrics
events struct {
insertion struct {
mu sync.RWMutex
nextID uint64
fns map[uint64]func(*Item[K, V])
}
eviction struct {
mu sync.RWMutex
nextID uint64
fns map[uint64]func(EvictionReason, *Item[K, V])
}
}
stopCh chan struct{}
options options[K, V]
}
// New creates a new instance of cache.
func New[K comparable, V any](opts ...Option[K, V]) *Cache[K, V] {
c := &Cache[K, V]{
stopCh: make(chan struct{}),
}
c.items.values = make(map[K]*list.Element)
c.items.lru = list.New()
c.items.expQueue = newExpirationQueue[K, V]()
c.items.timerCh = make(chan time.Duration, 1) // buffer is important
c.events.insertion.fns = make(map[uint64]func(*Item[K, V]))
c.events.eviction.fns = make(map[uint64]func(EvictionReason, *Item[K, V]))
applyOptions(&c.options, opts...)
return c
}
// updateExpirations updates the expiration queue and notifies
// the cache auto cleaner if needed.
// Not safe for concurrent use by multiple goroutines without additional
// locking.
func (c *Cache[K, V]) updateExpirations(fresh bool, elem *list.Element) {
var oldExpiresAt time.Time
if !c.items.expQueue.isEmpty() {
oldExpiresAt = c.items.expQueue[0].Value.(*Item[K, V]).expiresAt
}
if fresh {
c.items.expQueue.push(elem)
} else {
c.items.expQueue.update(elem)
}
newExpiresAt := c.items.expQueue[0].Value.(*Item[K, V]).expiresAt
// check if the closest/soonest expiration timestamp changed
if newExpiresAt.IsZero() || (!oldExpiresAt.IsZero() && !newExpiresAt.Before(oldExpiresAt)) {
return
}
d := time.Until(newExpiresAt)
// It's possible that the auto cleaner isn't active or
// is busy, so we need to drain the channel before
// sending a new value.
// Also, since this method is called after locking the items' mutex,
// we can be sure that there is no other concurrent call of this
// method
if len(c.items.timerCh) > 0 {
// we need to drain this channel in a select with a default
// case because it's possible that the auto cleaner
// read this channel just after we entered this if
select {
case d1 := <-c.items.timerCh:
if d1 < d {
d = d1
}
default:
}
}
// since the channel has a size 1 buffer, we can be sure
// that the line below won't block (we can't overfill the buffer
// because we just drained it)
c.items.timerCh <- d
}
// set creates a new item, adds it to the cache and then returns it.
// Not safe for concurrent use by multiple goroutines without additional
// locking.
func (c *Cache[K, V]) set(key K, value V, ttl time.Duration) *Item[K, V] {
if ttl == DefaultTTL {
ttl = c.options.ttl
}
elem := c.get(key, false)
if elem != nil {
// update/overwrite an existing item
item := elem.Value.(*Item[K, V])
item.update(value, ttl)
c.updateExpirations(false, elem)
return item
}
if c.options.capacity != 0 && uint64(len(c.items.values)) >= c.options.capacity {
// delete the oldest item
c.evict(EvictionReasonCapacityReached, c.items.lru.Back())
}
// create a new item
item := newItem(key, value, ttl, c.options.enableVersionTracking)
elem = c.items.lru.PushFront(item)
c.items.values[key] = elem
c.updateExpirations(true, elem)
c.metricsMu.Lock()
c.metrics.Insertions++
c.metricsMu.Unlock()
c.events.insertion.mu.RLock()
for _, fn := range c.events.insertion.fns {
fn(item)
}
c.events.insertion.mu.RUnlock()
return item
}
// get retrieves an item from the cache and extends its expiration
// time if 'touch' is set to true.
// It returns nil if the item is not found or is expired.
// Not safe for concurrent use by multiple goroutines without additional
// locking.
func (c *Cache[K, V]) get(key K, touch bool) *list.Element {
elem := c.items.values[key]
if elem == nil {
return nil
}
item := elem.Value.(*Item[K, V])
if item.isExpiredUnsafe() {
return nil
}
c.items.lru.MoveToFront(elem)
if touch && item.ttl > 0 {
item.touch()
c.updateExpirations(false, elem)
}
return elem
}
// getWithOpts wraps the get method, applies the given options, and updates
// the metrics.
// It returns nil if the item is not found or is expired.
// If 'lockAndLoad' is set to true, the mutex is locked before calling the
// get method and unlocked after it returns. It also indicates that the
// loader should be used to load external data when the get method returns
// a nil value and the mutex is unlocked.
// If 'lockAndLoad' is set to false, neither the mutex nor the loader is
// used.
func (c *Cache[K, V]) getWithOpts(key K, lockAndLoad bool, opts ...Option[K, V]) *Item[K, V] {
getOpts := options[K, V]{
loader: c.options.loader,
disableTouchOnHit: c.options.disableTouchOnHit,
}
applyOptions(&getOpts, opts...)
if lockAndLoad {
c.items.mu.Lock()
}
elem := c.get(key, !getOpts.disableTouchOnHit)
if lockAndLoad {
c.items.mu.Unlock()
}
if elem == nil {
c.metricsMu.Lock()
c.metrics.Misses++
c.metricsMu.Unlock()
if lockAndLoad && getOpts.loader != nil {
return getOpts.loader.Load(c, key)
}
return nil
}
c.metricsMu.Lock()
c.metrics.Hits++
c.metricsMu.Unlock()
return elem.Value.(*Item[K, V])
}
// evict deletes items from the cache.
// If no items are provided, all currently present cache items
// are evicted.
// Not safe for concurrent use by multiple goroutines without additional
// locking.
func (c *Cache[K, V]) evict(reason EvictionReason, elems ...*list.Element) {
if len(elems) > 0 {
c.metricsMu.Lock()
c.metrics.Evictions += uint64(len(elems))
c.metricsMu.Unlock()
c.events.eviction.mu.RLock()
for i := range elems {
item := elems[i].Value.(*Item[K, V])
delete(c.items.values, item.key)
c.items.lru.Remove(elems[i])
c.items.expQueue.remove(elems[i])
for _, fn := range c.events.eviction.fns {
fn(reason, item)
}
}
c.events.eviction.mu.RUnlock()
return
}
c.metricsMu.Lock()
c.metrics.Evictions += uint64(len(c.items.values))
c.metricsMu.Unlock()
c.events.eviction.mu.RLock()
for _, elem := range c.items.values {
item := elem.Value.(*Item[K, V])
for _, fn := range c.events.eviction.fns {
fn(reason, item)
}
}
c.events.eviction.mu.RUnlock()
c.items.values = make(map[K]*list.Element)
c.items.lru.Init()
c.items.expQueue = newExpirationQueue[K, V]()
}
// delete deletes an item by the provided key.
// The method is no-op if the item is not found.
// Not safe for concurrent use by multiple goroutines without additional
// locking.
func (c *Cache[K, V]) delete(key K) {
elem := c.items.values[key]
if elem == nil {
return
}
c.evict(EvictionReasonDeleted, elem)
}
// Set creates a new item from the provided key and value, adds
// it to the cache and then returns it. If an item associated with the
// provided key already exists, the new item overwrites the existing one.
// NoTTL constant or -1 can be used to indicate that the item should never
// expire.
// DefaultTTL constant or 0 can be used to indicate that the item should use
// the default/global TTL that was specified when the cache instance was
// created.
func (c *Cache[K, V]) Set(key K, value V, ttl time.Duration) *Item[K, V] {
c.items.mu.Lock()
defer c.items.mu.Unlock()
return c.set(key, value, ttl)
}
// Get retrieves an item from the cache by the provided key.
// Unless this is disabled, it also extends/touches an item's
// expiration timestamp on successful retrieval.
// If the item is not found, a nil value is returned.
func (c *Cache[K, V]) Get(key K, opts ...Option[K, V]) *Item[K, V] {
return c.getWithOpts(key, true, opts...)
}
// Delete deletes an item from the cache. If the item associated with
// the key is not found, the method is no-op.
func (c *Cache[K, V]) Delete(key K) {
c.items.mu.Lock()
defer c.items.mu.Unlock()
c.delete(key)
}
// Has checks whether the key exists in the cache.
func (c *Cache[K, V]) Has(key K) bool {
c.items.mu.RLock()
defer c.items.mu.RUnlock()
_, ok := c.items.values[key]
return ok
}
// GetOrSet retrieves an item from the cache by the provided key.
// If the item is not found, it is created with the provided options and
// then returned.
// The bool return value is true if the item was found, false if created
// during the execution of the method.
// If the loader is non-nil (i.e., used as an option or specified when
// creating the cache instance), its execution is skipped.
func (c *Cache[K, V]) GetOrSet(key K, value V, opts ...Option[K, V]) (*Item[K, V], bool) {
c.items.mu.Lock()
defer c.items.mu.Unlock()
elem := c.getWithOpts(key, false, opts...)
if elem != nil {
return elem, true
}
setOpts := options[K, V]{
ttl: c.options.ttl,
}
applyOptions(&setOpts, opts...) // used only to update the TTL
item := c.set(key, value, setOpts.ttl)
return item, false
}
// GetAndDelete retrieves an item from the cache by the provided key and
// then deletes it.
// The bool return value is true if the item was found before
// its deletion, false if not.
// If the loader is non-nil (i.e., used as an option or specified when
// creating the cache instance), it is executed normally, i.e., only when
// the item is not found.
func (c *Cache[K, V]) GetAndDelete(key K, opts ...Option[K, V]) (*Item[K, V], bool) {
c.items.mu.Lock()
elem := c.getWithOpts(key, false, opts...)
if elem == nil {
c.items.mu.Unlock()
getOpts := options[K, V]{
loader: c.options.loader,
}
applyOptions(&getOpts, opts...) // used only to update the loader
if getOpts.loader != nil {
item := getOpts.loader.Load(c, key)
return item, item != nil
}
return nil, false
}
c.delete(key)
c.items.mu.Unlock()
return elem, true
}
// DeleteAll deletes all items from the cache.
func (c *Cache[K, V]) DeleteAll() {
c.items.mu.Lock()
c.evict(EvictionReasonDeleted)
c.items.mu.Unlock()
}
// DeleteExpired deletes all expired items from the cache.
func (c *Cache[K, V]) DeleteExpired() {
c.items.mu.Lock()
defer c.items.mu.Unlock()
if c.items.expQueue.isEmpty() {
return
}
e := c.items.expQueue[0]
for e.Value.(*Item[K, V]).isExpiredUnsafe() {
c.evict(EvictionReasonExpired, e)
if c.items.expQueue.isEmpty() {
break
}
// expiration queue has a new root
e = c.items.expQueue[0]
}
}
// Touch simulates an item's retrieval without actually returning it.
// Its main purpose is to extend an item's expiration timestamp.
// If the item is not found, the method is no-op.
func (c *Cache[K, V]) Touch(key K) {
c.items.mu.Lock()
c.get(key, true)
c.items.mu.Unlock()
}
// Len returns the total number of items in the cache.
func (c *Cache[K, V]) Len() int {
c.items.mu.RLock()
defer c.items.mu.RUnlock()
return len(c.items.values)
}
// Keys returns all keys currently present in the cache.
func (c *Cache[K, V]) Keys() []K {
c.items.mu.RLock()
defer c.items.mu.RUnlock()
res := make([]K, 0, len(c.items.values))
for k := range c.items.values {
res = append(res, k)
}
return res
}
// Items returns a copy of all items in the cache.
// It does not update any expiration timestamps.
func (c *Cache[K, V]) Items() map[K]*Item[K, V] {
c.items.mu.RLock()
defer c.items.mu.RUnlock()
items := make(map[K]*Item[K, V], len(c.items.values))
for k := range c.items.values {
item := c.get(k, false)
if item != nil {
items[k] = item.Value.(*Item[K, V])
}
}
return items
}
// Range calls fn for each item present in the cache. If fn returns false,
// Range stops the iteration.
func (c *Cache[K, V]) Range(fn func(item *Item[K, V]) bool) {
c.items.mu.RLock()
for item := c.items.lru.Front(); item != c.items.lru.Back().Next(); item = item.Next() {
i := item.Value.(*Item[K, V])
c.items.mu.RUnlock()
if !fn(i) {
return
}
if item.Next() != nil {
c.items.mu.RLock()
}
}
}
// Metrics returns the metrics of the cache.
func (c *Cache[K, V]) Metrics() Metrics {
c.metricsMu.RLock()
defer c.metricsMu.RUnlock()
return c.metrics
}
// Start starts an automatic cleanup process that periodically deletes
// expired items.
// It blocks until Stop is called.
func (c *Cache[K, V]) Start() {
waitDur := func() time.Duration {
c.items.mu.RLock()
defer c.items.mu.RUnlock()
if !c.items.expQueue.isEmpty() &&
!c.items.expQueue[0].Value.(*Item[K, V]).expiresAt.IsZero() {
d := time.Until(c.items.expQueue[0].Value.(*Item[K, V]).expiresAt)
if d <= 0 {
// execute immediately
return time.Microsecond
}
return d
}
if c.options.ttl > 0 {
return c.options.ttl
}
return time.Hour
}
timer := time.NewTimer(waitDur())
stop := func() {
if !timer.Stop() {
// drain the timer chan
select {
case <-timer.C:
default:
}
}
}
defer stop()
for {
select {
case <-c.stopCh:
return
case d := <-c.items.timerCh:
stop()
timer.Reset(d)
case <-timer.C:
c.DeleteExpired()
stop()
timer.Reset(waitDur())
}
}
}
// Stop stops the automatic cleanup process.
// It blocks until the cleanup process exits.
func (c *Cache[K, V]) Stop() {
c.stopCh <- struct{}{}
}
// OnInsertion adds the provided function to be executed when
// a new item is inserted into the cache. The function is executed
// on a separate goroutine and does not block the flow of the cache
// manager.
// The returned function may be called to delete the subscription function
// from the list of insertion subscribers.
// When the returned function is called, it blocks until all instances of
// the same subscription function return. A context is used to notify the
// subscription function when the returned/deletion function is called.
func (c *Cache[K, V]) OnInsertion(fn func(context.Context, *Item[K, V])) func() {
var (
wg sync.WaitGroup
ctx, cancel = context.WithCancel(context.Background())
)
c.events.insertion.mu.Lock()
id := c.events.insertion.nextID
c.events.insertion.fns[id] = func(item *Item[K, V]) {
wg.Add(1)
go func() {
fn(ctx, item)
wg.Done()
}()
}
c.events.insertion.nextID++
c.events.insertion.mu.Unlock()
return func() {
cancel()
c.events.insertion.mu.Lock()
delete(c.events.insertion.fns, id)
c.events.insertion.mu.Unlock()
wg.Wait()
}
}
// OnEviction adds the provided function to be executed when
// an item is evicted/deleted from the cache. The function is executed
// on a separate goroutine and does not block the flow of the cache
// manager.
// The returned function may be called to delete the subscription function
// from the list of eviction subscribers.
// When the returned function is called, it blocks until all instances of
// the same subscription function return. A context is used to notify the
// subscription function when the returned/deletion function is called.
func (c *Cache[K, V]) OnEviction(fn func(context.Context, EvictionReason, *Item[K, V])) func() {
var (
wg sync.WaitGroup
ctx, cancel = context.WithCancel(context.Background())
)
c.events.eviction.mu.Lock()
id := c.events.eviction.nextID
c.events.eviction.fns[id] = func(r EvictionReason, item *Item[K, V]) {
wg.Add(1)
go func() {
fn(ctx, r, item)
wg.Done()
}()
}
c.events.eviction.nextID++
c.events.eviction.mu.Unlock()
return func() {
cancel()
c.events.eviction.mu.Lock()
delete(c.events.eviction.fns, id)
c.events.eviction.mu.Unlock()
wg.Wait()
}
}
// Loader is an interface that handles missing data loading.
type Loader[K comparable, V any] interface {
// Load should execute a custom item retrieval logic and
// return the item that is associated with the key.
// It should return nil if the item is not found/valid.
// The method is allowed to fetch data from the cache instance
// or update it for future use.
Load(c *Cache[K, V], key K) *Item[K, V]
}
// LoaderFunc type is an adapter that allows the use of ordinary
// functions as data loaders.
type LoaderFunc[K comparable, V any] func(*Cache[K, V], K) *Item[K, V]
// Load executes a custom item retrieval logic and returns the item that
// is associated with the key.
// It returns nil if the item is not found/valid.
func (l LoaderFunc[K, V]) Load(c *Cache[K, V], key K) *Item[K, V] {
return l(c, key)
}
// SuppressedLoader wraps another Loader and suppresses duplicate
// calls to its Load method.
type SuppressedLoader[K comparable, V any] struct {
loader Loader[K, V]
group *singleflight.Group
}
// NewSuppressedLoader creates a new instance of suppressed loader.
// If the group parameter is nil, a newly created instance of
// *singleflight.Group is used.
func NewSuppressedLoader[K comparable, V any](loader Loader[K, V], group *singleflight.Group) *SuppressedLoader[K, V] {
if group == nil {
group = &singleflight.Group{}
}
return &SuppressedLoader[K, V]{
loader: loader,
group: group,
}
}
// Load executes a custom item retrieval logic and returns the item that
// is associated with the key.
// It returns nil if the item is not found/valid.
// It also ensures that only one execution of the wrapped Loader's Load
// method is in-flight for a given key at a time.
func (l *SuppressedLoader[K, V]) Load(c *Cache[K, V], key K) *Item[K, V] {
// there should be a better/generic way to create a
// singleflight Group's key. It's possible that a generic
// singleflight.Group will be introduced with/in go1.19+
strKey := fmt.Sprint(key)
// the error can be discarded since the singleflight.Group
// itself does not return any of its errors, it returns
// the error that we return ourselves in the func below, which
// is also nil
res, _, _ := l.group.Do(strKey, func() (interface{}, error) {
item := l.loader.Load(c, key)
if item == nil {
return nil, nil
}
return item, nil
})
if res == nil {
return nil
}
return res.(*Item[K, V])
}
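`Start` and `Stop` together give the cleaner a simple lifecycle: run `Start` on its own goroutine and call `Stop` to shut it down. A small usage sketch under those assumptions:

```go
func main() {
	cache := ttlcache.New[string, int](
		ttlcache.WithTTL[string, int](time.Minute),
	)
	go cache.Start()   // run the automatic cleaner on its own goroutine
	defer cache.Stop() // blocks until the cleaner loop exits

	cache.Set("answer", 42, ttlcache.DefaultTTL)
	if item := cache.Get("answer"); item != nil {
		fmt.Println(item.Value(), "expires at", item.ExpiresAt())
	}
}
```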

85
vendor/github.com/jellydator/ttlcache/v3/expiration_queue.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
package ttlcache
import (
"container/heap"
"container/list"
)
// expirationQueue stores items that are ordered by their expiration
// timestamps. The 0th item is closest to its expiration.
type expirationQueue[K comparable, V any] []*list.Element
// newExpirationQueue creates and initializes a new expiration queue.
func newExpirationQueue[K comparable, V any]() expirationQueue[K, V] {
q := make(expirationQueue[K, V], 0)
heap.Init(&q)
return q
}
// isEmpty checks if the queue is empty.
func (q expirationQueue[K, V]) isEmpty() bool {
return q.Len() == 0
}
// update updates an existing item's value and position in the queue.
func (q *expirationQueue[K, V]) update(elem *list.Element) {
heap.Fix(q, elem.Value.(*Item[K, V]).queueIndex)
}
// push pushes a new item into the queue and updates the order of its
// elements.
func (q *expirationQueue[K, V]) push(elem *list.Element) {
heap.Push(q, elem)
}
// remove removes an item from the queue and updates the order of its
// elements.
func (q *expirationQueue[K, V]) remove(elem *list.Element) {
heap.Remove(q, elem.Value.(*Item[K, V]).queueIndex)
}
// Len returns the total number of items in the queue.
func (q expirationQueue[K, V]) Len() int {
return len(q)
}
// Less checks if the item at the i position expires sooner than
// the one at the j position.
func (q expirationQueue[K, V]) Less(i, j int) bool {
item1, item2 := q[i].Value.(*Item[K, V]), q[j].Value.(*Item[K, V])
if item1.expiresAt.IsZero() {
return false
}
if item2.expiresAt.IsZero() {
return true
}
return item1.expiresAt.Before(item2.expiresAt)
}
// Swap switches the places of two queue items.
func (q expirationQueue[K, V]) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].Value.(*Item[K, V]).queueIndex = i
q[j].Value.(*Item[K, V]).queueIndex = j
}
// Push appends a new item to the item slice.
func (q *expirationQueue[K, V]) Push(x interface{}) {
elem := x.(*list.Element)
elem.Value.(*Item[K, V]).queueIndex = len(*q)
*q = append(*q, elem)
}
// Pop removes and returns the last item.
func (q *expirationQueue[K, V]) Pop() interface{} {
old := *q
i := len(old) - 1
elem := old[i]
elem.Value.(*Item[K, V]).queueIndex = -1
old[i] = nil // avoid memory leak
*q = old[:i]
return elem
}

151
vendor/github.com/jellydator/ttlcache/v3/item.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package ttlcache
import (
"sync"
"time"
)
const (
// NoTTL indicates that an item should never expire.
NoTTL time.Duration = -1
// DefaultTTL indicates that the default TTL value of the cache
// instance should be used.
DefaultTTL time.Duration = 0
)
// Item holds all the information that is associated with a single
// cache value.
type Item[K comparable, V any] struct {
// the mutex needs to be locked only when:
// - data fields are being read inside accessor methods
// - data fields are being updated
// when data fields are being read in one of the cache's
// methods, we can be sure that these fields are not modified
// concurrently since the item list is locked by its own mutex as
// well, so locking this mutex would be redundant.
// In other words, this mutex is only useful when these fields
// are being read from the outside (e.g. in event functions).
mu sync.RWMutex
key K
value V
ttl time.Duration
expiresAt time.Time
queueIndex int
version int64
}
// newItem creates a new cache item.
func newItem[K comparable, V any](key K, value V, ttl time.Duration, enableVersionTracking bool) *Item[K, V] {
item := &Item[K, V]{
key: key,
value: value,
ttl: ttl,
}
if !enableVersionTracking {
item.version = -1
}
item.touch()
return item
}
// update modifies the item's value, TTL, and version.
func (item *Item[K, V]) update(value V, ttl time.Duration) {
item.mu.Lock()
defer item.mu.Unlock()
item.value = value
item.ttl = ttl
// reset expiration timestamp because the new TTL may be
// 0 or below
item.expiresAt = time.Time{}
item.touchUnsafe()
// update version if enabled
if item.version > -1 {
item.version++
}
}
// touch updates the item's expiration timestamp.
func (item *Item[K, V]) touch() {
item.mu.Lock()
defer item.mu.Unlock()
item.touchUnsafe()
}
// touchUnsafe updates the item's expiration timestamp without
// locking the mutex.
func (item *Item[K, V]) touchUnsafe() {
if item.ttl <= 0 {
return
}
item.expiresAt = time.Now().Add(item.ttl)
}
// IsExpired returns a bool value that indicates whether the item
// is expired.
func (item *Item[K, V]) IsExpired() bool {
item.mu.RLock()
defer item.mu.RUnlock()
return item.isExpiredUnsafe()
}
// isExpiredUnsafe returns a bool value that indicates whether the
// item is expired, without locking the mutex.
func (item *Item[K, V]) isExpiredUnsafe() bool {
if item.ttl <= 0 {
return false
}
return item.expiresAt.Before(time.Now())
}
// Key returns the key of the item.
func (item *Item[K, V]) Key() K {
item.mu.RLock()
defer item.mu.RUnlock()
return item.key
}
// Value returns the value of the item.
func (item *Item[K, V]) Value() V {
item.mu.RLock()
defer item.mu.RUnlock()
return item.value
}
// TTL returns the TTL value of the item.
func (item *Item[K, V]) TTL() time.Duration {
item.mu.RLock()
defer item.mu.RUnlock()
return item.ttl
}
// ExpiresAt returns the expiration timestamp of the item.
func (item *Item[K, V]) ExpiresAt() time.Time {
item.mu.RLock()
defer item.mu.RUnlock()
return item.expiresAt
}
// Version returns the version of the item. It shows the total number of
// changes made to the item.
// If version tracking is disabled, the return value is always -1.
func (item *Item[K, V]) Version() int64 {
item.mu.RLock()
defer item.mu.RUnlock()
return item.version
}
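Version tracking is off unless the `WithVersion` option (defined in options.go below) enables it; the counter then records how many times an item has been updated. A short sketch:

```go
func main() {
	cache := ttlcache.New[string, string](
		ttlcache.WithVersion[string, string](true),
	)
	item := cache.Set("k", "v1", ttlcache.NoTTL)
	fmt.Println(item.Version()) // 0: freshly created, no updates yet
	item = cache.Set("k", "v2", ttlcache.NoTTL)
	fmt.Println(item.Version()) // 1: one update since creation
}
```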

22
vendor/github.com/jellydator/ttlcache/v3/metrics.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
package ttlcache
// Metrics contains common cache metrics calculated over the course
// of the cache's lifetime.
type Metrics struct {
// Insertions specifies how many items were inserted.
Insertions uint64
// Hits specifies how many items were successfully retrieved
// from the cache.
// Retrievals made with a loader function are not tracked.
Hits uint64
// Misses specifies how many items were not found in the cache.
// Retrievals made with a loader function are considered misses as
// well.
Misses uint64
// Evictions specifies how many items were removed from the
// cache.
Evictions uint64
}
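These counters are exposed through `Cache.Metrics()`; a brief sketch of deriving a hit ratio from them (the ratio arithmetic is our own, not part of the package):

```go
func main() {
	cache := ttlcache.New[string, string]()
	cache.Set("a", "1", ttlcache.NoTTL)
	cache.Get("a") // counted as a hit
	cache.Get("b") // counted as a miss

	m := cache.Metrics()
	if total := m.Hits + m.Misses; total > 0 {
		fmt.Printf("hits=%d misses=%d ratio=%.2f\n", m.Hits, m.Misses, float64(m.Hits)/float64(total))
	}
}
```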

77
vendor/github.com/jellydator/ttlcache/v3/options.go generated vendored Normal file
View File

@ -0,0 +1,77 @@
package ttlcache
import "time"
// Option sets a specific cache option.
type Option[K comparable, V any] interface {
apply(opts *options[K, V])
}
// optionFunc wraps a function and implements the Option interface.
type optionFunc[K comparable, V any] func(*options[K, V])
// apply calls the wrapped function.
func (fn optionFunc[K, V]) apply(opts *options[K, V]) {
fn(opts)
}
// options holds all available cache configuration options.
type options[K comparable, V any] struct {
capacity uint64
ttl time.Duration
loader Loader[K, V]
disableTouchOnHit bool
enableVersionTracking bool
}
// applyOptions applies the provided option values to the option struct.
func applyOptions[K comparable, V any](v *options[K, V], opts ...Option[K, V]) {
for i := range opts {
opts[i].apply(v)
}
}
// WithCapacity sets the maximum capacity of the cache.
// It has no effect when used with Get().
func WithCapacity[K comparable, V any](c uint64) Option[K, V] {
return optionFunc[K, V](func(opts *options[K, V]) {
opts.capacity = c
})
}
// WithTTL sets the TTL of the cache.
// It has no effect when used with Get().
func WithTTL[K comparable, V any](ttl time.Duration) Option[K, V] {
return optionFunc[K, V](func(opts *options[K, V]) {
opts.ttl = ttl
})
}
// WithVersion activates item version tracking.
// If version tracking is disabled, the version is always -1.
// It has no effect when used with Get().
func WithVersion[K comparable, V any](enable bool) Option[K, V] {
return optionFunc[K, V](func(opts *options[K, V]) {
opts.enableVersionTracking = enable
})
}
// WithLoader sets the loader of the cache.
// When passing into Get(), it sets an ephemeral loader that
// is used instead of the cache's default one.
func WithLoader[K comparable, V any](l Loader[K, V]) Option[K, V] {
return optionFunc[K, V](func(opts *options[K, V]) {
opts.loader = l
})
}
// WithDisableTouchOnHit prevents the cache instance from
// extending/touching an item's expiration timestamp when it is being
// retrieved.
// When used with Get(), it overrides the default value of the
// cache.
func WithDisableTouchOnHit[K comparable, V any]() Option[K, V] {
return optionFunc[K, V](func(opts *options[K, V]) {
opts.disableTouchOnHit = true
})
}
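Several of these options can also be passed per call: `Get` accepts an ephemeral loader or a touch override that applies to that single lookup only. A brief sketch:

```go
func main() {
	cache := ttlcache.New[string, string](
		ttlcache.WithTTL[string, string](10 * time.Minute),
	)
	cache.Set("k", "v", ttlcache.DefaultTTL)

	// peek without extending the item's expiration timestamp
	item := cache.Get("k", ttlcache.WithDisableTouchOnHit[string, string]())
	fmt.Println(item.Value()) // "v"; expiration left untouched

	// use a one-off loader for this lookup only
	fallback := ttlcache.LoaderFunc[string, string](
		func(c *ttlcache.Cache[string, string], key string) *ttlcache.Item[string, string] {
			return c.Set(key, "fallback", ttlcache.DefaultTTL)
		},
	)
	item = cache.Get("missing", ttlcache.WithLoader[string, string](fallback))
	fmt.Println(item.Value()) // "fallback"
}
```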

3
vendor/modules.txt vendored
View File

@ -433,6 +433,9 @@ github.com/jackpal/go-nat-pmp
# github.com/jbenet/go-temp-err-catcher v0.1.0
## explicit; go 1.13
github.com/jbenet/go-temp-err-catcher
# github.com/jellydator/ttlcache/v3 v3.1.0
## explicit; go 1.18
github.com/jellydator/ttlcache/v3
# github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a
## explicit
github.com/jinzhu/copier

View File

@ -6,6 +6,7 @@
// 1694540071_add_collectibles_ownership_update_timestamp.up.sql (349B)
// 1694692748_add_raw_balance_to_token_balances.up.sql (165B)
// 1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql (275B)
// 1695932536_balance_history_v2.up.sql (653B)
// doc.go (74B)
package migrations
@ -16,6 +17,7 @@ import (
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -25,7 +27,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@ -33,7 +35,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@ -89,7 +91,7 @@ func _1691753758_initialUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1695161107, 0)}
info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0664), modTime: time.Unix(1692342414, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}}
return a, nil
}
@ -109,7 +111,7 @@ func _1692701329_add_collectibles_and_collections_data_cacheUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0644), modTime: time.Unix(1695161107, 0)}
info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0664), modTime: time.Unix(1692717976, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}}
return a, nil
}
@ -129,7 +131,7 @@ func _1692701339_add_scope_to_pendingUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0644), modTime: time.Unix(1695161107, 0)}
info := bindataFileInfo{name: "1692701339_add_scope_to_pending.up.sql", size: 576, mode: os.FileMode(0664), modTime: time.Unix(1692966636, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x36, 0x8a, 0x5e, 0xe2, 0x63, 0x15, 0x37, 0xba, 0x55, 0x18, 0xf3, 0xcc, 0xe0, 0x5, 0x84, 0xe1, 0x5b, 0xe8, 0x1, 0x32, 0x6b, 0x9f, 0x7d, 0x9f, 0xd9, 0x23, 0x6c, 0xa9, 0xb5, 0xdc, 0xf4, 0x93}}
return a, nil
}
@ -149,7 +151,7 @@ func _1694540071_add_collectibles_ownership_update_timestampUpSql() (*asset, err
return nil, err
}
info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1695161107, 0)}
info := bindataFileInfo{name: "1694540071_add_collectibles_ownership_update_timestamp.up.sql", size: 349, mode: os.FileMode(0664), modTime: time.Unix(1695919463, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x45, 0xc7, 0xce, 0x79, 0x63, 0xbc, 0x6f, 0x83, 0x5f, 0xe2, 0x3, 0x56, 0xcc, 0x5, 0x2f, 0x85, 0xda, 0x7e, 0xea, 0xf5, 0xd2, 0xac, 0x19, 0xd4, 0xd8, 0x5e, 0xdd, 0xed, 0xe2, 0xa9, 0x97}}
return a, nil
}
@ -169,7 +171,7 @@ func _1694692748_add_raw_balance_to_token_balancesUpSql() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0644), modTime: time.Unix(1695211597, 0)}
info := bindataFileInfo{name: "1694692748_add_raw_balance_to_token_balances.up.sql", size: 165, mode: os.FileMode(0664), modTime: time.Unix(1695919463, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0xe0, 0x5b, 0x42, 0xf0, 0x96, 0xa5, 0xf5, 0xed, 0xc0, 0x97, 0x88, 0xb0, 0x6d, 0xfe, 0x7d, 0x97, 0x2e, 0x17, 0xd2, 0x16, 0xbc, 0x2a, 0xf2, 0xcc, 0x67, 0x9e, 0xc5, 0x47, 0xf6, 0x69, 0x1}}
return a, nil
}
@ -189,11 +191,31 @@ func _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSq
return nil, err
}
info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0644), modTime: time.Unix(1695211597, 0)}
info := bindataFileInfo{name: "1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql", size: 275, mode: os.FileMode(0664), modTime: time.Unix(1695919463, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfa, 0x2, 0xa, 0x7f, 0x4b, 0xd1, 0x3, 0xd0, 0x3, 0x29, 0x84, 0x31, 0xed, 0x49, 0x4f, 0xb1, 0x2d, 0xd7, 0x80, 0x41, 0x5b, 0xfa, 0x6, 0xae, 0xb4, 0xf6, 0x6b, 0x49, 0xee, 0x57, 0x33, 0x76}}
return a, nil
}
var __1695932536_balance_history_v2UpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x91\xc1\x4e\xeb\x30\x10\x45\xf7\xfe\x8a\xbb\x7c\x95\x92\xb7\x60\xdb\x55\xd2\x9a\x62\x29\x38\x90\x3a\xa8\xbb\xc8\x4d\x5c\x3a\x22\x75\x2b\xdb\x20\x85\xaf\x47\x0d\x0d\x48\x6d\x50\x11\xeb\xeb\x99\x33\xe7\x3a\x8e\x91\x5b\x03\x63\x83\xeb\x70\x30\x0e\xf5\x56\x93\xad\xa8\x89\xa0\x9b\xc6\x19\xef\x23\xd4\xaf\xce\x19\x5b\x77\x11\xd6\xed\xbe\x7e\x61\x71\x8c\xb5\x6e\xb5\xad\x0d\xc8\x43\xc3\x1b\x47\xba\xa5\x77\xd3\x60\x4d\xcf\xff\x85\x0d\x6c\x56\xf0\x44\x71\xa8\x24\xcd\x38\xc4\x2d\x64\xae\xc0\x57\x62\xa9\x96\xc3\x68\xb5\x25\x1f\xf6\xae\xab\xde\x6e\xf0\x8f\x01\xf8\x22\xa3\x94\x4b\xb1\x90\x7c\x8e\x54\x2c\x84\x54\xfd\xb0\x2c\xb3\x2c\xea\x9f\x9d\xce\xc2\x53\x52\xcc\xee\x92\xe2\x2c\x1d\x6e\xfd\x21\xee\x05\xc6\xf7\x06\xda\x19\x1f\xf4\xee\x80\xcb\x6c\xd0\x4d\xb3\x3c\x65\x93\x29\x1b\xfc\x4a\x29\x1e\x4b\x0e\x21\xe7\x7c\x75\x45\x93\x1a\x63\x03\x6d\xba\xea\xb3\xea\x5c\x8e\x16\x71\xbd\xfd\xc9\x74\x80\xff\x86\xba\xa1\x36\x18\xd7\x33\xc9\xf8\xbf\x53\xa3\xef\x7a\x8e\xfa\xf3\x22\x7f\x38\x7d\xee\xd9\xbe\x29\x4b\x32\xc5\x8b\xf1\xf0\x08\x2b\xb8\x4c\xee\x39\x54\x7e\x39\xc9\x3e\x02\x00\x00\xff\xff\x1e\x5e\x83\xea\x8d\x02\x00\x00")
func _1695932536_balance_history_v2UpSqlBytes() ([]byte, error) {
return bindataRead(
__1695932536_balance_history_v2UpSql,
"1695932536_balance_history_v2.up.sql",
)
}
func _1695932536_balance_history_v2UpSql() (*asset, error) {
bytes, err := _1695932536_balance_history_v2UpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "1695932536_balance_history_v2.up.sql", size: 653, mode: os.FileMode(0664), modTime: time.Unix(1695930816, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xf4, 0x14, 0x91, 0xf6, 0x5f, 0xc4, 0x9b, 0xb7, 0x83, 0x32, 0x72, 0xbe, 0x82, 0x42, 0x39, 0xa4, 0x3b, 0xc9, 0x78, 0x3d, 0xca, 0xd4, 0xbf, 0xfc, 0x7a, 0x33, 0x1e, 0xcd, 0x9e, 0xe4, 0x85}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
func docGoBytes() ([]byte, error) {
@ -209,7 +231,7 @@ func docGo() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1695161107, 0)}
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1692342414, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil
}
@ -305,29 +327,32 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1691753758_initial.up.sql": _1691753758_initialUpSql,
"1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql,
"1692701339_add_scope_to_pending.up.sql": _1692701339_add_scope_to_pendingUpSql,
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": _1694540071_add_collectibles_ownership_update_timestampUpSql,
"1694692748_add_raw_balance_to_token_balances.up.sql": _1694692748_add_raw_balance_to_token_balancesUpSql,
"1691753758_initial.up.sql": _1691753758_initialUpSql,
"1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql,
"1692701339_add_scope_to_pending.up.sql": _1692701339_add_scope_to_pendingUpSql,
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": _1694540071_add_collectibles_ownership_update_timestampUpSql,
"1694692748_add_raw_balance_to_token_balances.up.sql": _1694692748_add_raw_balance_to_token_balancesUpSql,
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": _1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql,
"1695932536_balance_history_v2.up.sql": _1695932536_balance_history_v2UpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
@ -360,13 +385,14 @@ type bintree struct {
}
var _bintree = &bintree{nil, map[string]*bintree{
"1691753758_initial.up.sql": {_1691753758_initialUpSql, map[string]*bintree{}},
"1692701329_add_collectibles_and_collections_data_cache.up.sql": {_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1692701339_add_scope_to_pending.up.sql": {_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}},
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": {_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}},
"1694692748_add_raw_balance_to_token_balances.up.sql": {_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}},
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": {_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
"1691753758_initial.up.sql": &bintree{_1691753758_initialUpSql, map[string]*bintree{}},
"1692701329_add_collectibles_and_collections_data_cache.up.sql": &bintree{_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1692701339_add_scope_to_pending.up.sql": &bintree{_1692701339_add_scope_to_pendingUpSql, map[string]*bintree{}},
"1694540071_add_collectibles_ownership_update_timestamp.up.sql": &bintree{_1694540071_add_collectibles_ownership_update_timestampUpSql, map[string]*bintree{}},
"1694692748_add_raw_balance_to_token_balances.up.sql": &bintree{_1694692748_add_raw_balance_to_token_balancesUpSql, map[string]*bintree{}},
"1695133989_add_community_id_to_collectibles_and_collections_data_cache.up.sql": &bintree{_1695133989_add_community_id_to_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
"1695932536_balance_history_v2.up.sql": &bintree{_1695932536_balance_history_v2UpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
@ -383,7 +409,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}

View File

@ -0,0 +1,17 @@
-- One entry per chain_id, address, currency, block
-- balance is a serialized big.Int
CREATE TABLE IF NOT EXISTS balance_history_v2 (
chain_id UNSIGNED BIGINT NOT NULL,
address VARCHAR NOT NULL,
currency VARCHAR NOT NULL,
block BIGINT NOT NULL,
timestamp INT NOT NULL,
balance BLOB
);
CREATE UNIQUE INDEX IF NOT EXISTS balance_history_identify_entry ON balance_history_v2 (chain_id, address, currency, block);
CREATE INDEX IF NOT EXISTS balance_history_filter_entries ON balance_history_v2 (chain_id, address, currency, block, timestamp);
DROP TABLE balance_history;
ALTER TABLE balance_history_v2 RENAME TO balance_history;
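For orientation, a hedged sketch of how one row of this table might be written and read back from Go with database/sql (the helper names are ours; serializing the balance via big.Int.Bytes() is an assumption consistent with the "serialized big.Int" comment above):

```go
package history

import (
	"database/sql"
	"math/big"
)

// saveBalance stores one balance point; the balance BLOB holds big.Int bytes.
// INSERT OR REPLACE relies on the unique index over (chain_id, address, currency, block).
func saveBalance(db *sql.DB, chainID uint64, address, currency string, block, timestamp int64, balance *big.Int) error {
	_, err := db.Exec(
		`INSERT OR REPLACE INTO balance_history (chain_id, address, currency, block, timestamp, balance)
		 VALUES (?, ?, ?, ?, ?, ?)`,
		chainID, address, currency, block, timestamp, balance.Bytes(),
	)
	return err
}

// loadBalance reads one balance point back, deserializing the BLOB.
func loadBalance(db *sql.DB, chainID uint64, address, currency string, block int64) (*big.Int, error) {
	var raw []byte
	err := db.QueryRow(
		`SELECT balance FROM balance_history
		 WHERE chain_id = ? AND address = ? AND currency = ? AND block = ?`,
		chainID, address, currency, block,
	).Scan(&raw)
	if err != nil {
		return nil, err
	}
	return new(big.Int).SetBytes(raw), nil
}
```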