chore(wallet): hardcode specific average block duration for L2

L2 networks have their own block numbering and average block durations, so we
hardcode a per-chain average block duration to keep the logic simple.

Updates status-desktop: #9582
Stefan 2023-02-22 22:22:50 +01:00 committed by Stefan Dunca
parent 60e1978bb5
commit 411607d43d
3 changed files with 41 additions and 30 deletions
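For context: strides used to be block counts derived from a single hardcoded 12 s block time; after this change they are wall-clock durations that get converted into a per-chain block count. Below is a minimal, standalone Go sketch of that conversion, using chain IDs and average durations taken from the diff that follows (blocksPerStride is a hypothetical helper written for illustration, not the production strideBlockCount):

package main

import (
	"fmt"
	"time"
)

// Average block duration per chain ID; 0 is the fallback used for unknown chains.
var averageBlockDurationForChain = map[uint64]time.Duration{
	0:     12 * time.Second,       // default (mainnet-like)
	10:    400 * time.Millisecond, // Optimism
	42161: 300 * time.Millisecond, // Arbitrum
}

// blocksPerStride converts a wall-clock stride into a block count for a chain,
// falling back to the default duration when the chain ID is not mapped.
func blocksPerStride(stride time.Duration, chainID uint64) int {
	blockDuration, found := averageBlockDurationForChain[chainID]
	if !found {
		blockDuration = averageBlockDurationForChain[0]
	}
	return int(stride / blockDuration)
}

func main() {
	twiceADayStride := 12 * time.Hour
	fmt.Println(blocksPerStride(twiceADayStride, 1))     // mainnet fallback: 3600 blocks
	fmt.Println(blocksPerStride(twiceADayStride, 10))    // Optimism: 108000 blocks
	fmt.Println(blocksPerStride(twiceADayStride, 42161)) // Arbitrum: 144000 blocks
}

The same 12 h stride therefore spans very different block counts per network, which is why the block count can no longer be a single shared constant.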


@@ -16,18 +16,24 @@ type Balance struct {
db *BalanceDB
}
type blocksStride = int
const (
blockTime = time.Duration(12) * time.Second
aDay = time.Duration(24) * time.Hour
defaultChains = uint64(0)
aDay = time.Duration(24) * time.Hour
)
var averageBlockDurationForChain = map[uint64]time.Duration{
defaultChains: time.Duration(12000) * time.Millisecond,
10: time.Duration(400) * time.Millisecond, // Optimism
420: time.Duration(2000) * time.Millisecond, // Optimism Testnet
42161: time.Duration(300) * time.Millisecond, // Arbitrum
421611: time.Duration(1500) * time.Millisecond, // Arbitrum Testnet
}
// Must have a common divisor to share common blocks and increase the cache hit
const (
twiceADayStride blocksStride = blocksStride((time.Duration(12) * time.Hour) / blockTime)
weekStride = 14 * twiceADayStride
fourMonthsStride = 4 /*months*/ * 4 * weekStride
twiceADayStride time.Duration = time.Duration(12) * time.Hour
weekStride = 14 * twiceADayStride
fourMonthsStride = 4 /*months*/ * 4 * weekStride
)
// bitsetFilters used to fetch relevant data points in one batch and to increase cache hit
@@ -63,7 +69,7 @@ var timeIntervalToBitsetFilter = map[TimeInterval]bitsetFilter{
BalanceHistoryAllTime: filterAllTime,
}
var timeIntervalToStride = map[TimeInterval]blocksStride{
var timeIntervalToStrideDuration = map[TimeInterval]time.Duration{
BalanceHistory7Days: twiceADayStride,
BalanceHistory1Month: twiceADayStride,
BalanceHistory6Months: weekStride,
@@ -71,6 +77,15 @@ var timeIntervalToStride = map[TimeInterval]blocksStride{
BalanceHistoryAllTime: fourMonthsStride,
}
func strideBlockCount(timeInterval TimeInterval, chainID uint64) int {
blockDuration, found := averageBlockDurationForChain[chainID]
if !found {
blockDuration = averageBlockDurationForChain[defaultChains]
}
return int(timeIntervalToStrideDuration[timeInterval] / blockDuration)
}
func NewBalance(db *BalanceDB) *Balance {
return &Balance{
db: db,
@@ -92,10 +107,6 @@ type DataPoint struct {
BlockNumber *hexutil.Big
}
func strideDuration(timeInterval TimeInterval) time.Duration {
return time.Duration(timeIntervalToStride[timeInterval]) * blockTime
}
// fetchAndCache will process the last available block if blockNo is nil
// reuses previously fetched block timestamps to avoid fetching block headers again
func (b *Balance) fetchAndCache(ctx context.Context, source DataSource, address common.Address, blockNo *big.Int, bitset bitsetFilter) (*DataPoint, *big.Int, error) {
@@ -170,7 +181,7 @@ func (b *Balance) update(ctx context.Context, source DataSource, address common.
if timeInterval != BalanceHistoryAllTime {
// Ensure we always get the complete range by fetching the next block also
startTimestamp = endTime - int64(timeIntervalDuration[timeInterval].Seconds())
fetchTimestamp = startTimestamp - int64(strideDuration(timeInterval).Seconds())
fetchTimestamp = startTimestamp - int64(timeIntervalToStrideDuration[timeInterval].Seconds())
}
identity := &assetIdentity{source.ChainID(), address, source.Currency()}
firstCached, err := b.firstCachedStartingAt(identity, fetchTimestamp, timeInterval)
@@ -231,7 +242,7 @@ func (b *Balance) get(ctx context.Context, chainID uint64, currency string, addr
if timeInterval != BalanceHistoryAllTime {
// Ensure we always get the complete range by fetching the next block also
startTimestamp = endTimestamp - int64(timeIntervalDuration[timeInterval].Seconds())
fetchTimestamp = startTimestamp - int64(strideDuration(timeInterval).Seconds())
fetchTimestamp = startTimestamp - int64(timeIntervalToStrideDuration[timeInterval].Seconds())
}
cached, _, err := b.db.filter(&assetIdentity{chainID, address, currency}, nil, &balanceFilter{fetchTimestamp, endTimestamp, expandFlag(timeIntervalToBitsetFilter[timeInterval])}, 200, asc)
if err != nil {
@@ -265,9 +276,9 @@ func (b *Balance) get(ctx context.Context, chainID uint64, currency string, addr
// fetchBackwardAndCache fetches and adds to DB balance entries starting one stride before the endBlock and stops
// when reaching a block timestamp older than startTimestamp or genesis block
// relies on the approximation of a block length to be blockTime for sampling the data
// relies on the approximation of a block length to match averageBlockDurationForChain for sampling the data
func (b *Balance) fetchBackwardAndCache(ctx context.Context, source DataSource, address common.Address, endBlock *big.Int, startTimestamp int64, timeInterval TimeInterval) error {
stride := timeIntervalToStride[timeInterval]
stride := strideBlockCount(timeInterval, source.ChainID())
nextBlock := new(big.Int).Set(endBlock)
for nextBlock.Cmp(big.NewInt(1)) > 0 {
if shouldCancel(ctx) {
@@ -296,9 +307,9 @@ func (b *Balance) fetchForwardAndCache(ctx context.Context, source DataSource,
// fetchForwardAndCache fetches and adds to DB balance entries starting one stride before the startBlock and stops
// when block not found
// relies on the approximation of a block length to be blockTime
// relies on the approximation of a block length to match averageBlockDurationForChain
func (b *Balance) fetchForwardAndCache(ctx context.Context, source DataSource, address common.Address, startBlock *big.Int, timeInterval TimeInterval) error {
stride := timeIntervalToStride[timeInterval]
stride := strideBlockCount(timeInterval, source.ChainID())
nextBlock := new(big.Int).Set(startBlock)
for {
if shouldCancel(ctx) {

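The comments above describe fetchBackwardAndCache and fetchForwardAndCache as walking the chain one stride at a time, trusting the per-chain average block duration as an approximation of real block spacing. A simplified, hypothetical sketch of the backward walk (sampleBackward and fetchBalanceAt are illustrative stand-ins, not APIs from this codebase):

package main

import (
	"fmt"
	"math/big"
)

// sampleBackward illustrates the backward walk: start at endBlock, step towards
// genesis one stride of blocks at a time, and stop once a sampled block's
// timestamp is no newer than startTimestamp. fetchBalanceAt stands in for the
// real fetch-and-cache step.
func sampleBackward(endBlock *big.Int, strideBlocks int, startTimestamp int64,
	fetchBalanceAt func(block *big.Int) (timestamp int64, err error)) error {
	nextBlock := new(big.Int).Set(endBlock)
	for nextBlock.Cmp(big.NewInt(1)) > 0 {
		timestamp, err := fetchBalanceAt(nextBlock)
		if err != nil {
			return err
		}
		if timestamp <= startTimestamp {
			break
		}
		// Step back one stride worth of blocks, clamping at the genesis block.
		nextBlock.Sub(nextBlock, big.NewInt(int64(strideBlocks)))
		if nextBlock.Sign() <= 0 {
			nextBlock.SetInt64(1)
		}
	}
	return nil
}

func main() {
	// Toy chain: block N has timestamp N*2, stride of 21600 blocks (12 h at 2 s per block).
	_ = sampleBackward(big.NewInt(100_000), 21_600, 100_000,
		func(block *big.Int) (int64, error) {
			fmt.Println("sampling block", block)
			return block.Int64() * 2, nil
		})
}

The forward walk is the mirror image: add one stride per iteration until the requested block does not exist yet.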

@@ -213,7 +213,7 @@ func extractTestData(dataSource *chainClientTestSource) (reqBlkNos []int64, info
}
func minimumExpectedDataPoints(interval TimeInterval) int {
return int(math.Ceil(float64(timeIntervalDuration[interval]) / float64(strideDuration(interval))))
return int(math.Ceil(float64(timeIntervalDuration[interval]) / float64(timeIntervalToStrideDuration[interval])))
}
func getTimeError(dataSource *chainClientTestSource, data []*DataPoint, interval TimeInterval) int64 {
@@ -377,7 +377,7 @@ func TestBalanceHistoryFetchFirstTime(t *testing.T) {
}
errorFromIdeal := getTimeError(dataSource, balanceData, testInput.interval)
require.Less(t, math.Abs(float64(errorFromIdeal)), strideDuration(testInput.interval).Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, strideDuration(testInput.interval).Seconds(), testInput.interval)
require.Less(t, math.Abs(float64(errorFromIdeal)), timeIntervalToStrideDuration[testInput.interval].Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, timeIntervalToStrideDuration[testInput.interval].Seconds(), testInput.interval)
})
}
}
@@ -445,7 +445,7 @@ func TestBalanceHistoryFetchError(t *testing.T) {
}
errorFromIdeal := getTimeError(dataSource, balanceData, BalanceHistory1Year)
require.Less(t, math.Abs(float64(errorFromIdeal)), strideDuration(BalanceHistory1Year).Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, strideDuration(BalanceHistory1Year).Seconds(), BalanceHistory1Year)
require.Less(t, math.Abs(float64(errorFromIdeal)), timeIntervalToStrideDuration[BalanceHistory1Year].Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, timeIntervalToStrideDuration[BalanceHistory1Year].Seconds(), BalanceHistory1Year)
}
func TestBalanceHistoryValidateBalanceValuesAndCacheHit(t *testing.T) {
@@ -569,7 +569,7 @@ func TestGetBalanceHistoryUpdateLater(t *testing.T) {
}
errorFromIdeal := getTimeError(dataSource, updatedBalanceData, BalanceHistory1Month)
require.Less(t, math.Abs(float64(errorFromIdeal)), strideDuration(BalanceHistory1Month).Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, strideDuration(BalanceHistory1Month).Seconds(), BalanceHistory1Month)
require.Less(t, math.Abs(float64(errorFromIdeal)), timeIntervalToStrideDuration[BalanceHistory1Month].Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, timeIntervalToStrideDuration[BalanceHistory1Month].Seconds(), BalanceHistory1Month)
// Advance little bit more than a month
dataSource.setCurrentTime(currentTime.Unix())
@@ -601,7 +601,7 @@ func TestGetBalanceHistoryUpdateLater(t *testing.T) {
}
errorFromIdeal = getTimeError(dataSource, newBalanceData, BalanceHistory1Month)
require.Less(t, math.Abs(float64(errorFromIdeal)), strideDuration(BalanceHistory1Month).Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, strideDuration(BalanceHistory1Month).Seconds(), BalanceHistory1Month)
require.Less(t, math.Abs(float64(errorFromIdeal)), timeIntervalToStrideDuration[BalanceHistory1Month].Seconds(), "Duration error [%d s] is within 1 stride [%.f s] for interval [%#v]", errorFromIdeal, timeIntervalToStrideDuration[BalanceHistory1Month].Seconds(), BalanceHistory1Month)
}
func TestGetBalanceHistoryFetchMultipleAccounts(t *testing.T) {
@@ -674,27 +674,27 @@ func TestGetBalanceHistoryUpdateCancellation(t *testing.T) {
}
func TestBlockStrideHaveCommonDivisor(t *testing.T) {
values := make([]blocksStride, 0, len(timeIntervalToStride))
for _, blockCount := range timeIntervalToStride {
values = append(values, blockCount)
values := make([]time.Duration, 0, len(timeIntervalToStrideDuration))
for _, blockDuration := range timeIntervalToStrideDuration {
values = append(values, blockDuration)
}
sort.Slice(values, func(i, j int) bool {
return values[i] < values[j]
})
for i := 1; i < len(values); i++ {
require.Equal(t, blocksStride(0), values[i]%values[i-1], " %d value from index %d is divisible with previous %d", values[i], i, values[i-1])
require.Equal(t, time.Duration(0), values[i]%values[i-1], " %d value from index %d is divisible with previous %d", values[i], i, values[i-1])
}
}
func TestBlockStrideMatchesBitsetFilter(t *testing.T) {
filterToStrideEquivalence := map[bitsetFilter]blocksStride{
filterToStrideEquivalence := map[bitsetFilter]time.Duration{
filterAllTime: fourMonthsStride,
filterWeekly: weekStride,
filterTwiceADay: twiceADayStride,
}
for interval, bitsetFiler := range timeIntervalToBitsetFilter {
stride, found := timeIntervalToStride[interval]
stride, found := timeIntervalToStrideDuration[interval]
require.True(t, found)
require.Equal(t, stride, filterToStrideEquivalence[bitsetFiler])
}

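For a sense of scale in the tests: minimumExpectedDataPoints above divides the interval length by the stride duration and rounds up. Assuming BalanceHistory7Days spans 7 days (the timeIntervalDuration map is outside this diff), the 12 h twiceADayStride yields ceil(168 h / 12 h) = 14 expected points; a trivial check of that arithmetic:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Assumed interval length for BalanceHistory7Days; the real mapping lives
	// in timeIntervalDuration, which is not part of this diff.
	interval := 7 * 24 * time.Hour
	stride := 12 * time.Hour // twiceADayStride
	fmt.Println(int(math.Ceil(float64(interval) / float64(stride)))) // 14
}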

@@ -225,7 +225,7 @@ func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addr
}
}
data, err := mergeDataPoints(allData, strideDuration(timeInterval))
data, err := mergeDataPoints(allData, timeIntervalToStrideDuration[timeInterval])
if err != nil {
return nil, err
} else if len(data) == 0 {
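GetBalanceHistory now passes the interval's stride duration straight to mergeDataPoints. Because the stride is a wall-clock duration shared by all chains (rather than a block count that differs per chain), it can serve directly as the cross-chain merge window. mergeDataPoints itself is not shown in this diff; the sketch below is only a hypothetical illustration of bucketing per-chain points into stride-sized windows, not its real implementation:

package main

import (
	"fmt"
	"time"
)

// point is a simplified data point for this sketch; the real DataPoint carries
// a hex-encoded balance and block number.
type point struct {
	timestamp int64
	balance   int64
}

// mergeByStride buckets points from all chains into stride-sized time windows
// and sums balances per window. Hypothetical illustration only.
func mergeByStride(perChain map[uint64][]point, stride time.Duration) map[int64]int64 {
	strideSec := int64(stride.Seconds())
	merged := map[int64]int64{}
	for _, points := range perChain {
		for _, p := range points {
			window := (p.timestamp / strideSec) * strideSec
			merged[window] += p.balance
		}
	}
	return merged
}

func main() {
	stride := 12 * time.Hour
	data := map[uint64][]point{
		1:  {{timestamp: 1_700_000_000, balance: 10}},
		10: {{timestamp: 1_700_001_000, balance: 5}}, // lands in the same 12 h window
	}
	fmt.Println(mergeByStride(data, stride)) // map[1699963200:15]: both chains' points merge
}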