mirror of https://github.com/status-im/op-geth.git
eth/downloader: increase downloader block body allowance (#23074)
This change increases the cache size from 64 MB to 256 MB for block bodies. Benchmarks have shown this to be one bottleneck when trying to achieve higher download speeds. The commit also includes a minor optimization for header inserts in package core: previously, the presence of headers in the database was checked for every header before writing it. With the change, if one header fails the presence check, all subsequent headers are also assumed to be missing. This is an improvement because in practice, the headers are almost always missing during sync.
This commit is contained in:
parent
bca8c03e57
commit
2d4eff21ca
|
@@ -165,6 +165,7 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
|
||||||
)
|
)
|
||||||
|
|
||||||
batch := hc.chainDb.NewBatch()
|
batch := hc.chainDb.NewBatch()
|
||||||
|
parentKnown := true // Set to true to force hc.HasHeader check the first iteration
|
||||||
for i, header := range headers {
|
for i, header := range headers {
|
||||||
var hash common.Hash
|
var hash common.Hash
|
||||||
// The headers have already been validated at this point, so we already
|
// The headers have already been validated at this point, so we already
|
||||||
|
@@ -178,8 +179,10 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
|
||||||
number := header.Number.Uint64()
|
number := header.Number.Uint64()
|
||||||
newTD.Add(newTD, header.Difficulty)
|
newTD.Add(newTD, header.Difficulty)
|
||||||
|
|
||||||
|
// If the parent was not present, store it
|
||||||
// If the header is already known, skip it, otherwise store
|
// If the header is already known, skip it, otherwise store
|
||||||
if !hc.HasHeader(hash, number) {
|
alreadyKnown := parentKnown && hc.HasHeader(hash, number)
|
||||||
|
if !alreadyKnown {
|
||||||
// Irrelevant of the canonical status, write the TD and header to the database.
|
// Irrelevant of the canonical status, write the TD and header to the database.
|
||||||
rawdb.WriteTd(batch, hash, number, newTD)
|
rawdb.WriteTd(batch, hash, number, newTD)
|
||||||
hc.tdCache.Add(hash, new(big.Int).Set(newTD))
|
hc.tdCache.Add(hash, new(big.Int).Set(newTD))
|
||||||
|
@@ -192,6 +195,7 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit
|
||||||
firstInserted = i
|
firstInserted = i
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
parentKnown = alreadyKnown
|
||||||
lastHeader, lastHash, lastNumber = header, hash, number
|
lastHeader, lastHash, lastNumber = header, hash, number
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -40,10 +40,10 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
|
blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
|
||||||
blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
|
blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
|
||||||
blockCacheMemory = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
|
blockCacheMemory = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
|
||||||
blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones
|
blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@@ -783,8 +783,9 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
|
||||||
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
|
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
|
trieHasher := trie.NewStackTrie(nil)
|
||||||
validate := func(index int, header *types.Header) error {
|
validate := func(index int, header *types.Header) error {
|
||||||
if types.DeriveSha(types.Transactions(txLists[index]), trie.NewStackTrie(nil)) != header.TxHash {
|
if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
|
||||||
return errInvalidBody
|
return errInvalidBody
|
||||||
}
|
}
|
||||||
if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
|
if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
|
||||||
|
@@ -808,8 +809,9 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
|
||||||
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
|
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
|
||||||
q.lock.Lock()
|
q.lock.Lock()
|
||||||
defer q.lock.Unlock()
|
defer q.lock.Unlock()
|
||||||
|
trieHasher := trie.NewStackTrie(nil)
|
||||||
validate := func(index int, header *types.Header) error {
|
validate := func(index int, header *types.Header) error {
|
||||||
if types.DeriveSha(types.Receipts(receiptList[index]), trie.NewStackTrie(nil)) != header.ReceiptHash {
|
if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
|
||||||
return errInvalidReceipt
|
return errInvalidReceipt
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
Loading…
Reference in New Issue