avoid initTable (#2328)

`initTable` is obsolete since Nim 0.19 and can introduce significant
memory overhead while providing no benefit (since the table will be
grown to the default initial size on first use anyway).

In particular, aristo layers will not necessarily use all tables they
initialize, for example when many empty accounts are being created.
This commit is contained in:
Jacek Sieka 2024-06-10 11:05:30 +02:00 committed by GitHub
parent 02c655fe32
commit f6be4bd0ec
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 32 additions and 96 deletions

View File

@ -551,7 +551,7 @@ proc new*(
radiusCache: RadiusCache.init(256),
offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
disablePoke: config.disablePoke,
pingTimings: initTable[NodeId, chronos.Moment](),
pingTimings: Table[NodeId, chronos.Moment](),
)
proto.baseProtocol.registerTalkProtocol(@(proto.protocolId), proto).expect(
@ -984,7 +984,7 @@ proc lookup*(p: PortalProtocol, target: NodeId): Future[seq[Node]] {.async.} =
# Unvalidated nodes are used for requests as a form of validation.
var closestNodes = p.routingTable.neighbours(target, BUCKET_SIZE, seenOnly = false)
var asked, seen = initHashSet[NodeId]()
var asked, seen = HashSet[NodeId]()
asked.incl(p.localNode.id) # No need to ask our own node
seen.incl(p.localNode.id) # No need to discover our own node
for node in closestNodes:
@ -1080,7 +1080,7 @@ proc contentLookup*(
# first for the same request.
p.baseProtocol.rng[].shuffle(closestNodes)
var asked, seen = initHashSet[NodeId]()
var asked, seen = HashSet[NodeId]()
asked.incl(p.localNode.id) # No need to ask our own node
seen.incl(p.localNode.id) # No need to discover our own node
for node in closestNodes:
@ -1180,10 +1180,10 @@ proc traceContentLookup*(
p.baseProtocol.rng[].shuffle(closestNodes)
let ts = now(chronos.Moment)
var responses = initTable[string, TraceResponse]()
var metadata = initTable[string, NodeMetadata]()
var responses = Table[string, TraceResponse]()
var metadata = Table[string, NodeMetadata]()
var asked, seen = initHashSet[NodeId]()
var asked, seen = HashSet[NodeId]()
asked.incl(p.localNode.id) # No need to ask our own node
seen.incl(p.localNode.id) # No need to discover our own node
for node in closestNodes:
@ -1355,7 +1355,7 @@ proc query*(
## the routing table, nodes returned by the first queries will be used.
var queryBuffer = p.routingTable.neighbours(target, k, seenOnly = false)
var asked, seen = initHashSet[NodeId]()
var asked, seen = HashSet[NodeId]()
asked.incl(p.localNode.id) # No need to ask our own node
seen.incl(p.localNode.id) # No need to discover our own node
for node in queryBuffer:

View File

@ -109,7 +109,7 @@ proc toState*(
alloc: GenesisAlloc
): (HexaryTrie, Table[EthAddress, HexaryTrie]) {.raises: [RlpError].} =
var accountTrie = initHexaryTrie(newMemoryDB())
var storageStates = initTable[EthAddress, HexaryTrie]()
var storageStates = Table[EthAddress, HexaryTrie]()
for address, genAccount in alloc:
var storageRoot = EMPTY_ROOT_HASH

View File

@ -91,7 +91,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
startAccount: 1.u256 shl 160,
nextIndex : 0,
wdHistory : WDHistory(),
sidechain : initTable[uint64, ExecutionPayload]()
sidechain : Table[uint64, ExecutionPayload]()
)
# Sidechain withdraws on the max account value range 0xffffffffffffffffffffffffffffffffffffffff

View File

@ -1,6 +1,6 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -22,67 +22,3 @@ export
executor_helpers.makeReceipt,
process_block,
process_transaction
#[
method executeTransaction(vmState: BaseVMState, transaction: Transaction): (Computation, BlockHeader) {.base.}=
# Execute the transaction in the vm
# TODO: introduced here: https://github.com/ethereum/py-evm/commit/21c57f2d56ab91bb62723c3f9ebe291d0b132dde
# Refactored/Removed here: https://github.com/ethereum/py-evm/commit/cc991bf
# Deleted here: https://github.com/ethereum/py-evm/commit/746defb6f8e83cee2c352a0ab8690e1281c4227c
raise newException(ValueError, "Must be implemented by subclasses")
method addTransaction*(vmState: BaseVMState, transaction: Transaction, c: Computation, b: Block): (Block, Table[string, string]) =
# Add a transaction to the given block and
# return `trieData` to store the transaction data in chaindb in VM layer
# Update the bloomFilter, transaction trie and receipt trie roots, bloom_filter,
# bloom, and usedGas of the block
# transaction: the executed transaction
# computation: the Computation object with executed result
# block: the Block which the transaction is added in
# var receipt = vmState.makeReceipt(transaction, computation)
# vmState.add_receipt(receipt)
# block.transactions.append(transaction)
# # Get trie roots and changed key-values.
# tx_root_hash, tx_kv_nodes = make_trie_root_and_nodes(block.transactions)
# receipt_root_hash, receipt_kv_nodes = make_trie_root_and_nodes(self.receipts)
# trie_data = merge(tx_kv_nodes, receipt_kv_nodes)
# block.bloom_filter |= receipt.bloom
# block.header.transaction_root = tx_root_hash
# block.header.receipt_root = receipt_root_hash
# block.header.bloom = int(block.bloom_filter)
# block.header.gas_used = receipt.gas_used
# return block, trie_data
result = (b, initTable[string, string]())
method applyTransaction*(
vmState: BaseVMState,
transaction: Transaction,
b: Block,
isStateless: bool): (Computation, Block, Table[string, string]) =
# Apply transaction to the given block
# transaction: the transaction need to be applied
# b: the block which the transaction applies on
# isStateless: if isStateless, call vmState.addTransaction to set block
if isStateless:
var ourBlock = b # deepcopy
vmState.blockHeader = b.header
var (computation, blockHeader) = vmState.executeTransaction(transaction)
ourBlock.header = blockHeader
var trieData: Table[string, string]
(ourBlock, trieData) = vmState.addTransaction(transaction, computation, ourBlock)
result = (computation, ourBlock, trieData)
else:
var (computation, blockHeader) = vmState.executeTransaction(transaction)
return (computation, nil, initTable[string, string]())
]#

View File

@ -144,7 +144,7 @@ proc validateUncles(com: CommonRef; header: BlockHeader;
return err("Header suggests block should have uncles but block has none")
# Check for duplicates
var uncleSet = initHashSet[Hash256]()
var uncleSet = HashSet[Hash256]()
for uncle in uncles:
let uncleHash = uncle.blockHash
if uncleHash in uncleSet:

View File

@ -32,7 +32,7 @@ func toStorageKeys(slots: SlotSet): seq[StorageKey] =
# ------------------------------------------------------------------------------
proc init*(ac: var AccessList) =
ac.slots = initTable[EthAddress, SlotSet]()
ac.slots = Table[EthAddress, SlotSet]()
proc init*(_: type AccessList): AccessList {.inline.} =
result.init()
@ -58,7 +58,7 @@ proc merge*(ac: var AccessList, other: AccessList) {.inline.} =
proc add*(ac: var AccessList, address: EthAddress) =
if address notin ac.slots:
ac.slots[address] = initHashSet[UInt256]()
ac.slots[address] = HashSet[UInt256]()
proc add*(ac: var AccessList, address: EthAddress, slot: UInt256) =
ac.slots.withValue(address, val):

View File

@ -149,7 +149,7 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
new result
result.ledger = AccountLedger.init(db, root)
result.kvt = db.newKvt() # save manually in `persist()`
result.witnessCache = initTable[EthAddress, WitnessData]()
result.witnessCache = Table[EthAddress, WitnessData]()
discard result.beginSavepoint
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef, pruneTrie = true): AccountsLedgerRef =
@ -169,7 +169,7 @@ proc isTopLevelClean*(ac: AccountsLedgerRef): bool =
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint =
new result
result.cache = initTable[EthAddress, AccountRef]()
result.cache = Table[EthAddress, AccountRef]()
result.accessList.init()
result.transientStorage.init()
result.state = Pending
@ -712,7 +712,7 @@ proc update(wd: var WitnessData, acc: AccountRef) =
wd.storageKeys.incl k
proc witnessData(acc: AccountRef): WitnessData =
result.storageKeys = initHashSet[UInt256]()
result.storageKeys = HashSet[UInt256]()
update(result, acc)
proc collectWitnessData*(ac: AccountsLedgerRef) =

View File

@ -85,7 +85,7 @@ proc newAccountStateDB*(backingStore: CoreDbRef,
result.trie = AccountLedger.init(backingStore, root)
result.originalRoot = root
when aleth_compat:
result.cleared = initHashSet[EthAddress]()
result.cleared = HashSet[EthAddress]()
#proc getTrie*(db: AccountStateDB): CoreDxMptRef =
# db.trie.mpt

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -33,7 +33,7 @@ proc merge(a, b: StorageTable) =
#######################################################################
proc init*(ac: var TransientStorage) =
ac.map = initTable[EthAddress, StorageTable]()
ac.map = Table[EthAddress, StorageTable]()
proc init*(_: type TransientStorage): TransientStorage {.inline.} =
result.init()

View File

@ -28,7 +28,7 @@ proc newCodeStream*(codeBytes: sink seq[byte]): CodeStream =
new(result)
result.bytes = system.move(codeBytes)
result.pc = 0
result.invalidPositions = initHashSet[int]()
result.invalidPositions = HashSet[int]()
result.depthProcessed = 0
result.cached = @[]

View File

@ -130,9 +130,9 @@ method capturePrepare*(ctx: JsonTracer, comp: Computation, depth: int) {.gcsafe.
let prevLen = ctx.storageKeys.len
ctx.storageKeys.setLen(depth + 1)
for i in prevLen ..< ctx.storageKeys.len - 1:
ctx.storageKeys[i] = initHashSet[UInt256]()
ctx.storageKeys[i] = HashSet[UInt256]()
ctx.storageKeys[depth] = initHashSet[UInt256]()
ctx.storageKeys[depth] = HashSet[UInt256]()
# Top call frame
method captureStart*(ctx: JsonTracer, comp: Computation,

View File

@ -63,9 +63,9 @@ method capturePrepare*(ctx: LegacyTracer, comp: Computation, depth: int) {.gcsaf
let prevLen = ctx.storageKeys.len
ctx.storageKeys.setLen(depth + 1)
for i in prevLen ..< ctx.storageKeys.len - 1:
ctx.storageKeys[i] = initHashSet[UInt256]()
ctx.storageKeys[i] = HashSet[UInt256]()
ctx.storageKeys[depth] = initHashSet[UInt256]()
ctx.storageKeys[depth] = HashSet[UInt256]()
# Opcode level
method captureOpStart*(ctx: LegacyTracer, c: Computation,

View File

@ -83,7 +83,7 @@ proc mapBodiesToHeader(buddy: BeaconBuddyRef,
reqBodies: openArray[bool]) {.raises: [].} =
var
headers = system.move(job.getBlocksJob.headers)
map = initTable[Hash256, int]()
map = Table[Hash256, int]()
for i, x in bodies:
let bodyHash = sumHash(x)

View File

@ -165,7 +165,7 @@ proc banPeer(pool: PeerPool, peer: Peer, banTime: chronos.Duration) {.async.} =
proc cleanupKnownByPeer(ctx: EthWireRef) =
let now = getTime()
var tmp = initHashSet[Hash256]()
var tmp = HashSet[Hash256]()
for _, map in ctx.knownByPeer:
for hash, time in map:
if time - now >= POOLED_STORAGE_TIME_LIMIT:
@ -174,7 +174,7 @@ proc cleanupKnownByPeer(ctx: EthWireRef) =
map.del(hash)
tmp.clear()
var tmpPeer = initHashSet[Peer]()
var tmpPeer = HashSet[Peer]()
for peer, map in ctx.knownByPeer:
if map.len == 0:
tmpPeer.incl peer

View File

@ -80,7 +80,7 @@ proc hash*(x: UInt256): Hash =
proc new(T: type HunterVMState; parent, header: BlockHeader, com: CommonRef): T =
new result
result.init(parent, header, com)
result.headers = initTable[BlockNumber, BlockHeader]()
result.headers = Table[BlockNumber, BlockHeader]()
method getAncestorHash*(vmState: HunterVMState, blockNumber: BlockNumber): Hash256 =
if blockNumber in vmState.headers:

View File

@ -36,7 +36,7 @@ proc buildAccountsTableFromKeys(
let code = if key.codeLen > 0:
db.db.newKvt().get(account.codeHash.data).valueOr: EmptyBlob
else: @[]
var storage = initTable[UInt256, UInt256]()
var storage = Table[UInt256, UInt256]()
for slot in key.slots:
let slotKey = fromBytesBE(UInt256, slot)

View File

@ -52,7 +52,7 @@ proc parseHash(data: string): Hash256 =
proc parseTests(testData: JSonNode): Tests =
const hex = true
result = initTable[string, Tester]()
result = Table[string, Tester]()
var t: Tester
for title, data in testData:
t.parentTimestamp = hexOrInt64(data, "parentTimestamp", hex)

View File

@ -14,7 +14,7 @@ import
../nimbus/common/[context, common]
func revmap(x: Table[EVMFork, string]): Table[string, EVMFork] =
result = initTable[string, EVMFork]()
result = Table[string, EVMFork]()
for k, v in x:
result[v] = k

View File

@ -58,7 +58,7 @@ proc stateDBMain*() =
# give access to private fields of AccountRef
privateAccess(AccountRef)
var x = AccountRef(
overlayStorage: initTable[UInt256, UInt256](),
overlayStorage: Table[UInt256, UInt256](),
originalStorage: newTable[UInt256, UInt256]()
)