This reverts commit ddbdf34c3d.

parent c5e895aaab
commit 7f56e90654

Makefile | 4
@@ -138,10 +138,6 @@ ifneq ($(ENABLE_EVMC), 0)
T8N_PARAMS := -d:chronicles_enabled=off
endif

ifneq ($(ENABLE_SPECULATIVE_EXECUTION), 0)
NIM_PARAMS += -d:evm_speculative_execution
endif

# disabled by default, enable with ENABLE_VMLOWMEM=1
ifneq ($(if $(ENABLE_VMLOWMEM),$(ENABLE_VMLOWMEM),0),0)
NIM_PARAMS += -d:lowmem:1
@@ -117,7 +117,7 @@ proc procBlkPreamble(vmState: BaseVMState;

proc procBlkEpilogue(vmState: BaseVMState;
    header: BlockHeader; body: BlockBody): bool
    {.gcsafe, raises: [RlpError, CatchableError].} =
    {.gcsafe, raises: [RlpError].} =
  # Reward beneficiary
  vmState.mutateStateDB:
    if vmState.generateWitness:
@@ -59,29 +59,6 @@ proc commitOrRollbackDependingOnGasUsed(vmState: BaseVMState, accTx: SavePoint,
    vmState.gasPool += tx.gasLimit - gasBurned
    return ok(gasBurned)

# For stateless mode with on-demand fetching, we need to do
# this, because it's possible for deletion to result in a
# branch node with only one child, which then needs to be
# transformed into an extension node or leaf node (or
# grafted onto one), but we don't actually have that node
# yet so we have to fetch it and then retry.
proc repeatedlyTryToPersist(vmState: BaseVMState, fork: EVMFork): Future[void] {.async.} =
  #info("repeatedlyTryToPersist about to get started")
  var i = 0
  while i < 100:
    #info("repeatedlyTryToPersist making an attempt to persist", i)
    try:
      await vmState.stateDB.asyncPersist(
        clearEmptyAccount = fork >= FkSpurious,
        clearCache = false)
      return
    except MissingNodesError as e:
      #warn("repeatedlyTryToPersist found missing paths", missingPaths=e.paths, missingNodeHashes=e.nodeHashes)
      await fetchAndPopulateNodes(vmState, e.paths, e.nodeHashes)
    i += 1
  error("repeatedlyTryToPersist failed after 100 tries")
  raise newException(CatchableError, "repeatedlyTryToPersist failed after 100 tries")

proc asyncProcessTransactionImpl(
    vmState: BaseVMState; ## Parent accounts environment for transaction
    tx: Transaction; ## Transaction to validate
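Below is a minimal, self-contained sketch of the fetch-and-retry pattern the comment above describes. It uses std/asyncdispatch instead of chronos so it runs standalone, and the proc and exception names are illustrative stand-ins, not the project's actual API:

import std/asyncdispatch

type MissingNodesError = object of CatchableError

proc asyncPersist(attempt: int) {.async.} =
  # stand-in: pretend the first two attempts hit missing trie nodes
  if attempt < 2:
    raise newException(MissingNodesError, "missing trie nodes")

proc fetchMissingNodes() {.async.} =
  # stand-in: would fetch the missing nodes from a peer
  discard

proc repeatedlyTryToPersist() {.async.} =
  for i in 0 ..< 100:
    try:
      await asyncPersist(i)
      return
    except MissingNodesError:
      await fetchMissingNodes()
  raise newException(CatchableError, "persist failed after 100 tries")

waitFor repeatedlyTryToPersist()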
@@ -135,7 +112,9 @@ proc asyncProcessTransactionImpl(

  if vmState.generateWitness:
    vmState.stateDB.collectWitnessData()
  await repeatedlyTryToPersist(vmState, fork)
  vmState.stateDB.persist(
    clearEmptyAccount = fork >= FkSpurious,
    clearCache = false)

  return res
@@ -68,7 +68,7 @@ template safeExecutor(info: string; code: untyped) =
    raise newException(TxPackerError, info & "(): " & $e.name & " -- " & e.msg)

proc persist(pst: TxPackerStateRef)
    {.gcsafe,raises: [RlpError, CatchableError].} =
    {.gcsafe,raises: [RlpError].} =
  ## Smart wrapper
  if not pst.cleanState:
    let fork = pst.xp.chain.nextFork
@@ -1,11 +1,8 @@
import
  std/[tables, hashes, sets],
  chronos,
  eth/[common, rlp], eth/trie/[hexary, db, trie_defs, nibbles],
  ../utils/functors/possible_futures,
  eth/[common, rlp], eth/trie/[hexary, db, trie_defs],
  ../constants, ../utils/utils, storage_types,
  ../../stateless/multi_keys,
  ../evm/async/speculex,
  ./distinct_tries,
  ./access_list as ac_access_list
@@ -24,14 +21,12 @@ type

  AccountFlags = set[AccountFlag]

  StorageCell* = SpeculativeExecutionCell[UInt256]

  RefAccount = ref object
    account: Account
    flags: AccountFlags
    code: seq[byte]
    originalStorage: TableRef[UInt256, UInt256]
    overlayStorage: Table[UInt256, StorageCell]
    overlayStorage: Table[UInt256, UInt256]

  WitnessData* = object
    storageKeys*: HashSet[UInt256]
@@ -266,15 +261,11 @@ proc originalStorageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef):

    acc.originalStorage[slot] = result

proc storageCell(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): StorageCell =
  acc.overlayStorage.withValue(slot, cell) do:
    return cell[]
  do:
    return pureCell(acc.originalStorageValue(slot, db))

# FIXME-removeSynchronousInterface: we use this down below, but I don't think that's okay anymore.
proc storageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): UInt256 =
  waitForValueOf(storageCell(acc, slot, db))
  acc.overlayStorage.withValue(slot, val) do:
    return val[]
  do:
    result = acc.originalStorageValue(slot, db)

proc kill(acc: RefAccount) =
  acc.flags.excl Alive
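For orientation, a small self-contained sketch of the two-level slot lookup shown above (overlay first, then original storage). Plain tables and uint64 stand in for the project's cell and UInt256 types; everything here is illustrative:

import std/tables

type Acct = object
  overlayStorage: Table[uint64, uint64]   # per-transaction writes
  originalStorage: Table[uint64, uint64]  # values loaded from the trie

proc storageValue(acc: Acct, slot: uint64): uint64 =
  # the overlay wins if the slot was written in this transaction
  if slot in acc.overlayStorage:
    acc.overlayStorage[slot]
  else:
    acc.originalStorage.getOrDefault(slot, 0'u64)

var acc = Acct()
acc.originalStorage[1] = 100
acc.overlayStorage[1] = 200
echo acc.storageValue(1)   # 200: the overlay shadows the original value
echo acc.storageValue(2)   # 0: untouched slots read as zero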
@@ -305,7 +296,7 @@ proc persistCode(acc: RefAccount, db: TrieDatabaseRef) =
  else:
    db.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)

proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool): Future[void] {.async.} =
proc persistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool) =
  if acc.overlayStorage.len == 0:
    # TODO: remove the storage too if we figure out
    # how to create 'virtual' storage room for each account
@@ -316,10 +307,9 @@ proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool)

  var storageTrie = getStorageTrie(db, acc)

  for slot, valueCell in acc.overlayStorage:
  for slot, value in acc.overlayStorage:
    let slotAsKey = createTrieKeyFromSlot slot

    let value = await valueCell.toFuture
    if value > 0:
      let encodedValue = rlp.encode(value)
      storageTrie.putSlotBytes(slotAsKey, encodedValue)
@@ -337,8 +327,7 @@ proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool)
  if not clearCache:
    # if we preserve cache, move the overlayStorage
    # to originalStorage, related to EIP2200, EIP1283
    for slot, valueCell in acc.overlayStorage:
      let value = unsafeGetAlreadyAvailableValue(valueCell)
    for slot, value in acc.overlayStorage:
      if value > 0:
        acc.originalStorage[slot] = value
      else:
@@ -401,15 +390,11 @@ proc getCommittedStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256)
    return
  acc.originalStorageValue(slot, ac.db)

proc getStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256): StorageCell =
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return pureCell(UInt256.zero)
  return acc.storageCell(slot, ac.db)

# FIXME-removeSynchronousInterface
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 =
  waitForValueOf(getStorageCell(ac, address, slot))
    return
  acc.storageValue(slot, ac.db)

proc hasCodeOrNonce*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
  let acc = ac.getAccount(address, false)
@@ -475,21 +460,15 @@ proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) =
    acc.code = code
    acc.flags.incl CodeChanged

proc setStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256, cell: StorageCell) =
proc setStorage*(ac: AccountsCache, address: EthAddress, slot, value: UInt256) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  # FIXME-removeSynchronousInterface: ugh, this seems like a problem (that we need the values to be able to check whether they're equal)
  let oldValue = acc.storageValue(slot, ac.db)
  let value = waitForValueOf(cell)
  if oldValue != value:
    var acc = ac.makeDirty(address)
    acc.overlayStorage[slot] = cell
    acc.overlayStorage[slot] = value
    acc.flags.incl StorageChanged

# FIXME-removeSynchronousInterface
proc setStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256, value: UInt256) =
  setStorageCell(ac, address, slot, pureCell(value))

proc clearStorage*(ac: AccountsCache, address: EthAddress) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
@@ -549,52 +528,9 @@ proc clearEmptyAccounts(ac: AccountsCache) =
    ac.deleteEmptyAccount(ripemdAddr)
    ac.ripemdSpecial = false


type MissingNodesError* = ref object of Defect
  paths*: seq[seq[seq[byte]]]
  nodeHashes*: seq[Hash256]

# FIXME-Adam: Move this elsewhere.
# Also, I imagine there's a more efficient way to do this.
proc padRight[V](s: seq[V], n: int, v: V): seq[V] =
  for sv in s:
    result.add(sv)
  while result.len < n:
    result.add(v)

proc padRightWithZeroes(s: NibblesSeq, n: int): NibblesSeq =
  initNibbleRange(padRight(s.getBytes, (n + 1) div 2, byte(0)))

# FIXME-Adam: Why can I never find the conversion function I need?
func toHash*(value: seq[byte]): Hash256 =
  doAssert(value.len == 32)
  var byteArray: array[32, byte]
  for i, b in value:
    byteArray[i] = b
  result.data = byteArray

func encodePath(path: NibblesSeq): seq[byte] =
  if path.len == 64:
    path.getBytes
  else:
    hexPrefixEncode(path)

proc createMissingNodesErrorForAccount(missingAccountPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
  MissingNodesError(
    paths: @[@[encodePath(padRightWithZeroes(missingAccountPath, 64))]],
    nodeHashes: @[nodeHash]
  )

proc createMissingNodesErrorForSlot(address: EthAddress, missingSlotPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
  MissingNodesError(
    paths: @[@[@(address.keccakHash.data), encodePath(padRightWithZeroes(missingSlotPath, 64))]],
    nodeHashes: @[nodeHash]
  )


proc asyncPersist*(ac: AccountsCache,
    clearEmptyAccount: bool = false,
    clearCache: bool = true): Future[void] {.async.} =
proc persist*(ac: AccountsCache,
    clearEmptyAccount: bool = false,
    clearCache: bool = true) =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  var cleanAccounts = initHashSet[EthAddress]()
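As a quick worked example of the right-padding helper removed above (the proc is copied from the hunk; the echo line and its byte values are illustrative only):

proc padRight[V](s: seq[V], n: int, v: V): seq[V] =
  for sv in s:
    result.add(sv)
  while result.len < n:
    result.add(v)

# a 3-byte path padded out to 6 bytes with trailing zeroes
echo padRight(@[0x12.byte, 0x34, 0x50], 6, 0.byte)   # @[18, 52, 80, 0, 0, 0]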
@@ -613,19 +549,10 @@ proc asyncPersist*(ac: AccountsCache,
      if StorageChanged in acc.flags:
        # storageRoot must be updated first
        # before persisting account into merkle trie
        #
        # Also, see the comment on repeatedlyTryToPersist in
        # process_transaction.nim.
        try:
          await acc.asyncPersistStorage(ac.db, clearCache)
        except MissingNodeError as e:
          raise createMissingNodesErrorForSlot(address, e.path, toHash(e.nodeHashBytes))
        acc.persistStorage(ac.db, clearCache)
      ac.trie.putAccountBytes address, rlp.encode(acc.account)
    of Remove:
      try:
        ac.trie.delAccountBytes address
      except MissingNodeError as e:
        raise createMissingNodesErrorForAccount(e.path, toHash(e.nodeHashBytes))
      ac.trie.delAccountBytes address
      if not clearCache:
        cleanAccounts.incl address
    of DoNothing:
@@ -649,12 +576,6 @@ proc asyncPersist*(ac: AccountsCache,

  ac.isDirty = false

# FIXME-removeSynchronousInterface
proc persist*(ac: AccountsCache,
    clearEmptyAccount: bool = false,
    clearCache: bool = true) =
  waitFor(asyncPersist(ac, clearEmptyAccount, clearCache))

iterator addresses*(ac: AccountsCache): EthAddress =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
@@ -709,8 +630,7 @@ func update(wd: var WitnessData, acc: RefAccount) =
    if v.isZero: continue
    wd.storageKeys.incl k

  for k, cell in acc.overlayStorage:
    let v = unsafeGetAlreadyAvailableValue(cell) # FIXME-Adam: should be resolved by now, I think? wait, maybe not?
  for k, v in acc.overlayStorage:
    if v.isZero and k notin wd.storageKeys:
      continue
    if v.isZero and k in wd.storageKeys:
@@ -30,13 +30,13 @@ proc ifNodesExistGetStorageBytesWithinAccount*(storageTrie: StorageTrie, slotAsK

proc populateDbWithNodes*(db: TrieDatabaseRef, nodes: seq[seq[byte]]) =
  error("AARDVARK: populateDbWithNodes received nodes, about to populate", nodes) # AARDVARK not an error, I just want it to stand out
  error("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG AARDVARK: populateDbWithNodes received nodes, about to populate", nodes) # AARDVARK not an error, I just want it to stand out
  for nodeBytes in nodes:
    let nodeHash = keccakHash(nodeBytes)
    info("AARDVARK: populateDbWithNodes about to add node", nodeHash, nodeBytes)
    db.put(nodeHash.data, nodeBytes)

# FIXME-Adam: just make the callers call populateDbWithNodes directly?
# AARDVARK: just make the callers call populateDbWithNodes directly?
proc populateDbWithBranch*(db: TrieDatabaseRef, branch: seq[seq[byte]]) =
  for nodeBytes in branch:
    let nodeHash = keccakHash(nodeBytes)
@@ -4,7 +4,6 @@ import
  stint,
  eth/common,
  eth/trie/db,
  ../../sync/protocol,
  ../../db/db_chain

type
@@ -13,7 +12,8 @@ type
    ifNecessaryGetCode*: proc(db: TrieDatabaseRef, blockNumber: BlockNumber, stateRoot: Hash256, address: EthAddress, newStateRootForSanityChecking: Hash256): Future[void] {.gcsafe.}
    ifNecessaryGetAccount*: proc(db: TrieDatabaseRef, blockNumber: BlockNumber, stateRoot: Hash256, address: EthAddress, newStateRootForSanityChecking: Hash256): Future[void] {.gcsafe.}
    ifNecessaryGetBlockHeaderByNumber*: proc(chainDB: ChainDBRef, blockNumber: BlockNumber): Future[void] {.gcsafe.}
    fetchNodes*: proc(stateRoot: Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.gcsafe.}
    # FIXME-Adam: Later.
    #fetchNodes*: proc(stateRoot: Hash256, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.gcsafe.}
    fetchBlockHeaderWithHash*: proc(h: Hash256): Future[BlockHeader] {.gcsafe.}
    fetchBlockHeaderWithNumber*: proc(n: BlockNumber): Future[BlockHeader] {.gcsafe.}
    fetchBlockHeaderAndBodyWithHash*: proc(h: Hash256): Future[(BlockHeader, BlockBody)] {.gcsafe.}
@@ -501,9 +501,10 @@ proc realAsyncDataSource*(peerPool: PeerPool, client: RpcClient, justChecking: b
      await ifNecessaryGetBlockHeaderByNumber(client, chainDB, blockNumber, justChecking)
    ),

    fetchNodes: (proc(stateRoot: Hash256, paths: seq[SnapTriePaths], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} =
      return await fetchNodes(peerPool, stateRoot, paths, nodeHashes)
    ),
    # FIXME-Adam: This will be needed later, but for now let's just get the basic methods in place.
    #fetchNodes: (proc(stateRoot: Hash256, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[seq[seq[byte]]] {.async.} =
    #  return await fetchNodes(peerPool, stateRoot, paths, nodeHashes)
    #),

    fetchBlockHeaderWithHash: (proc(h: Hash256): Future[BlockHeader] {.async.} =
      return await fetchBlockHeaderWithHash(client, h)
@@ -1,14 +1,12 @@
import
  chronicles,
  chronos,
  sequtils,
  stint,
  eth/common/eth_types,
  ../../common,
  ../../db/distinct_tries,
  ../../db/accounts_cache,
  ../../db/incomplete_db,
  ../../sync/protocol,
  #../../db/incomplete_db,
  ../types,
  ./data_sources
@@ -29,19 +27,15 @@ proc ifNecessaryGetSlot*(vmState: BaseVMState, address: EthAddress, slot: UInt25
proc ifNecessaryGetBlockHeaderByNumber*(vmState: BaseVMState, blockNumber: BlockNumber): Future[void] {.async.} =
  await vmState.asyncFactory.ifNecessaryGetBlockHeaderByNumber(vmState.com.db, blockNumber)

proc snapTriePathFromByteSeqs(byteSeqs: seq[seq[byte]]): SnapTriePaths =
  SnapTriePaths(
    accPath: byteSeqs[0],
    slotPaths: byteSeqs[1 ..< byteSeqs.len]
  )

proc fetchAndPopulateNodes*(vmState: BaseVMState, pathByteSeqs: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[void] {.async.} =
#[
FIXME-Adam: This is for later.
proc fetchAndPopulateNodes*(vmState: BaseVMState, paths: seq[seq[seq[byte]]], nodeHashes: seq[Hash256]): Future[void] {.async.} =
  if vmState.asyncFactory.maybeDataSource.isSome:
    # let stateRoot = vmState.stateDB.rawTrie.rootHash # FIXME-Adam: this might not be right, huh? the peer might expect the parent block's final stateRoot, not this weirdo intermediate one
    let stateRoot = vmState.parent.stateRoot
    let paths = pathByteSeqs.map(snapTriePathFromByteSeqs)
    let nodes = await vmState.asyncFactory.maybeDataSource.get.fetchNodes(stateRoot, paths, nodeHashes)
    populateDbWithNodes(vmState.stateDB.rawDb, nodes)
]#

# Sometimes it's convenient to be able to do multiple at once.
@@ -1,29 +0,0 @@
import
  chronos,
  ../../utils/functors/[identity, possible_futures]

# FIXME-Adam: I have no idea whether speculative execution even makes sense in the context of EVMC.
const shouldUseSpeculativeExecution* = defined(evm_speculative_execution) and not defined(evmc_enabled)


# For now let's keep it possible to switch back at compile-time to
# having stack/memory/storage cells that are always a concrete value.

when shouldUseSpeculativeExecution:
  type SpeculativeExecutionCell*[V] = Future[V]
else:
  type SpeculativeExecutionCell*[V] = Identity[V]


# I'm disappointed that I can't do this and have the callers resolve
# properly based on the return type.
#[
proc pureCell*[V](v: V): Identity[V] {.inline.} =
  pureIdentity(v)

proc pureCell*[V](v: V): Future[V] {.inline.} =
  pureFuture(v)
]#

proc pureCell*[V](v: V): SpeculativeExecutionCell[V] {.inline.} =
  createPure(v, result)
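A runnable sketch of the compile-time cell switch that this deleted file provided: the same call sites can treat a value either as an already-available box or as a Future. The names Cell, pureCell and valueOf are simplified stand-ins (the real module builds on chronos futures and an Identity functor):

import std/asyncdispatch

when defined(evm_speculative_execution):
  type Cell[V] = Future[V]
  proc pureCell[V](v: V): Cell[V] =
    result = newFuture[V]("pureCell")
    result.complete(v)
  proc valueOf[V](c: Cell[V]): V = waitFor c
else:
  type Cell[V] = object
    value: V
  proc pureCell[V](v: V): Cell[V] = Cell[V](value: v)
  proc valueOf[V](c: Cell[V]): V = c.value

# call sites look the same in either build
let c = pureCell(42)
echo valueOf(c)   # 42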
@@ -9,9 +9,7 @@
# according to those terms.

import
  std/sequtils,
  ".."/[db/accounts_cache, constants],
  ".."/utils/functors/[identity, futures, possible_futures],
  "."/[code_stream, memory, message, stack, state],
  "."/[transaction_tracer, types],
  ./interpreter/[gas_meter, gas_costs, op_codes],
@@ -22,8 +20,7 @@ import
  sets

export
  common,
  chronos
  common

{.push raises: [].}
@@ -318,111 +315,10 @@ template chainTo*(c: Computation, toChild: typeof(c.child), after: untyped) =
# Register an async operation to be performed before the continuation is called.
template asyncChainTo*(c: Computation, asyncOperation: Future[void], after: untyped) =
  c.pendingAsyncOperation = asyncOperation
  c.continuation = proc() {.raises: [CatchableError].} =
  c.continuation = proc() =
    c.continuation = nil
    after

# FIXME-Adam: can I do some type magic to handle tuples of any length? In practice,
# I think we only ever need 4, though. So this is livable for now.
# Oh, actually 7. Fine, I really need to make a macro or something for this.
proc popStackValue*(cpt: Computation, body: (proc(v: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let elem = cpt.stack.popElement()
  cpt.asyncChainTo(discardFutureValue(futureStackValue(elem))):
    let v = unsafeGetAlreadyAvailableValue(elem)
    body(v)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB) = cpt.stack.popElements(2)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    body(vA, vB)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB, vC: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB, elemC) = cpt.stack.popElements(3)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB), futureStackValue(elemC)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    let vC = unsafeGetAlreadyAvailableValue(elemC)
    body(vA, vB, vC)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB, vC, vD: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB, elemC, elemD) = cpt.stack.popElements(4)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB), futureStackValue(elemC), futureStackValue(elemD)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    let vC = unsafeGetAlreadyAvailableValue(elemC)
    let vD = unsafeGetAlreadyAvailableValue(elemD)
    body(vA, vB, vC, vD)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB, vC, vD, vE: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB, elemC, elemD, elemE) = cpt.stack.popElements(5)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB), futureStackValue(elemC), futureStackValue(elemD), futureStackValue(elemE)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    let vC = unsafeGetAlreadyAvailableValue(elemC)
    let vD = unsafeGetAlreadyAvailableValue(elemD)
    let vE = unsafeGetAlreadyAvailableValue(elemE)
    body(vA, vB, vC, vD, vE)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB, vC, vD, vE, vF: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB, elemC, elemD, elemE, elemF) = cpt.stack.popElements(6)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB), futureStackValue(elemC), futureStackValue(elemD), futureStackValue(elemE), futureStackValue(elemF)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    let vC = unsafeGetAlreadyAvailableValue(elemC)
    let vD = unsafeGetAlreadyAvailableValue(elemD)
    let vE = unsafeGetAlreadyAvailableValue(elemE)
    let vF = unsafeGetAlreadyAvailableValue(elemF)
    body(vA, vB, vC, vD, vE, vF)

proc popStackValues*(cpt: Computation, body: (proc(vA, vB, vC, vD, vE, vF, vG: UInt256): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let (elemA, elemB, elemC, elemD, elemE, elemF, elemG) = cpt.stack.popElements(7)
  cpt.asyncChainTo(discardFutureValue(combine(futureStackValue(elemA), futureStackValue(elemB), futureStackValue(elemC), futureStackValue(elemD), futureStackValue(elemE), futureStackValue(elemF), futureStackValue(elemG)))):
    let vA = unsafeGetAlreadyAvailableValue(elemA)
    let vB = unsafeGetAlreadyAvailableValue(elemB)
    let vC = unsafeGetAlreadyAvailableValue(elemC)
    let vD = unsafeGetAlreadyAvailableValue(elemD)
    let vE = unsafeGetAlreadyAvailableValue(elemE)
    let vF = unsafeGetAlreadyAvailableValue(elemF)
    let vG = unsafeGetAlreadyAvailableValue(elemG)
    body(vA, vB, vC, vD, vE, vF, vG)

# Here's my attempt at doing the type magic. It seems to almost compile, except for the
# vals argument being a genTupleType.
#proc popStackValues*(cpt: Computation, tupleLen: static[int], body: (proc(vals: genTupleType(tupleLen, UInt256)): void {.gcsafe.})) =
#  let elems = cpt.stack.popElements(tupleLen)
#  var futs: genTupleType(2, Future[UInt256])
#  for e, f in fields(elems, futs):
#    f = futureStackValue(e)
#  cpt.asyncChainTo(discardFutureValue(combine(futs))):
#    var vals: genTupleType(2, UInt256)
#    for e, v in fields(elems, vals):
#      v = unsafeGetAlreadyAvailableValue(e)
#    body(vals)

proc popStackValues*(cpt: Computation, numItems: int, body: (proc(vals: seq[UInt256]): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let elems = cpt.stack.popSeqOfElements(numItems)
  cpt.asyncChainTo(discardFutureValue(traverse(elems.map(futureStackValue)))):
    let vals = elems.map(unsafeGetAlreadyAvailableValue)
    body(vals)

proc popStackAddress*(cpt: Computation, body: (proc(a: EthAddress): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let elem = cpt.stack.popElement()
  cpt.asyncChainTo(discardFutureValue(futureStackValue(elem))):
    let a = addressFromStackValue(unsafeGetAlreadyAvailableValue(elem))
    body(a)

proc readMemory*(cpt: Computation, startPos: Natural, size: Natural, body: (proc(bytes: seq[byte]): void {.gcsafe, raises: [CatchableError].})) {.raises: [CatchableError]} =
  let futBytes = cpt.memory.futureBytes(startPos, size)
  cpt.asyncChainTo(discardFutureValue(futBytes)):
    let bytes: seq[byte] = unsafeGetAlreadyAvailableValue(futBytes)
    body(bytes)

#FIXME-Adam: unused?
#proc writeMemory*(cpt: Computation, first: int, last: int, newBytesFut: Future[seq[byte]], body: (proc(): void {.gcsafe.})) =
#  cpt.memory.writeFutureBytes(first, last - first + 1, newBytesFut)

proc merge*(c, child: Computation) =
  c.gasMeter.refundGas(child.gasMeter.gasRefunded)
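For context, a self-contained sketch of the continuation pattern that asyncChainTo implements above: register a pending async operation plus a callback, run the operation, then invoke the callback. It uses std/asyncdispatch and simplified types purely for illustration:

import std/asyncdispatch

type Computation = ref object
  pendingAsyncOperation: Future[void]
  continuation: proc () {.closure.}

template asyncChainTo(c: Computation, asyncOperation: Future[void], after: untyped) =
  c.pendingAsyncOperation = asyncOperation
  c.continuation = proc () =
    c.continuation = nil
    after

proc someAsyncWork(): Future[void] {.async.} =
  await sleepAsync(1)

let comp = Computation()
comp.asyncChainTo(someAsyncWork()):
  echo "continuation ran after the pending operation completed"

# the interpreter loop would do roughly this:
waitFor comp.pendingAsyncOperation
if not comp.continuation.isNil:
  comp.continuation()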
@@ -39,8 +39,8 @@ template handleStopDirective(k: var Vm2Ctx) =
  #trace "op: Stop"
  if not k.cpt.code.atEnd() and k.cpt.tracingEnabled:
    # we only trace `REAL STOP` and ignore `FAKE STOP`
    k.cpt.opIndex = k.cpt.traceOpCodeStarted(Op.Stop)
    k.cpt.traceOpCodeEnded(Op.Stop, k.cpt.opIndex)
    k.cpt.opIndex = k.cpt.traceOpCodeStarted(Stop)
    k.cpt.traceOpCodeEnded(Stop, k.cpt.opIndex)


template handleFixedGasCostsDirective(fork: EVMFork; op: Op; k: var Vm2Ctx) =
@@ -85,7 +85,7 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode =
  let asFork = quote do: EVMFork(`fork`)

  let branchStmt = block:
    if op == Op.Stop:
    if op == Stop:
      quote do:
        handleStopDirective(`k`)
    elif BaseGasCosts[op].kind == GckFixed:
@@ -100,7 +100,7 @@ proc toCaseStmt(forkArg, opArg, k: NimNode): NimNode =
  # Wrap innner case/switch into outer case/switch
  let branchStmt = block:
    case op
    of Op.Stop, Return, Revert, SelfDestruct:
    of Stop, Return, Revert, SelfDestruct:
      quote do:
        `forkCaseSubExpr`
        break
@@ -137,7 +137,7 @@ macro genOptimisedDispatcher*(fork: EVMFork; op: Op; k: Vm2Ctx): untyped =


template genLowMemDispatcher*(fork: EVMFork; op: Op; k: Vm2Ctx) =
  if op == Op.Stop:
  if op == Stop:
    handleStopDirective(k)
    break
@@ -157,13 +157,13 @@ when evmc_enabled:

    let actualOutputSize = min(p.memOutLen, c.returnData.len)
    if actualOutputSize > 0:
      c.memory.writeConcreteBytes(p.memOutPos,
      c.memory.write(p.memOutPos,
        c.returnData.toOpenArray(0, actualOutputSize - 1))

    c.gasMeter.returnGas(c.res.gas_left)

    if c.res.status_code == EVMC_SUCCESS:
      c.stack.replaceTopElement(pureStackElement(stackValueFrom(1)))
      c.stack.top(1)

    if not c.res.release.isNil:
      c.res.release(c.res)
@@ -183,12 +183,12 @@ else:

    if child.isSuccess:
      c.merge(child)
      c.stack.replaceTopElement(pureStackElement(stackValueFrom(1)))
      c.stack.top(1)

    c.returnData = child.output
    let actualOutputSize = min(memLen, child.output.len)
    if actualOutputSize > 0:
      c.memory.writeConcreteBytes(memPos, child.output.toOpenArray(0, actualOutputSize - 1))
      c.memory.write(memPos, child.output.toOpenArray(0, actualOutputSize - 1))

# ------------------------------------------------------------------------------
# Private, op handlers implementation
@@ -267,20 +267,19 @@ const
        )
      c.execSubCall(msg, p)
    else:
      cpt.readMemory(p.memInPos, p.memInLen) do (memBytes: seq[byte]):
        cpt.execSubCall(
          memPos = p.memOutPos,
          memLen = p.memOutLen,
          childMsg = Message(
            kind: evmcCall,
            depth: cpt.msg.depth + 1,
            gas: childGasLimit,
            sender: p.sender,
            contractAddress: p.contractAddress,
            codeAddress: p.codeAddress,
            value: p.value,
            data: memBytes,
            flags: p.flags))
      cpt.execSubCall(
        memPos = p.memOutPos,
        memLen = p.memOutLen,
        childMsg = Message(
          kind: evmcCall,
          depth: cpt.msg.depth + 1,
          gas: childGasLimit,
          sender: p.sender,
          contractAddress: p.contractAddress,
          codeAddress: p.codeAddress,
          value: p.value,
          data: cpt.memory.read(p.memInPos, p.memInLen),
          flags: p.flags))

  # ---------------------
@@ -355,20 +354,19 @@ const
        )
      c.execSubCall(msg, p)
    else:
      cpt.readMemory(p.memInPos, p.memInLen) do (memBytes: seq[byte]):
        cpt.execSubCall(
          memPos = p.memOutPos,
          memLen = p.memOutLen,
          childMsg = Message(
            kind: evmcCallCode,
            depth: cpt.msg.depth + 1,
            gas: childGasLimit,
            sender: p.sender,
            contractAddress: p.contractAddress,
            codeAddress: p.codeAddress,
            value: p.value,
            data: memBytes,
            flags: p.flags))
      cpt.execSubCall(
        memPos = p.memOutPos,
        memLen = p.memOutLen,
        childMsg = Message(
          kind: evmcCallCode,
          depth: cpt.msg.depth + 1,
          gas: childGasLimit,
          sender: p.sender,
          contractAddress: p.contractAddress,
          codeAddress: p.codeAddress,
          value: p.value,
          data: cpt.memory.read(p.memInPos, p.memInLen),
          flags: p.flags))

  # ---------------------
@@ -432,20 +430,19 @@ const
        )
      c.execSubCall(msg, p)
    else:
      cpt.readMemory(p.memInPos, p.memInLen) do (memBytes: seq[byte]):
        cpt.execSubCall(
          memPos = p.memOutPos,
          memLen = p.memOutLen,
          childMsg = Message(
            kind: evmcDelegateCall,
            depth: cpt.msg.depth + 1,
            gas: childGasLimit,
            sender: p.sender,
            contractAddress: p.contractAddress,
            codeAddress: p.codeAddress,
            value: p.value,
            data: memBytes,
            flags: p.flags))
      cpt.execSubCall(
        memPos = p.memOutPos,
        memLen = p.memOutLen,
        childMsg = Message(
          kind: evmcDelegateCall,
          depth: cpt.msg.depth + 1,
          gas: childGasLimit,
          sender: p.sender,
          contractAddress: p.contractAddress,
          codeAddress: p.codeAddress,
          value: p.value,
          data: cpt.memory.read(p.memInPos, p.memInLen),
          flags: p.flags))

  # ---------------------
@@ -514,20 +511,19 @@ const
        )
      c.execSubCall(msg, p)
    else:
      cpt.readMemory(p.memInPos, p.memInLen) do (memBytes: seq[byte]):
        cpt.execSubCall(
          memPos = p.memOutPos,
          memLen = p.memOutLen,
          childMsg = Message(
            kind: evmcCall,
            depth: cpt.msg.depth + 1,
            gas: childGasLimit,
            sender: p.sender,
            contractAddress: p.contractAddress,
            codeAddress: p.codeAddress,
            value: p.value,
            data: memBytes,
            flags: p.flags))
      cpt.execSubCall(
        memPos = p.memOutPos,
        memLen = p.memOutLen,
        childMsg = Message(
          kind: evmcCall,
          depth: cpt.msg.depth + 1,
          gas: childGasLimit,
          sender: p.sender,
          contractAddress: p.contractAddress,
          codeAddress: p.codeAddress,
          value: p.value,
          data: cpt.memory.read(p.memInPos, p.memInLen),
          flags: p.flags))

# ------------------------------------------------------------------------------
# Public, op exec table entries
@@ -49,7 +49,7 @@ when evmc_enabled:
    c.chainTo(msg):
      c.gasMeter.returnGas(c.res.gas_left)
      if c.res.status_code == EVMC_SUCCESS:
        c.stack.replaceTopElement(pureStackElement(stackValueFrom(c.res.create_address)))
        c.stack.top(c.res.create_address)
      elif c.res.status_code == EVMC_REVERT:
        # From create, only use `outputData` if child returned with `REVERT`.
        c.returnData = @(makeOpenArray(c.res.outputData, c.res.outputSize.int))
@@ -71,7 +71,7 @@ else:

    if child.isSuccess:
      c.merge(child)
      c.stack.replaceTopElement(pureStackElement(stackValueFrom(child.msg.contractAddress)))
      c.stack.top child.msg.contractAddress
    elif not child.error.burnsGas: # Means return was `REVERT`.
      # From create, only use `outputData` if child returned with `REVERT`.
      c.returnData = child.output
@@ -84,163 +84,159 @@
const
  createOp: Vm2OpFn = proc(k: var Vm2Ctx) =
    ## 0xf0, Create a new account with associated code
    let cpt = k.cpt
    checkInStaticContext(k.cpt)

    cpt.popStackValues do (endowment, memPosUnsafe, memLenUnsafe: UInt256):
      let
        memPos = memPosUnsafe.safeInt
        memLen = memLenUnsafe.safeInt
    let
      endowment = k.cpt.stack.popInt()
      memPos = k.cpt.stack.popInt().safeInt
      memLen = k.cpt.stack.peekInt().safeInt

      cpt.stack.push(0)
    k.cpt.stack.top(0)

      # EIP-3860
      if cpt.fork >= FkShanghai and memLen > EIP3860_MAX_INITCODE_SIZE:
        trace "Initcode size exceeds maximum", initcodeSize = memLen
        raise newException(InitcodeError,
          &"CREATE: have {memLen}, max {EIP3860_MAX_INITCODE_SIZE}")
    # EIP-3860
    if k.cpt.fork >= FkShanghai and memLen > EIP3860_MAX_INITCODE_SIZE:
      trace "Initcode size exceeds maximum", initcodeSize = memLen
      raise newException(InitcodeError,
        &"CREATE: have {memLen}, max {EIP3860_MAX_INITCODE_SIZE}")

      let gasParams = GasParams(
        kind: Create,
        cr_currentMemSize: cpt.memory.len,
        cr_memOffset: memPos,
        cr_memLength: memLen)
    let gasParams = GasParams(
      kind: Create,
      cr_currentMemSize: k.cpt.memory.len,
      cr_memOffset: memPos,
      cr_memLength: memLen)

      var gasCost = cpt.gasCosts[Create].c_handler(1.u256, gasParams).gasCost
      cpt.gasMeter.consumeGas(
        gasCost, reason = &"CREATE: GasCreate + {memLen} * memory expansion")
      cpt.memory.extend(memPos, memLen)
      cpt.returnData.setLen(0)
    var gasCost = k.cpt.gasCosts[Create].c_handler(1.u256, gasParams).gasCost
    k.cpt.gasMeter.consumeGas(
      gasCost, reason = &"CREATE: GasCreate + {memLen} * memory expansion")
    k.cpt.memory.extend(memPos, memLen)
    k.cpt.returnData.setLen(0)

      if cpt.msg.depth >= MaxCallDepth:
    if k.cpt.msg.depth >= MaxCallDepth:
      debug "Computation Failure",
        reason = "Stack too deep",
        maxDepth = MaxCallDepth,
        depth = k.cpt.msg.depth
      return

    if endowment != 0:
      let senderBalance = k.cpt.getBalance(k.cpt.msg.contractAddress)
      if senderBalance < endowment:
        debug "Computation Failure",
          reason = "Stack too deep",
          maxDepth = MaxCallDepth,
          depth = cpt.msg.depth
          reason = "Insufficient funds available to transfer",
          required = endowment,
          balance = senderBalance
        return

      if endowment != 0:
        let senderBalance = cpt.getBalance(cpt.msg.contractAddress)
        if senderBalance < endowment:
          debug "Computation Failure",
            reason = "Insufficient funds available to transfer",
            required = endowment,
            balance = senderBalance
          return
    var createMsgGas = k.cpt.gasMeter.gasRemaining
    if k.cpt.fork >= FkTangerine:
      createMsgGas -= createMsgGas div 64
    k.cpt.gasMeter.consumeGas(createMsgGas, reason = "CREATE")

      var createMsgGas = cpt.gasMeter.gasRemaining
      if cpt.fork >= FkTangerine:
        createMsgGas -= createMsgGas div 64
      cpt.gasMeter.consumeGas(createMsgGas, reason = "CREATE")

      when evmc_enabled:
        let
          msg = new(nimbus_message)
          c = cpt
        msg[] = nimbus_message(
          kind: evmcCreate.ord.evmc_call_kind,
          depth: (cpt.msg.depth + 1).int32,
          gas: createMsgGas,
          sender: cpt.msg.contractAddress,
          input_data: cpt.memory.readPtr(memPos),
          input_size: memLen.uint,
          value: toEvmc(endowment),
          create2_salt: toEvmc(ZERO_CONTRACTSALT),
        )
        c.execSubCreate(msg)
      else:
        cpt.readMemory(memPos, memLen) do (memBytes: seq[byte]):
          cpt.execSubCreate(
            childMsg = Message(
              kind: evmcCreate,
              depth: cpt.msg.depth + 1,
              gas: createMsgGas,
              sender: cpt.msg.contractAddress,
              value: endowment,
              data: memBytes))
    when evmc_enabled:
      let
        msg = new(nimbus_message)
        c = k.cpt
      msg[] = nimbus_message(
        kind: evmcCreate.ord.evmc_call_kind,
        depth: (k.cpt.msg.depth + 1).int32,
        gas: createMsgGas,
        sender: k.cpt.msg.contractAddress,
        input_data: k.cpt.memory.readPtr(memPos),
        input_size: memLen.uint,
        value: toEvmc(endowment),
        create2_salt: toEvmc(ZERO_CONTRACTSALT),
      )
      c.execSubCreate(msg)
    else:
      k.cpt.execSubCreate(
        childMsg = Message(
          kind: evmcCreate,
          depth: k.cpt.msg.depth + 1,
          gas: createMsgGas,
          sender: k.cpt.msg.contractAddress,
          value: endowment,
          data: k.cpt.memory.read(memPos, memLen)))

  # ---------------------

  create2Op: Vm2OpFn = proc(k: var Vm2Ctx) =
    ## 0xf5, Behaves identically to CREATE, except using keccak256
    let cpt = k.cpt
    checkInStaticContext(cpt)
    checkInStaticContext(k.cpt)

    cpt.popStackValues do (endowment, memPosUnsafe, memLenUnsafe, saltInt: UInt256):
      let
        memPos = memPosUnsafe.safeInt
        memLen = memLenUnsafe.safeInt
        salt = ContractSalt(bytes: saltInt.toBytesBE)

      cpt.stack.push(0)
    let
      endowment = k.cpt.stack.popInt()
      memPos = k.cpt.stack.popInt().safeInt
      memLen = k.cpt.stack.popInt().safeInt
      salt = ContractSalt(bytes: k.cpt.stack.peekInt().toBytesBE)

      # EIP-3860
      if cpt.fork >= FkShanghai and memLen > EIP3860_MAX_INITCODE_SIZE:
        trace "Initcode size exceeds maximum", initcodeSize = memLen
        raise newException(InitcodeError,
          &"CREATE2: have {memLen}, max {EIP3860_MAX_INITCODE_SIZE}")
    k.cpt.stack.top(0)

      let gasParams = GasParams(
        kind: Create,
        cr_currentMemSize: cpt.memory.len,
        cr_memOffset: memPos,
        cr_memLength: memLen)
    # EIP-3860
    if k.cpt.fork >= FkShanghai and memLen > EIP3860_MAX_INITCODE_SIZE:
      trace "Initcode size exceeds maximum", initcodeSize = memLen
      raise newException(InitcodeError,
        &"CREATE2: have {memLen}, max {EIP3860_MAX_INITCODE_SIZE}")

      var gasCost = cpt.gasCosts[Create].c_handler(1.u256, gasParams).gasCost
      gasCost = gasCost + cpt.gasCosts[Create2].m_handler(0, 0, memLen)
    let gasParams = GasParams(
      kind: Create,
      cr_currentMemSize: k.cpt.memory.len,
      cr_memOffset: memPos,
      cr_memLength: memLen)

      cpt.gasMeter.consumeGas(
        gasCost, reason = &"CREATE2: GasCreate + {memLen} * memory expansion")
      cpt.memory.extend(memPos, memLen)
      cpt.returnData.setLen(0)
    var gasCost = k.cpt.gasCosts[Create].c_handler(1.u256, gasParams).gasCost
    gasCost = gasCost + k.cpt.gasCosts[Create2].m_handler(0, 0, memLen)

      if cpt.msg.depth >= MaxCallDepth:
    k.cpt.gasMeter.consumeGas(
      gasCost, reason = &"CREATE2: GasCreate + {memLen} * memory expansion")
    k.cpt.memory.extend(memPos, memLen)
    k.cpt.returnData.setLen(0)

    if k.cpt.msg.depth >= MaxCallDepth:
      debug "Computation Failure",
        reason = "Stack too deep",
        maxDepth = MaxCallDepth,
        depth = k.cpt.msg.depth
      return

    if endowment != 0:
      let senderBalance = k.cpt.getBalance(k.cpt.msg.contractAddress)
      if senderBalance < endowment:
        debug "Computation Failure",
          reason = "Stack too deep",
          maxDepth = MaxCallDepth,
          depth = cpt.msg.depth
          reason = "Insufficient funds available to transfer",
          required = endowment,
          balance = senderBalance
        return

      if endowment != 0:
        let senderBalance = cpt.getBalance(cpt.msg.contractAddress)
        if senderBalance < endowment:
          debug "Computation Failure",
            reason = "Insufficient funds available to transfer",
            required = endowment,
            balance = senderBalance
          return
    var createMsgGas = k.cpt.gasMeter.gasRemaining
    if k.cpt.fork >= FkTangerine:
      createMsgGas -= createMsgGas div 64
    k.cpt.gasMeter.consumeGas(createMsgGas, reason = "CREATE2")

      var createMsgGas = cpt.gasMeter.gasRemaining
      if cpt.fork >= FkTangerine:
        createMsgGas -= createMsgGas div 64
      cpt.gasMeter.consumeGas(createMsgGas, reason = "CREATE2")

      when evmc_enabled:
        let
          msg = new(nimbus_message)
          c = cpt
        msg[] = nimbus_message(
          kind: evmcCreate2.ord.evmc_call_kind,
          depth: (cpt.msg.depth + 1).int32,
          gas: createMsgGas,
          sender: cpt.msg.contractAddress,
          input_data: cpt.memory.readPtr(memPos),
          input_size: memLen.uint,
          value: toEvmc(endowment),
          create2_salt: toEvmc(salt),
        )
        c.execSubCreate(msg)
      else:
        cpt.readMemory(memPos, memLen) do (memBytes: seq[byte]):
          cpt.execSubCreate(
            salt = salt,
            childMsg = Message(
              kind: evmcCreate2,
              depth: cpt.msg.depth + 1,
              gas: createMsgGas,
              sender: cpt.msg.contractAddress,
              value: endowment,
              data: memBytes))
    when evmc_enabled:
      let
        msg = new(nimbus_message)
        c = k.cpt
      msg[] = nimbus_message(
        kind: evmcCreate2.ord.evmc_call_kind,
        depth: (k.cpt.msg.depth + 1).int32,
        gas: createMsgGas,
        sender: k.cpt.msg.contractAddress,
        input_data: k.cpt.memory.readPtr(memPos),
        input_size: memLen.uint,
        value: toEvmc(endowment),
        create2_salt: toEvmc(salt),
      )
      c.execSubCreate(msg)
    else:
      k.cpt.execSubCreate(
        salt = salt,
        childMsg = Message(
          kind: evmcCreate2,
          depth: k.cpt.msg.depth + 1,
          gas: createMsgGas,
          sender: k.cpt.msg.contractAddress,
          value: endowment,
          data: k.cpt.memory.read(memPos, memLen)))

# ------------------------------------------------------------------------------
# Public, op exec table entries
@@ -49,14 +49,14 @@ proc writePaddedResult(mem: var Memory,
    let sourceBytes =
      data[min(dataPos, data.len) .. min(data.len - 1, dataEndPosition)]

    mem.writeConcreteBytes(memPos, sourceBytes)
    mem.write(memPos, sourceBytes)

    # Don't duplicate zero-padding of mem.extend
    let paddingOffset = min(memPos + sourceBytes.len, mem.len)
    let numPaddingBytes = min(mem.len - paddingOffset, len - sourceBytes.len)
    if numPaddingBytes > 0:
      # TODO: avoid unnecessary memory allocation
      mem.writeConcreteBytes(paddingOffset, repeat(paddingValue, numPaddingBytes))
      mem.write(paddingOffset, repeat(paddingValue, numPaddingBytes))

# ------------------------------------------------------------------------------
# Private, op handlers implementation
@@ -51,7 +51,7 @@ const
      k.cpt.stack.push(EMPTY_SHA3)
    else:
      k.cpt.stack.push:
        keccakHash k.cpt.memory.waitForBytes.toOpenArray(pos, endRange)
        keccakHash k.cpt.memory.bytes.toOpenArray(pos, endRange)

# ------------------------------------------------------------------------------
# Public, op exec table entries
@@ -70,7 +70,7 @@ proc logImpl(c: Computation, opcode: Op, topicCount: int) =
      topics[i].bytes = c.stack.popTopic()

    c.host.emitLog(c.msg.contractAddress,
                   c.memory.readConcreteBytes(memPos, len),
                   c.memory.read(memPos, len),
                   topics[0].addr, topicCount)
  else:
    var log: Log
@@ -78,10 +78,9 @@ proc logImpl(c: Computation, opcode: Op, topicCount: int) =
    for i in 0 ..< topicCount:
      log.topics.add(c.stack.popTopic())

    c.readMemory(memPos, len) do (memBytes: seq[byte]):
      log.data = memBytes
      log.address = c.msg.contractAddress
      c.addLogEntry(log)
    log.data = c.memory.read(memPos, len)
    log.address = c.msg.contractAddress
    c.addLogEntry(log)

const
  inxRange = toSeq(0 .. 4)
@@ -125,18 +125,16 @@ const

  mloadOp: Vm2OpFn = proc (k: var Vm2Ctx) =
    ## 0x51, Load word from memory
    let cpt = k.cpt
    let (memStartPos) = cpt.stack.popInt(1)
    let (memStartPos) = k.cpt.stack.popInt(1)

    let memPos = memStartPos.cleanMemRef
    cpt.gasMeter.consumeGas(
      cpt.gasCosts[Mload].m_handler(cpt.memory.len, memPos, 32),
    k.cpt.gasMeter.consumeGas(
      k.cpt.gasCosts[Mload].m_handler(k.cpt.memory.len, memPos, 32),
      reason = "MLOAD: GasVeryLow + memory expansion")

    cpt.memory.extend(memPos, 32)
    # FIXME-speculex: this could be made speculative
    cpt.readMemory(memPos, 32) do (memBytes: seq[byte]):
      cpt.stack.push(memBytes)
    k.cpt.memory.extend(memPos, 32)
    k.cpt.stack.push:
      k.cpt.memory.read(memPos, 32)


  mstoreOp: Vm2OpFn = proc (k: var Vm2Ctx) =
@@ -149,7 +147,7 @@ const
      reason = "MSTORE: GasVeryLow + memory expansion")

    k.cpt.memory.extend(memPos, 32)
    k.cpt.memory.writeConcreteBytes(memPos, @(value.toByteArrayBE))
    k.cpt.memory.write(memPos, value.toByteArrayBE)


  mstore8Op: Vm2OpFn = proc (k: var Vm2Ctx) =
@@ -162,7 +160,7 @@ const
      reason = "MSTORE8: GasVeryLow + memory expansion")

    k.cpt.memory.extend(memPos, 1)
    k.cpt.memory.writeConcreteBytes(memPos, @[value.toByteArrayBE[31]])
    k.cpt.memory.write(memPos, [value.toByteArrayBE[31]])

  # -------
@@ -42,34 +42,30 @@ when not defined(evmc_enabled):
const
  returnOp: Vm2OpFn = proc(k: var Vm2Ctx) =
    ## 0xf3, Halt execution returning output data.
    let cpt = k.cpt
    let (startPos, size) = cpt.stack.popInt(2)
    let (startPos, size) = k.cpt.stack.popInt(2)

    let (pos, len) = (startPos.cleanMemRef, size.cleanMemRef)
    cpt.gasMeter.consumeGas(
      cpt.gasCosts[Return].m_handler(cpt.memory.len, pos, len),
    k.cpt.gasMeter.consumeGas(
      k.cpt.gasCosts[Return].m_handler(k.cpt.memory.len, pos, len),
      reason = "RETURN")
    cpt.memory.extend(pos, len)
    cpt.readMemory(pos, len) do (memBytes: seq[byte]):
      cpt.output = memBytes
    k.cpt.memory.extend(pos, len)
    k.cpt.output = k.cpt.memory.read(pos, len)


  revertOp: Vm2OpFn = proc(k: var Vm2Ctx) =
    ## 0xfd, Halt execution reverting state changes but returning data
    ## and remaining gas.
    let cpt = k.cpt
    let (startPos, size) = cpt.stack.popInt(2)
    let (startPos, size) = k.cpt.stack.popInt(2)

    let (pos, len) = (startPos.cleanMemRef, size.cleanMemRef)
    cpt.gasMeter.consumeGas(
      cpt.gasCosts[Revert].m_handler(cpt.memory.len, pos, len),
    k.cpt.gasMeter.consumeGas(
      k.cpt.gasCosts[Revert].m_handler(k.cpt.memory.len, pos, len),
      reason = "REVERT")

    cpt.memory.extend(pos, len)
    cpt.readMemory(pos, len) do (memBytes: seq[byte]):
      cpt.output = memBytes
      # setError(msg, false) will signal cheap revert
      cpt.setError("REVERT opcode executed", false)
    k.cpt.memory.extend(pos, len)
    k.cpt.output = k.cpt.memory.read(pos, len)
    # setError(msg, false) will signal cheap revert
    k.cpt.setError("REVERT opcode executed", false)

  invalidOp: Vm2OpFn = proc(k: var Vm2Ctx) =
@@ -7,81 +7,23 @@

import
  sequtils,
  std/typetraits,
  chronicles, eth/common/eth_types,
  ../utils/functors/[identity, futures, possible_futures],
  ../errors, ./validation,
  ./interpreter/utils/utils_numeric,
  ./async/speculex
  ./interpreter/utils/utils_numeric

type
  Memory* = ref object
    bytes*: seq[byte]

logScope:
  topics = "vm memory"

# FIXME-Adam: this is obviously horribly inefficient; I can try doing
# something more clever (some sort of binary search tree?) later.

type
  ByteCell* = SpeculativeExecutionCell[byte]
  BytesCell* = SpeculativeExecutionCell[seq[byte]]

proc readCells*(byteCells: var seq[ByteCell], startPos: Natural, size: Natural): seq[ByteCell] =
  byteCells[startPos ..< (startPos + size)]

proc writeCells*(byteCells: var seq[ByteCell], startPos: Natural, newCells: openArray[ByteCell]) =
  let size = newCells.len
  if size == 0:
    return
  validateLte(startPos + size, byteCells.len)
  if byteCells.len < startPos + size:
    byteCells = byteCells.concat(repeat(pureCell(0.byte), byteCells.len - (startPos + size))) # TODO: better logarithmic scaling?

  for z, c in newCells:
    byteCells[z + startPos] = c


type
  Memory* = ref object
    byteCells*: seq[ByteCell]

proc newMemory*: Memory =
  new(result)
  result.byteCells = @[]
  result.bytes = @[]

proc len*(memory: Memory): int =
  result = memory.byteCells.len

proc readBytes*(memory: Memory, startPos: Natural, size: Natural): BytesCell {.raises: [CatchableError].} =
  traverse(readCells(memory.byteCells, startPos, size))

proc writeBytes*(memory: Memory, startPos: Natural, size: Natural, newBytesF: BytesCell) =
  var newCells: seq[ByteCell]
  for i in 0..(size-1):
    newCells.add(newBytesF.map(proc(newBytes: seq[byte]): byte = newBytes[i]))
  writeCells(memory.byteCells, startPos, newCells)

proc readAllBytes*(memory: Memory): BytesCell =
  readBytes(memory, 0, len(memory))

proc futureBytes*(memory: Memory, startPos: Natural, size: Natural): Future[seq[byte]] =
  toFuture(readBytes(memory, startPos, size))

proc readConcreteBytes*(memory: Memory, startPos: Natural, size: Natural): seq[byte] =
  waitForValueOf(readBytes(memory, startPos, size))

proc writeConcreteBytes*(memory: Memory, startPos: Natural, value: openArray[byte]) =
  writeBytes(memory, startPos, value.len, pureCell(@value))

when shouldUseSpeculativeExecution:
  proc writeFutureBytes*(memory: Memory, startPos: Natural, size: Natural, newBytesFut: Future[seq[byte]]) =
    writeBytes(memory, startPos, size, newBytesFut)

# FIXME-removeSynchronousInterface: the callers should be fixed so that they don't need this
proc waitForBytes*(memory: Memory): seq[byte] =
  waitForValueOf(readAllBytes(memory))

# FIXME-removeSynchronousInterface: the tests call it "bytes", I dunno how many call sites there are
proc bytes*(memory: Memory): seq[byte] =
  waitForBytes(memory)
  result = memory.bytes.len

proc extend*(memory: var Memory; startPos: Natural; size: Natural) =
  if size == 0:
@@ -90,13 +32,28 @@ proc extend*(memory: var Memory; startPos: Natural; size: Natural) =
  if newSize <= len(memory):
    return
  var sizeToExtend = newSize - len(memory)
  memory.byteCells = memory.byteCells.concat(repeat(pureCell(0.byte), sizeToExtend))
  memory.bytes = memory.bytes.concat(repeat(0.byte, sizeToExtend))

proc newMemory*(size: Natural): Memory =
  result = newMemory()
  result.extend(0, size)

proc read*(memory: var Memory, startPos: Natural, size: Natural): seq[byte] =
  # TODO: use an openArray[byte]
  result = memory.bytes[startPos ..< (startPos + size)]

when defined(evmc_enabled):
  proc readPtr*(memory: var Memory, startPos: Natural): ptr byte =
    if memory.len == 0 or startPos >= memory.len: return
    result = distinctBase(distinctBase(memory.byteCells[startPos])).addr
    if memory.bytes.len == 0 or startPos >= memory.bytes.len: return
    result = memory.bytes[startPos].addr

proc write*(memory: var Memory, startPos: Natural, value: openArray[byte]) =
  let size = value.len
  if size == 0:
    return
  validateLte(startPos + size, memory.len)
  if memory.len < startPos + size:
    memory.bytes = memory.bytes.concat(repeat(0.byte, memory.len - (startPos + size))) # TODO: better logarithmic scaling?

  for z, b in value:
    memory.bytes[z + startPos] = b
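A compact, self-contained sketch of the plain byte-backed Memory that this revert restores (zero-extension on extend, in-place write, slice on read). The proc names follow the diff; the simplified signatures and the example values are illustrative only:

import std/sequtils

type Memory = ref object
  bytes: seq[byte]

proc extend(memory: Memory, startPos, size: Natural) =
  # grow to at least startPos + size, zero-filling the new tail
  let newSize = startPos + size
  if newSize > memory.bytes.len:
    memory.bytes.add repeat(0.byte, newSize - memory.bytes.len)

proc write(memory: Memory, startPos: Natural, value: openArray[byte]) =
  for i, b in value:
    memory.bytes[startPos + i] = b

proc read(memory: Memory, startPos, size: Natural): seq[byte] =
  memory.bytes[startPos ..< startPos + size]

let m = Memory(bytes: @[])
m.extend(0, 32)
m.write(0, [0x01.byte, 0x02, 0x03])
echo m.read(0, 4)   # @[1, 2, 3, 0]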
@@ -7,73 +7,44 @@

import
  std/[strformat, strutils, sequtils, macros],
  chronicles, chronos, eth/common,
  ../utils/functors/[identity, futures, possible_futures],
  ../errors, ./validation,
  ./async/speculex
  chronicles, eth/common,
  ../errors, ./validation

logScope:
  topics = "vm stack"

# Now that we need a stack that contains values that may not be available yet,
# the terminology below makes a clearer distinction between:
#   "stack value" (the actual UInt256)
#   "stack element" (the possibly-not-resolved-yet box that will eventually hold the value).

type StackElement = SpeculativeExecutionCell[UInt256]
proc pureStackElement*(v: UInt256): StackElement {.inline.} = pureCell(v)

type
  Stack* = ref object of RootObj
    elements*: seq[StackElement]
    values*: seq[StackElement]

proc values*(stack: Stack): seq[UInt256] =
  stack.elements.map(proc(elem: StackElement): UInt256 =
    unsafeGetAlreadyAvailableValue(elem))

proc len*(stack: Stack): int {.inline.} =
  len(stack.elements)
  StackElement = UInt256

template ensureStackLimit: untyped =
  if len(stack.elements) > 1023:
  if len(stack.values) > 1023:
    raise newException(FullStack, "Stack limit reached")

proc len*(stack: Stack): int {.inline.} =
  len(stack.values)

proc stackValueFrom*(v: UInt256): UInt256 {.inline.} = v
proc stackValueFrom*(v: uint | int | GasInt): UInt256 {.inline.} = v.u256
proc stackValueFrom*(v: EthAddress): UInt256 {.inline.} = result.initFromBytesBE(v)
proc stackValueFrom*(v: MDigest): UInt256 {.inline.} = result.initFromBytesBE(v.data, allowPadding = false)
proc toStackElement(v: UInt256, elem: var StackElement) {.inline.} = elem = v
proc toStackElement(v: uint | int | GasInt, elem: var StackElement) {.inline.} = elem = v.u256
proc toStackElement(v: EthAddress, elem: var StackElement) {.inline.} = elem.initFromBytesBE(v)
proc toStackElement(v: MDigest, elem: var StackElement) {.inline.} = elem.initFromBytesBE(v.data, allowPadding = false)

proc stackValueFrom*(v: openArray[byte]): UInt256 {.inline.} =
proc fromStackElement(elem: StackElement, v: var UInt256) {.inline.} = v = elem
proc fromStackElement(elem: StackElement, v: var EthAddress) {.inline.} = v[0 .. ^1] = elem.toByteArrayBE().toOpenArray(12, 31)
proc fromStackElement(elem: StackElement, v: var Hash256) {.inline.} = v.data = elem.toByteArrayBE()
proc fromStackElement(elem: StackElement, v: var Topic) {.inline.} = v = elem.toByteArrayBE()

proc toStackElement(v: openArray[byte], elem: var StackElement) {.inline.} =
  # TODO: This needs to go
  validateStackItem(v) # This is necessary to pass stack tests
  result.initFromBytesBE(v)
  elem.initFromBytesBE(v)

proc fromStackValue(i: UInt256, v: var UInt256) {.inline.} = v = i
proc fromStackValue(i: UInt256, v: var EthAddress) {.inline.} = v[0 .. ^1] = i.toByteArrayBE().toOpenArray(12, 31)
proc fromStackValue(i: UInt256, v: var Hash256) {.inline.} = v.data = i.toByteArrayBE()
proc fromStackValue(i: UInt256, v: var Topic) {.inline.} = v = i.toByteArrayBE()

proc intFromStackValue*(i: UInt256): UInt256 {.inline.} = i
proc addressFromStackValue*(i: UInt256): EthAddress {.inline.} = fromStackValue(i, result)
proc hashFromStackValue*(i: UInt256): Hash256 {.inline.} = fromStackValue(i, result)
proc topicFromStackValue*(i: UInt256): Topic {.inline.} = fromStackValue(i, result)

proc futureStackValue*(elem: StackElement): Future[UInt256] =
  toFuture(elem)

proc futureInt* (elem: StackElement): Future[UInt256] {.async.} = return intFromStackValue(await futureStackValue(elem))
proc futureAddress*(elem: StackElement): Future[EthAddress] {.async.} = return addressFromStackValue(await futureStackValue(elem))
proc futureHash* (elem: StackElement): Future[Hash256] {.async.} = return hashFromStackValue(await futureStackValue(elem))
proc futureTopic* (elem: StackElement): Future[Topic] {.async.} = return topicFromStackValue(await futureStackValue(elem))


# FIXME-Adam: we may not need anything other than the StackElement one, after we're done refactoring
proc pushAux[T](stack: var Stack, value: T) =
  ensureStackLimit()
  stack.elements.setLen(stack.elements.len + 1)
  stack.elements[^1] = pureStackElement(stackValueFrom(value))
  stack.values.setLen(stack.values.len + 1)
  toStackElement(value, stack.values[^1])

proc push*(stack: var Stack, value: uint | int | GasInt | UInt256 | EthAddress | Hash256) {.inline.} =
  pushAux(stack, value)
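The pushAux/toStackElement pair restored above is a generic-conversion pattern: one push routine grows the values seq, and an overloaded converter writes the new top in place. Below is a dependency-free sketch of that idea, added purely for illustration; uint64 stands in for UInt256 and the names MiniStack and toElement are invented here, not taken from the repository.

type
  Word = uint64                  # stand-in for UInt256
  MiniStack = object
    values: seq[Word]

proc toElement(v: int, elem: var Word) = elem = Word(v)
proc toElement(v: array[4, byte], elem: var Word) =
  # big-endian packing of the bytes into the word
  elem = 0
  for b in v: elem = (elem shl 8) or Word(b)

proc pushAux[T](stack: var MiniStack, value: T) =
  if stack.values.len > 1023:
    raise newException(ValueError, "stack limit reached")
  stack.values.setLen(stack.values.len + 1)
  toElement(value, stack.values[^1])   # overload picked by the pushed type

when isMainModule:
  var s: MiniStack
  s.pushAux(7)
  s.pushAux([0x00.byte, 0x00, 0x01, 0x00])
  doAssert s.values == @[7'u64, 256'u64]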
@@ -82,71 +53,54 @@ proc push*(stack: var Stack, value: openArray[byte]) {.inline.} =
  # TODO: This needs to go...
  pushAux(stack, value)

proc pushElement*(stack: var Stack, elem: StackElement) =
  ensureStackLimit()
  stack.elements.setLen(stack.elements.len + 1)
  stack.elements[^1] = elem

proc push*(stack: var Stack, elem: StackElement) =
  stack.pushElement(elem)

proc ensurePop(stack: Stack, expected: int) =
  let num = stack.len
proc ensurePop(elements: Stack, a: int) =
  let num = elements.len
  let expected = a
  if num < expected:
    raise newException(InsufficientStack,
      &"Stack underflow: expected {expected} elements, got {num} instead.")

proc internalPopElementsTuple(stack: var Stack, v: var tuple, tupleLen: static[int]) =
proc popAux[T](stack: var Stack, value: var T) =
  ensurePop(stack, 1)
  fromStackElement(stack.values[^1], value)
  stack.values.setLen(stack.values.len - 1)

proc internalPopTuple(stack: var Stack, v: var tuple, tupleLen: static[int]) =
  ensurePop(stack, tupleLen)
  var i = 0
  let sz = stack.elements.high
  let sz = stack.values.high
  for f in fields(v):
    f = stack.elements[sz - i]
    fromStackElement(stack.values[sz - i], f)
    inc i
  stack.elements.setLen(sz - tupleLen + 1)
  stack.values.setLen(sz - tupleLen + 1)

macro genTupleType*(len: static[int], elemType: untyped): untyped =
proc popInt*(stack: var Stack): UInt256 {.inline.} =
  popAux(stack, result)

macro genTupleType(len: static[int], elemType: untyped): untyped =
  result = nnkTupleConstr.newNimNode()
  for i in 0 ..< len: result.add(elemType)

proc popElement*(stack: var Stack): StackElement {.inline.} =
  ensurePop(stack, 1)
  result = stack.elements[^1]
  stack.elements.setLen(stack.elements.len - 1)

proc popElements*(stack: var Stack, numItems: static[int]): auto {.inline.} =
  var r: genTupleType(numItems, StackElement)
  stack.internalPopElementsTuple(r, numItems)
proc popInt*(stack: var Stack, numItems: static[int]): auto {.inline.} =
  var r: genTupleType(numItems, UInt256)
  stack.internalPopTuple(r, numItems)
  return r

proc popSeqOfElements*(stack: var Stack, numItems: int): seq[StackElement] {.inline.} =
  ensurePop(stack, numItems)
  let sz = stack.elements.high
  for i in 0 ..< numItems:
    result.add(stack.elements[sz - i])
  stack.elements.setLen(sz - numItems + 1)
proc popAddress*(stack: var Stack): EthAddress {.inline.} =
  popAux(stack, result)

template popAndMap*(stack: var Stack, lvalueA: untyped, body: untyped): StackElement =
  map(stack.popElement) do (lvalueA: UInt256) -> UInt256:
    body

template popAndCombine*(stack: var Stack, lvalueA: untyped, lvalueB: untyped, body: untyped): StackElement =
  combineAndApply(stack.popElements(2)) do (lvalueA, lvalueB: UInt256) -> UInt256:
    body

template popAndCombine*(stack: var Stack, lvalueA: untyped, lvalueB: untyped, lvalueC: untyped, body: untyped): StackElement =
  combineAndApply(stack.popElements(3)) do (lvalueA, lvalueB, lvalueC: UInt256) -> UInt256:
    body
proc popTopic*(stack: var Stack): Topic {.inline.} =
  popAux(stack, result)

proc newStack*(): Stack =
  new(result)
  result.elements = @[]
  result.values = @[]

proc swap*(stack: var Stack, position: int) =
  ## Perform a SWAP operation on the stack
  var idx = position + 1
  if idx < len(stack) + 1:
    (stack.elements[^1], stack.elements[^idx]) = (stack.elements[^idx], stack.elements[^1])
    (stack.values[^1], stack.values[^idx]) = (stack.values[^idx], stack.values[^1])
  else:
    raise newException(InsufficientStack,
      &"Insufficient stack items for SWAP{position}")
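genTupleType plus internalPopTuple is how several stack items are popped straight into a tuple: the macro builds an N-field tuple type, fields() walks it from the top of the stack down, and the backing seq shrinks once at the end. The following self-contained sketch of that mechanism is illustrative only; MiniStack is an invented stand-in and uint64 replaces UInt256.

import std/macros

type MiniStack = object
  values: seq[uint64]

macro genTupleType(len: static[int], elemType: untyped): untyped =
  result = nnkTupleConstr.newNimNode()
  for i in 0 ..< len: result.add(elemType)

proc internalPopTuple(stack: var MiniStack, v: var tuple, tupleLen: static[int]) =
  doAssert stack.values.len >= tupleLen, "stack underflow"
  var i = 0
  let sz = stack.values.high
  for f in fields(v):
    f = stack.values[sz - i]     # the top of the stack fills the first field
    inc i
  stack.values.setLen(sz - tupleLen + 1)

proc popInt(stack: var MiniStack, numItems: static[int]): auto =
  var r: genTupleType(numItems, uint64)
  stack.internalPopTuple(r, numItems)
  r

when isMainModule:
  var s = MiniStack(values: @[1'u64, 2, 3])
  let (top, next) = s.popInt(2)
  doAssert top == 3 and next == 2
  doAssert s.values == @[1'u64]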
@@ -157,71 +111,26 @@ proc dup*(stack: var Stack, position: int | UInt256) =
  ## Perform a DUP operation on the stack
  let position = position.getInt
  if position in 1 .. stack.len:
    stack.pushElement(stack.elements[^position])
    stack.push(stack.values[^position])
  else:
    raise newException(InsufficientStack,
      &"Insufficient stack items for DUP{position}")

proc peekElement*(stack: Stack): StackElement =
  stack.elements[^1]

proc peek*(stack: Stack): UInt256 =
  # This should be used only for testing purposes!
  unsafeGetAlreadyAvailableValue(peekElement(stack))

proc `$`*(elem: StackElement): string =
  let m = maybeAlreadyAvailableValueOf(elem)
  if m.isSome:
    $(m.get)
  else:
    "not yet available"
  fromStackElement(stack.values[^1], result)

proc `$`*(stack: Stack): string =
  let elements = stack.elements.mapIt(&"  {$it}").join("\n")
  &"Stack:\n{elements}"
  let values = stack.values.mapIt(&"  {$it}").join("\n")
  &"Stack:\n{values}"

# FIXME-Adam: is it okay for this to be unsafe?
proc `[]`*(stack: Stack, i: BackwardsIndex, T: typedesc): T =
  ensurePop(stack, int(i))
  fromStackValue(unsafeGetAlreadyAvailableValue(stack.elements[i]), result)
  fromStackElement(stack.values[i], result)

proc replaceTopElement*(stack: Stack, newTopElem: StackElement) {.inline.} =
  stack.elements[^1] = newTopElem
proc peekInt*(stack: Stack): UInt256 =
  ensurePop(stack, 1)
  fromStackElement(stack.values[^1], result)


# FIXME-Adam: These need to be removed, because calling waitFor is obviously
# not what we want. I'm only leaving them here for now to keep the compiler
# happy until we switch over to the new way.
proc popInt*(stack: var Stack): UInt256 =
  let elem = stack.popElement
  waitFor(elem.futureInt())

proc popAddress*(stack: var Stack): EthAddress =
  let elem = stack.popElement
  waitFor(elem.futureAddress())

proc popTopic*(stack: var Stack): Topic =
  let elem = stack.popElement
  waitFor(elem.futureTopic())

proc internalPopTuple(stack: var Stack, v: var tuple, tupleLen: static[int]) =
  ensurePop(stack, tupleLen)
  var i = 0
  let sz = stack.elements.high
  for f in fields(v):
    let elem = stack.elements[sz - i]
    # FIXME-Adam: terrible idea, waits after each one instead of waiting once for
    # all of them, but this is temporary code that will be deleted after we've
    # switched over to the new way.
    waitFor(discardFutureValue(futureStackValue(elem)))
    let v = unsafeGetAlreadyAvailableValue(elem)
    fromStackValue(v, f)
    inc i
  stack.elements.setLen(sz - tupleLen + 1)

proc popInt*(stack: var Stack, numItems: static[int]): auto {.inline.} =
  var r: genTupleType(numItems, UInt256)
  stack.internalPopTuple(r, numItems)
  return r
proc top*(stack: Stack, value: uint | int | GasInt | UInt256 | EthAddress | Hash256) {.inline.} =
  toStackElement(value, stack.values[^1])
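swap and dup above use 1-based positions counted from the top of the stack: SWAP1 exchanges the two topmost values, and DUPn copies the n-th value from the top. A minimal stand-alone sketch of those two operations on a plain seq-backed stack (MiniStack is an invented stand-in, not the project's type):

type MiniStack = object
  values: seq[uint64]

proc swap(stack: var MiniStack, position: int) =
  let idx = position + 1
  if idx < stack.values.len + 1:
    (stack.values[^1], stack.values[^idx]) = (stack.values[^idx], stack.values[^1])
  else:
    raise newException(ValueError, "insufficient stack items for SWAP" & $position)

proc dup(stack: var MiniStack, position: int) =
  if position in 1 .. stack.values.len:
    stack.values.add(stack.values[^position])   # re-push the n-th value from the top
  else:
    raise newException(ValueError, "insufficient stack items for DUP" & $position)

when isMainModule:
  var s = MiniStack(values: @[10'u64, 20, 30])
  s.swap(1)                         # swap the top two: 30 <-> 20
  doAssert s.values == @[10'u64, 30, 20]
  s.dup(3)                          # duplicate the 3rd value from the top
  doAssert s.values == @[10'u64, 30, 20, 10]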
@@ -12,7 +12,6 @@ import
  std/[json, strutils, sets, hashes],
  chronicles, eth/common, stint,
  nimcrypto/utils,
  ../utils/functors/possible_futures,
  ./types, ./memory, ./stack, ../db/accounts_cache,
  ./interpreter/op_codes
@@ -107,12 +106,11 @@ proc traceOpCodeStarted*(tracer: var TransactionTracer, c: Computation, op: Op):
  # log memory
  if TracerFlags.DisableMemory notin tracer.flags:
    let bytes = c.memory.waitForBytes # FIXME-Adam: it's either this or make the tracer async; ugh.
    let mem = newJArray()
    const chunkLen = 32
    let numChunks = c.memory.len div chunkLen
    for i in 0 ..< numChunks:
      let memHex = bytes.toOpenArray(i * chunkLen, (i + 1) * chunkLen - 1).toHex()
      let memHex = c.memory.bytes.toOpenArray(i * chunkLen, (i + 1) * chunkLen - 1).toHex()
      if TracerFlags.GethCompatibility in tracer.flags:
        mem.add(%("0x" & memHex.toLowerAscii))
      else:
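The tracing loop above splits memory into 32-byte chunks (one EVM word each) and emits one hex string per chunk. A hedged, self-contained sketch of that chunked hex dump, using only the standard library rather than the tracer's own types (traceMemory is an invented name):

import std/[strutils, sequtils, json]

proc traceMemory(bytes: seq[byte]): JsonNode =
  ## Render memory as one lowercase hex string per 32-byte chunk.
  result = newJArray()
  const chunkLen = 32
  let numChunks = bytes.len div chunkLen
  for i in 0 ..< numChunks:
    let chunk = bytes[i * chunkLen ..< (i + 1) * chunkLen]
    result.add(%("0x" & chunk.mapIt(it.toHex).join().toLowerAscii))

when isMainModule:
  var mem = newSeq[byte](64)
  mem[0] = 0xff
  echo traceMemory(mem)   # first word starts with "0xff00...", second word is all zeros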
@@ -147,7 +145,7 @@ proc traceOpCodeEnded*(tracer: var TransactionTracer, c: Computation, op: Op, la
  if c.msg.depth < tracer.storageKeys.len:
    var stateDB = c.vmState.stateDB
    for key in tracer.storage(c.msg.depth):
      let value = waitForValueOf(stateDB.getStorageCell(c.msg.contractAddress, key)) # FIXME-Adam: again, I don't like the waitFor
      let value = stateDB.getStorage(c.msg.contractAddress, key)
      if TracerFlags.GethCompatibility in tracer.flags:
        storage["0x" & key.dumpHex.stripLeadingZeros] =
          %("0x" & value.dumpHex.stripLeadingZeros)
@@ -78,6 +78,7 @@ proc txPriorityFee(ttx: TypedTransaction): UInt256 =
  except RlpError:
    doAssert(false, "found TypedTransaction that RLP failed to decode")

# AARDVARK: make sure I have the right units (wei/gwei)
proc sumOfBlockPriorityFees(payload: ExecutionPayloadV1OrV2): UInt256 =
  payload.transactions.foldl(a + txPriorityFee(b), UInt256.zero)
@@ -93,7 +93,7 @@ proc toWithdrawal*(w: WithdrawalV1): Withdrawal =
    index: uint64(w.index),
    validatorIndex: uint64(w.validatorIndex),
    address: distinctBase(w.address),
    amount: uint64(w.amount)
    amount: uint64(w.amount) # AARDVARK: is this wei or gwei or what?
  )

proc toWithdrawalV1*(w: Withdrawal): WithdrawalV1 =
@@ -101,7 +101,7 @@ proc toWithdrawalV1*(w: Withdrawal): WithdrawalV1 =
    index: Quantity(w.index),
    validatorIndex: Quantity(w.validatorIndex),
    address: Address(w.address),
    amount: Quantity(w.amount)
    amount: Quantity(w.amount) # AARDVARK: is this wei or gwei or what?
  )

proc toTypedTransaction*(tx: Transaction): TypedTransaction =
@@ -1,71 +0,0 @@
import
  chronos

export chronos

# FIXME-Adam: These are a bunch of operations on Futures that I figure
# should exist somewhere in the chronos library, except that I couldn't
# find them. Are they in there somewhere? Can I add them?

proc pureFuture*[V](value: V): Future[V] =
  var fut = newFuture[V]("pureFuture")
  fut.complete(value)
  fut


proc discardFutureValue*[A](fut: Future[A]): Future[void] {.async.} =
  discard await fut

proc map*[A, B](futA: Future[A], callback: (proc(a: A): B {.gcsafe.})): Future[B] {.async.} =
  return callback(await futA)

proc flatMap*[A, B](futA: Future[A], callback: (proc(a: A): Future[B] {.gcsafe.})): Future[B] {.async.} =
  return await callback(await futA)

# FIXME-Adam: can I do some type magic to handle tuples of any length?
proc combine*[A, B](fA: Future[A], fB: Future[B]): Future[(A, B)] {.async.} =
  return (await fA, await fB)

proc combine*[A, B, C](fA: Future[A], fB: Future[B], fC: Future[C]): Future[(A, B, C)] {.async.} =
  return (await fA, await fB, await fC)

proc combine*[A, B, C, D](fA: Future[A], fB: Future[B], fC: Future[C], fD: Future[D]): Future[(A, B, C, D)] {.async.} =
  return (await fA, await fB, await fC, await fD)

proc combine*[A, B, C, D, E](fA: Future[A], fB: Future[B], fC: Future[C], fD: Future[D], fE: Future[E]): Future[(A, B, C, D, E)] {.async.} =
  return (await fA, await fB, await fC, await fD, await fE)

proc combine*[A, B, C, D, E, F](fA: Future[A], fB: Future[B], fC: Future[C], fD: Future[D], fE: Future[E], fF: Future[F]): Future[(A, B, C, D, E, F)] {.async.} =
  return (await fA, await fB, await fC, await fD, await fE, await fF)

proc combine*[A, B, C, D, E, F, G](fA: Future[A], fB: Future[B], fC: Future[C], fD: Future[D], fE: Future[E], fF: Future[F], fG: Future[G]): Future[(A, B, C, D, E, F, G)] {.async.} =
  return (await fA, await fB, await fC, await fD, await fE, await fF, await fG)

proc combineAndApply*[A, B, R](fA: Future[A], fB: Future[B], f: (proc(a: A, b: B): R {.gcsafe.})): Future[R] {.async.} =
  return f(await fA, await fB)

proc combineAndApply*[A, B, C, R](fA: Future[A], fB: Future[B], fC: Future[C], f: (proc(a: A, b: B, c: C): R {.gcsafe.})): Future[R] {.async.} =
  return f(await fA, await fB, await fC)

proc combineAndApply*[A, B, C, D, R](fA: Future[A], fB: Future[B], fC: Future[C], fD: Future[D], f: (proc(a: A, b: B, c: C, d: D): R {.gcsafe.})): Future[R] {.async.} =
  return f(await fA, await fB, await fC, await fD)

# FIXME-Adam: ugh, need to just implement all of this once
proc combineAndApply*[A, B, R](futs: (Future[A], Future[B]), f: (proc(a: A, b: B): R {.gcsafe.})): Future[R] =
  let (fA, fB) = futs
  combineAndApply(fA, fB, f)

proc combineAndApply*[A, B, C, R](futs: (Future[A], Future[B], Future[C]), f: (proc(a: A, b: B, c: C): R {.gcsafe.})): Future[R] =
  let (fA, fB, fC) = futs
  combineAndApply(fA, fB, fC, f)

proc combineAndApply*[A, B, C, D, R](futs: (Future[A], Future[B], Future[C], Future[D]), f: (proc(a: A, b: B, c: C, d: D): R {.gcsafe.})): Future[R] =
  let (fA, fB, fC, fD) = futs
  combineAndApply(fA, fB, fC, fD, f)

proc traverse*[A](futs: seq[Future[A]]): Future[seq[A]] {.async.} =
  var values: seq[A] = @[]
  for fut in futs:
    values.add(await fut)
  return values
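The deleted helpers above are written against chronos. Purely as a rough illustration of the combine/traverse shape (not the project's code or dependency), the same idea can be sketched with the standard library's asyncdispatch, reduced to concrete int futures for brevity:

import std/asyncdispatch

proc pureFuture(value: int): Future[int] =
  # a future that is already completed with the given value
  result = newFuture[int]("pureFuture")
  result.complete(value)

proc combine(fA, fB: Future[int]): Future[(int, int)] {.async.} =
  return (await fA, await fB)

proc traverse(futs: seq[Future[int]]): Future[seq[int]] {.async.} =
  var values: seq[int] = @[]
  for fut in futs:
    values.add(await fut)
  return values

when isMainModule:
  doAssert waitFor(combine(pureFuture(1), pureFuture(2))) == (1, 2)
  doAssert waitFor(traverse(@[pureFuture(3), pureFuture(4)])) == @[3, 4]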
@@ -1,68 +0,0 @@
import
  chronos

# This is simply a wrapper around a value. It should
# hopefully be zero-cost, since it's using 'distinct'.
# But I'm not sure whether it'll actually be zero-cost
# in situations (as I'm intending to use it) where
# it's used polymorphically from sites that could be
# an Identity or could be a Future.
# (See possible_futures.nim.)

type Identity*[Value] = distinct Value

proc pureIdentity*[V](value: V): Identity[V] {.inline.} =
  Identity(value)

proc valueOf*[V](i: Identity[V]): V {.inline.} =
  V(i)

proc map*[A, B](iA: Identity[A], callback: (proc(a: A): B {.gcsafe.})): Identity[B] {.inline.} =
  return Identity(callback(valueOf(iA)))

# FIXME-Adam: can I do some type magic to handle tuples of any length?
proc combine*[A, B](iA: Identity[A], iB: Identity[B]): Identity[(A, B)] =
  pureIdentity((valueOf(iA), valueOf(iB)))

proc combine*[A, B, C](iA: Identity[A], iB: Identity[B], iC: Identity[C]): Identity[(A, B, C)] =
  pureIdentity((valueOf(iA), valueOf(iB), valueOf(iC)))

proc combine*[A, B, C, D](iA: Identity[A], iB: Identity[B], iC: Identity[C], iD: Identity[D]): Identity[(A, B, C, D)] =
  pureIdentity((valueOf(iA), valueOf(iB), valueOf(iC), valueOf(iD)))

proc combine*[A, B, C, D, E](iA: Identity[A], iB: Identity[B], iC: Identity[C], iD: Identity[D], iE: Identity[E]): Identity[(A, B, C, D, E)] =
  pureIdentity((valueOf(iA), valueOf(iB), valueOf(iC), valueOf(iD), valueOf(iE)))

proc combine*[A, B, C, D, E, F](iA: Identity[A], iB: Identity[B], iC: Identity[C], iD: Identity[D], iE: Identity[E], iFF: Identity[F]): Identity[(A, B, C, D, E, F)] =
  pureIdentity((valueOf(iA), valueOf(iB), valueOf(iC), valueOf(iD), valueOf(iE), valueOf(iFF)))

proc combine*[A, B, C, D, E, F, G](iA: Identity[A], iB: Identity[B], iC: Identity[C], iD: Identity[D], iE: Identity[E], iFF: Identity[F], iG: Identity[G]): Identity[(A, B, C, D, E, F, G)] =
  pureIdentity((valueOf(iA), valueOf(iB), valueOf(iC), valueOf(iD), valueOf(iE), valueOf(iFF), valueOf(iG)))

proc combineAndApply*[A, B, R](iA: Identity[A], iB: Identity[B], f: (proc(a: A, b: B): R {.gcsafe.})): Identity[R] =
  pureIdentity(f(valueOf(iA), valueOf(iB)))

proc combineAndApply*[A, B, C, R](iA: Identity[A], iB: Identity[B], iC: Identity[C], f: (proc(a: A, b: B, c: C): R {.gcsafe.})): Identity[R] =
  pureIdentity(f(valueOf(iA), valueOf(iB), valueOf(iC)))

proc combineAndApply*[A, B, C, D, R](iA: Identity[A], iB: Identity[B], iC: Identity[C], iD: Identity[D], f: (proc(a: A, b: B, c: C, d: D): R {.gcsafe.})): Identity[R] =
  pureIdentity(f(valueOf(iA), valueOf(iB), valueOf(iC), valueOf(iD)))

# FIXME-Adam: ugh, need to just implement all of this once
proc combineAndApply*[A, B, R](idents: (Identity[A], Identity[B]), f: (proc(a: A, b: B): R {.gcsafe.})): Identity[R] =
  let (iA, iB) = idents
  combineAndApply(iA, iB, f)

proc combineAndApply*[A, B, C, R](idents: (Identity[A], Identity[B], Identity[C]), f: (proc(a: A, b: B, c: C): R {.gcsafe.})): Identity[R] =
  let (iA, iB, iC) = idents
  combineAndApply(iA, iB, iC, f)

proc combineAndApply*[A, B, C, D, R](idents: (Identity[A], Identity[B], Identity[C], Identity[D]), f: (proc(a: A, b: B, c: C, d: D): R {.gcsafe.})): Identity[R] =
  let (iA, iB, iC, iD) = idents
  combineAndApply(iA, iB, iC, iD, f)

proc traverse*[A](idents: seq[Identity[A]]): Identity[seq[A]] =
  var values: seq[A] = @[]
  for i in idents:
    values.add(valueOf(i))
  return pureIdentity(values)
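The deleted identity.nim wraps an already-known value in a distinct type so the same functor-style API (pure, valueOf, map) applies to values and futures alike. A minimal stand-alone sketch of that wrapper, added only to illustrate the pattern:

type Identity[V] = distinct V

proc pureIdentity[V](value: V): Identity[V] = Identity[V](value)
proc valueOf[V](i: Identity[V]): V = V(i)

proc map[A, B](iA: Identity[A], callback: proc(a: A): B): Identity[B] =
  # apply the callback to the wrapped value and re-wrap the result
  pureIdentity(callback(valueOf(iA)))

when isMainModule:
  let boxed = pureIdentity(21)
  let doubled = boxed.map(proc(x: int): int = x * 2)
  doAssert valueOf(doubled) == 42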
@@ -1,36 +0,0 @@
import
  chronos,
  options,
  ./identity,
  ./futures

# This file contains some operations that can work on either
# Identity or Future.

proc createPure*[V](v: V, c: var Identity[V]) {.inline.} = c = pureIdentity(v)
proc createPure*[V](v: V, c: var Future[V]) {.inline.} = c = pureFuture(v)

proc toFuture*[V](i: Identity[V]): Future[V] = pureFuture(valueOf(i))
proc toFuture*[V](f: Future[V]): Future[V] = f

proc waitForValueOf*[V](i: Identity[V]): V = valueOf(i)
proc waitForValueOf*[V](f: Future[V]): V = waitFor(f)

proc maybeAlreadyAvailableValueOf*[V](i: Identity[V]): Option[V] =
  some(valueOf(i))

proc maybeAlreadyAvailableValueOf*[V](f: Future[V]): Option[V] =
  if f.completed:
    some(f.read)
  else:
    none[V]()

proc unsafeGetAlreadyAvailableValue*[V](c: Identity[V] | Future[V]): V =
  try:
    return maybeAlreadyAvailableValueOf(c).get
  except:
    doAssert(false, "Assertion failure: unsafeGetAlreadyAvailableValue called but the value is not yet available.")
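The deleted possible_futures.nim lets the same query run over an Identity (always available) and a chronos Future (available only once completed). A hedged sketch of that dual dispatch follows, using std/asyncdispatch in place of chronos and a local Identity stand-in; maybeAvailable is an invented name for illustration:

import std/[options, asyncdispatch]

type Identity[V] = distinct V     # stand-in for the deleted identity.nim wrapper

proc maybeAvailable[V](i: Identity[V]): Option[V] = some(V(i))

proc maybeAvailable[V](f: Future[V]): Option[V] =
  if f.finished and not f.failed:
    some(f.read)
  else:
    none(V)

when isMainModule:
  doAssert maybeAvailable(Identity[int](7)).get == 7   # wrapped value: always there
  var fut = newFuture[int]("demo")
  doAssert maybeAvailable(fut).isNone                  # pending future: no value yet
  fut.complete(7)
  doAssert maybeAvailable(fut).get == 7                # completed future: readable now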
@@ -18,9 +18,8 @@ export
  vmm.extend,
  vmm.len,
  vmm.newMemory,
  vmm.bytes,
  vmm.readConcreteBytes,
  vmm.writeConcreteBytes
  vmm.read,
  vmm.write

when defined(evmc_enabled):
  export
@@ -125,11 +124,12 @@ export
  hStk.len,
  hStk.newStack,
  hStk.peek,
  hStk.peekInt,
  hStk.popAddress,
  hStk.popInt,
  hStk.popTopic,
  hStk.push,
  hStk.swap,
  hStk.values
  hStk.top

# End
@@ -23,33 +23,33 @@ proc memoryMain*() =
  test "write":
    var mem = memory32()
    # Test that write creates 32byte string == value padded with zeros
    mem.writeConcreteBytes(startPos = 0, value = @[1.byte, 0.byte, 1.byte, 0.byte])
    mem.write(startPos = 0, value = @[1.byte, 0.byte, 1.byte, 0.byte])
    check(mem.bytes == @[1.byte, 0.byte, 1.byte, 0.byte].concat(repeat(0.byte, 28)))

  # test "write rejects invalid position":
  #   expect(ValidationError):
  #     var mem = memory32()
  #     mem.writeConcreteBytes(startPosition = -1.i256, size = 2.i256, value = @[1.byte, 0.byte])
  #     mem.write(startPosition = -1.i256, size = 2.i256, value = @[1.byte, 0.byte])
  #   expect(ValidationError):
  #     TODO: work on 256
  #     var mem = memory32()
  #     echo "pow ", pow(2.i256, 255) - 1.i256
  #     mem.writeConcreteBytes(startPosition = pow(2.i256, 256), size = 2.i256, value = @[1.byte, 0.byte])
  #     mem.write(startPosition = pow(2.i256, 256), size = 2.i256, value = @[1.byte, 0.byte])

  # test "write rejects invalid size":
  #   # expect(ValidationError):
  #   #   var mem = memory32()
  #   #     mem.writeConcreteBytes(startPosition = 0.i256, size = -1.i256, value = @[1.byte, 0.byte])
  #   #     mem.write(startPosition = 0.i256, size = -1.i256, value = @[1.byte, 0.byte])

  #   #TODO deactivated because of no pow support in Stint: https://github.com/status-im/nim-stint/issues/37
  #   expect(ValidationError):
  #     var mem = memory32()
  #     mem.writeConcreteBytes(startPosition = 0.u256, size = pow(2.u256, 256), value = @[1.byte, 0.byte])
  #     mem.write(startPosition = 0.u256, size = pow(2.u256, 256), value = @[1.byte, 0.byte])

  test "write rejects values beyond memory size":
    expect(ValidationError):
      var mem = memory128()
      mem.writeConcreteBytes(startPos = 128, value = @[1.byte, 0.byte, 1.byte, 0.byte])
      mem.write(startPos = 128, value = @[1.byte, 0.byte, 1.byte, 0.byte])

  test "extends appropriately extends memory":
    var mem = newMemory()
@@ -65,10 +65,10 @@ proc memoryMain*() =

  test "read returns correct bytes":
    var mem = memory32()
    mem.writeConcreteBytes(startPos = 5, value = @[1.byte, 0.byte, 1.byte, 0.byte])
    check(mem.readConcreteBytes(startPos = 5, size = 4) == @[1.byte, 0.byte, 1.byte, 0.byte])
    check(mem.readConcreteBytes(startPos = 6, size = 4) == @[0.byte, 1.byte, 0.byte, 0.byte])
    check(mem.readConcreteBytes(startPos = 1, size = 3) == @[0.byte, 0.byte, 0.byte])
    mem.write(startPos = 5, value = @[1.byte, 0.byte, 1.byte, 0.byte])
    check(mem.read(startPos = 5, size = 4) == @[1.byte, 0.byte, 1.byte, 0.byte])
    check(mem.read(startPos = 6, size = 4) == @[0.byte, 1.byte, 0.byte, 0.byte])
    check(mem.read(startPos = 1, size = 3) == @[0.byte, 0.byte, 0.byte])

when isMainModule:
  memoryMain()
@@ -54,18 +54,18 @@ proc stateDBMain*() =

  test "clone storage":
    var x = RefAccount(
      overlayStorage: initTable[UInt256, StorageCell](),
      overlayStorage: initTable[UInt256, UInt256](),
      originalStorage: newTable[UInt256, UInt256]()
    )

    x.overlayStorage[10.u256] = pureCell(11.u256)
    x.overlayStorage[11.u256] = pureCell(12.u256)
    x.overlayStorage[10.u256] = 11.u256
    x.overlayStorage[11.u256] = 12.u256

    x.originalStorage[10.u256] = 11.u256
    x.originalStorage[11.u256] = 12.u256

    var y = x.clone(cloneStorage = true)
    y.overlayStorage[12.u256] = pureCell(13.u256)
    y.overlayStorage[12.u256] = 13.u256
    y.originalStorage[12.u256] = 13.u256

    check 12.u256 notin x.overlayStorage
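The clone test above relies on writes to the clone's overlayStorage not leaking back into the original. As a generic Nim illustration only (not the project's clone implementation): a value Table is copied on assignment while a TableRef is shared, which may be part of why overlayStorage is built with initTable and originalStorage with newTable in the constructor above.

import std/tables

when isMainModule:
  var a = initTable[int, int]()   # value semantics
  a[10] = 11
  var b = a                       # full copy
  b[12] = 13
  doAssert 12 notin a             # the original is untouched

  let ra = newTable[int, int]()   # reference semantics
  ra[10] = 11
  let rb = ra                     # same underlying table
  rb[12] = 13
  doAssert 12 in ra               # the original sees the write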
@@ -1 +1 @@
Subproject commit f5dd26eac05a00134ebf1af537e16235702c343b
Subproject commit 9a1d35f803d4ae2bda0ede2356cc3213977a3d60
@@ -1 +1 @@
Subproject commit f05e7b0f4864b58c9c564db6368b3b1d46192150
Subproject commit 708a739d35f3fb595a83dc6e6cb9edb4c3e5361e