nimbus-eth1/nimbus/tracer.nim
Jordan Hrycaj ca07c40a48 (2021-07-30 15:06:51 +01:00)
Fearture/poa clique tuning (#765)
* Provide API

details:
  API is bundled via clique.nim.

* Set extraValidation as default for PoA chains

why:
  This triggers consensus verification and an update of the list
  of authorised signers. These signers are an integral part of the
  PoA block chain.

todo:
  Option argument to control validation for the nimbus binary.

* Fix snapshot state block number

why:
  A sub-sequence is used here, so using the len() function gave the
  wrong block number.

* Optional start where block verification begins

why:
  This can speed up loading the initial parts of the block chain. For
  PoA, it allows proving & testing that authorised signers can be
  (correctly) calculated starting at any point on the block chain.

todo:
  On Goerli around blocks #193537..#197568, processing time increases
  disproportionately -- this needs to be understood

* For Clique test, get old grouping back (7 transactions per log entry)

why:
  Forgot to change back after troubleshooting

* Fix field/function/module-name misunderstanding

why:
  Make compilation work

* Use eth_types.blockHash() rather than utils.hash() in Clique modules

why:
  Prefer lib module

* Dissolve snapshot_misc.nim

details:
  .. into clique_verify.nim (the other source file clique_unused.nim
  is inactive)

* Hide unused AsyncLock in Clique descriptor

details:
  Unused here but was part of the Go reference implementation

* Remove fakeDiff flag from Clique descriptor

details:
  This flag was a kludge in the Go reference implementation used for the
  canonical tests. The tests have been adapted so there is no need for
  the fakeDiff flag and its implementation.

* Not observing minimum distance from epoch sync point

why:
  For compiling the PoA state, the Go implementation walks back to an
  epoch header at least 90000 blocks away from the current header in the
  absence of other synchronisation points.

  Here just the nearest epoch header is used. The assumption is that all
  the checkpoints before it have been vetted already, regardless of the
  current branch.

details:
  The behaviour of using the nearest vs the minimum-distance epoch is
  controlled by a flag and can be changed at run time (see the example
  below).

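example:
  A hypothetical sketch of flipping that flag at run time. The
  applySnapsMinBacklog name is taken from a note further down in this
  message; the module path and the Clique setter spelling are assumptions,
  not verified API:

    import p2p/clique

    proc preferMinBacklog(c: Clique) =
      # ask for the Go-style behaviour again, i.e. an epoch snapshot at
      # least 90000 blocks back when no other sync point is available
      c.applySnapsMinBacklog = true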

* Analysing processing time (patch adds some debugging/visualisation support)

why:
  In the first half million blocks of the Goerli replay, blocks in the
  interval #194854..#196224 take exceptionally long to process, but not
  due to PoA processing.

details:
  It turns out that much time is spent in p2p/executor.processBlock()
  where the elapsed transaction execution time is significantly greater
  for many of these blocks.

  Among the 1371 blocks #194854..#196224 there are 223 blocks with more
  than 1/2 second of execution time, whereas there are only 4 such blocks
  before this range and 13 after it, up to #504192.

* Fix debugging symbol in clique_desc (was causing CI failure)

* Fixing canonical reference tests

why:
  Two errors were introduced earlier but overlooked:
   1. The "Remove fakeDiff flag .." patch was incomplete
   2. "Not observing minimum distance .." introduced a problem with tests 23/24

details:
  Fixing 2. required reverting the behaviour by setting the
  applySnapsMinBacklog flag on the Clique descriptor. A new test was
  also added to lock in the new behaviour.

* Remove cruft

why:
  Clique/PoA processing was originally intended to take place somewhere in
  executor/process_block.processBlock(), but it was later decided to run it
  from chain/persist_block.persistBlock() instead.

* Update API comment

* ditto

import
  db/[db_chain, accounts_cache, capturedb], eth/common, utils, json,
  constants, vm_state, vm_types, transaction, p2p/executor,
  eth/trie/db, nimcrypto, strutils,
  chronicles, rpc/hexstrings, launcher

when defined(geth):
  import db/geth_db

  proc getParentHeader(db: BaseChainDB, header: BlockHeader): BlockHeader =
    db.blockHeader(header.blockNumber.truncate(uint64) - 1)
else:
  proc getParentHeader(self: BaseChainDB, header: BlockHeader): BlockHeader =
    self.getBlockHeader(header.parentHash)

proc `%`(x: openArray[byte]): JsonNode =
  result = %toHex(x, false)

proc toJson(receipt: Receipt): JsonNode =
  result = newJObject()

  result["cumulativeGasUsed"] = %receipt.cumulativeGasUsed
  result["bloom"] = %receipt.bloom
  result["logs"] = %receipt.logs

  if receipt.hasStateRoot:
    result["root"] = %($receipt.stateRoot)
  else:
    result["status"] = %receipt.status

proc dumpReceipts*(chainDB: BaseChainDB, header: BlockHeader): JsonNode =
  result = newJArray()
  for receipt in chainDB.getReceipts(header.receiptRoot):
    result.add receipt.toJson

proc toJson*(receipts: seq[Receipt]): JsonNode =
  result = newJArray()
  for receipt in receipts:
    result.add receipt.toJson

proc captureAccount(n: JsonNode, db: AccountsCache, address: EthAddress, name: string) =
  var jaccount = newJObject()
  jaccount["name"] = %name
  jaccount["address"] = %("0x" & $address)

  let nonce = db.getNonce(address)
  let balance = db.getBalance(address)
  let codeHash = db.getCodeHash(address)
  let storageRoot = db.getStorageRoot(address)

  jaccount["nonce"] = %(encodeQuantity(nonce).string.toLowerAscii)
  jaccount["balance"] = %("0x" & balance.toHex)

  let code = db.getCode(address)
  jaccount["codeHash"] = %("0x" & ($codeHash).toLowerAscii)
  jaccount["code"] = %("0x" & toHex(code, true))
  jaccount["storageRoot"] = %("0x" & ($storageRoot).toLowerAscii)

  var storage = newJObject()
  for key, value in db.storage(address):
    storage["0x" & key.dumpHex] = %("0x" & value.dumpHex)
  jaccount["storage"] = storage

  n.add jaccount

proc dumpMemoryDB*(node: JsonNode, memoryDB: TrieDatabaseRef) =
  var n = newJObject()
  for k, v in pairsInMemoryDB(memoryDB):
    n[k.toHex(false)] = %v
  node["state"] = n

const
  senderName = "sender"
  recipientName = "recipient"
  minerName = "miner"
  uncleName = "uncle"
  internalTxName = "internalTx"

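# Trace a single transaction of a block: the block is re-executed on top of
# the parent state through the capturing overlay DB, tracing is enabled only
# for the transaction at `txIndex`, and the result is returned as JSON with
# the gas used plus, unless disabled by flags, a before/after state diff and
# the captured database.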
proc traceTransaction*(chainDB: BaseChainDB, header: BlockHeader,
                       body: BlockBody, txIndex: int, tracerFlags: set[TracerFlags] = {}): JsonNode =
  let
    parent = chainDB.getParentHeader(header)
    # we add a memory layer between backend/lower layer db
    # and capture state db snapshot during transaction execution
    memoryDB = newMemoryDB()
    captureDB = newCaptureDB(chainDB.db, memoryDB)
    captureTrieDB = trieDB captureDB
    captureChainDB = newBaseChainDB(captureTrieDB, false, chainDB.networkId) # prune or not prune?
    vmState = newBaseVMState(parent.stateRoot, header, captureChainDB, tracerFlags + {EnableAccount})

  var stateDb = vmState.accountDb

  if header.txRoot == BLANK_ROOT_HASH: return newJNull()
  doAssert(body.transactions.calcTxRoot == header.txRoot)
  doAssert(body.transactions.len != 0)

  var
    gasUsed: GasInt
    before = newJArray()
    after = newJArray()
    stateDiff = %{"before": before, "after": after}
    beforeRoot: Hash256

  let
    miner = vmState.coinbase()

  # replay the block's transactions in order; tracing is switched on only
  # for the transaction at txIndex
  for idx, tx in body.transactions:
    let sender = tx.getSender
    let recipient = tx.getRecipient(sender)

    if idx == txIndex:
      vmState.enableTracing()
      before.captureAccount(stateDb, sender, senderName)
      before.captureAccount(stateDb, recipient, recipientName)
      before.captureAccount(stateDb, miner, minerName)
      stateDb.persist()
      stateDiff["beforeRoot"] = %($stateDb.rootHash)
      beforeRoot = stateDb.rootHash

    gasUsed = processTransaction(tx, sender, vmState)

    if idx == txIndex:
      after.captureAccount(stateDb, sender, senderName)
      after.captureAccount(stateDb, recipient, recipientName)
      after.captureAccount(stateDb, miner, minerName)
      vmState.removeTracedAccounts(sender, recipient, miner)
      stateDb.persist()
      stateDiff["afterRoot"] = %($stateDb.rootHash)
      break

  # internal transactions:
  var stateBefore = AccountsCache.init(captureTrieDB, beforeRoot, chainDB.pruneTrie)
  for idx, acc in tracedAccountsPairs(vmState):
    before.captureAccount(stateBefore, acc, internalTxName & $idx)

  for idx, acc in tracedAccountsPairs(vmState):
    after.captureAccount(stateDb, acc, internalTxName & $idx)

  result = vmState.getTracingResult()
  result["gas"] = %gasUsed

  if TracerFlags.DisableStateDiff notin tracerFlags:
    result["stateDiff"] = stateDiff

  # now we dump captured state db
  if TracerFlags.DisableState notin tracerFlags:
    result.dumpMemoryDB(memoryDB)

proc dumpBlockState*(db: BaseChainDB, header: BlockHeader, body: BlockBody, dumpState = false): JsonNode =
  let
    parent = db.getParentHeader(header)
    memoryDB = newMemoryDB()
    captureDB = newCaptureDB(db.db, memoryDB)
    captureTrieDB = trieDB captureDB
    captureChainDB = newBaseChainDB(captureTrieDB, false, db.networkId)
    # we only need stack dump if we want to scan for internal transaction address
    vmState = newBaseVMState(parent.stateRoot, header, captureChainDB,
                             {EnableTracing, DisableMemory, DisableStorage, EnableAccount})
    miner = vmState.coinbase()

  var
    before = newJArray()
    after = newJArray()
    stateBefore = AccountsCache.init(captureTrieDB, parent.stateRoot, db.pruneTrie)

  for idx, tx in body.transactions:
    let sender = tx.getSender
    let recipient = tx.getRecipient(sender)
    before.captureAccount(stateBefore, sender, senderName & $idx)
    before.captureAccount(stateBefore, recipient, recipientName & $idx)

  before.captureAccount(stateBefore, miner, minerName)

  for idx, uncle in body.uncles:
    before.captureAccount(stateBefore, uncle.coinbase, uncleName & $idx)

  # replay the whole block (without PoA processing) so that the tracer can
  # collect the touched accounts and the post-execution state
  discard vmState.processBlockNotPoA(header, body)

  var stateAfter = vmState.accountDb

  for idx, tx in body.transactions:
    let sender = tx.getSender
    let recipient = tx.getRecipient(sender)
    after.captureAccount(stateAfter, sender, senderName & $idx)
    after.captureAccount(stateAfter, recipient, recipientName & $idx)
    vmState.removeTracedAccounts(sender, recipient)

  after.captureAccount(stateAfter, miner, minerName)
  vmState.removeTracedAccounts(miner)

  for idx, uncle in body.uncles:
    after.captureAccount(stateAfter, uncle.coinbase, uncleName & $idx)
    vmState.removeTracedAccounts(uncle.coinbase)

  # internal transactions:
  for idx, acc in tracedAccountsPairs(vmState):
    before.captureAccount(stateBefore, acc, internalTxName & $idx)

  for idx, acc in tracedAccountsPairs(vmState):
    after.captureAccount(stateAfter, acc, internalTxName & $idx)

  result = %{"before": before, "after": after}

  if dumpState:
    result.dumpMemoryDB(memoryDB)

proc traceBlock*(chainDB: BaseChainDB, header: BlockHeader, body: BlockBody, tracerFlags: set[TracerFlags] = {}): JsonNode =
  let
    parent = chainDB.getParentHeader(header)
    memoryDB = newMemoryDB()
    captureDB = newCaptureDB(chainDB.db, memoryDB)
    captureTrieDB = trieDB captureDB
    captureChainDB = newBaseChainDB(captureTrieDB, false, chainDB.networkId)
    vmState = newBaseVMState(parent.stateRoot, header, captureChainDB, tracerFlags + {EnableTracing})

  if header.txRoot == BLANK_ROOT_HASH: return newJNull()
  doAssert(body.transactions.calcTxRoot == header.txRoot)
  doAssert(body.transactions.len != 0)

  var gasUsed = GasInt(0)

  for tx in body.transactions:
    let sender = tx.getSender
    gasUsed = gasUsed + processTransaction(tx, sender, vmState)

  result = vmState.getTracingResult()
  result["gas"] = %gasUsed

  if TracerFlags.DisableState notin tracerFlags:
    result.dumpMemoryDB(memoryDB)

proc traceTransactions*(chainDB: BaseChainDB, header: BlockHeader, blockBody: BlockBody): JsonNode =
  result = newJArray()
  for i in 0 ..< blockBody.transactions.len:
    result.add traceTransaction(chainDB, header, blockBody, i, {DisableState})

proc dumpDebuggingMetaData*(chainDB: BaseChainDB, header: BlockHeader,
                            blockBody: BlockBody, vmState: BaseVMState, launchDebugger = true) =
  let
    blockNumber = header.blockNumber

  var
    memoryDB = newMemoryDB()
    captureDB = newCaptureDB(chainDB.db, memoryDB)
    captureTrieDB = trieDB captureDB
    captureChainDB = newBaseChainDB(captureTrieDB, false, chainDB.networkId)
    bloom = createBloom(vmState.receipts)

  let blockSummary = %{
    "receiptsRoot": %("0x" & toHex(calcReceiptRoot(vmState.receipts).data)),
    "stateRoot": %("0x" & toHex(vmState.accountDb.rootHash.data)),
    "logsBloom": %("0x" & toHex(bloom))
  }

  var metaData = %{
    "blockNumber": %blockNumber.toHex,
    "txTraces": traceTransactions(captureChainDB, header, blockBody),
    "stateDump": dumpBlockState(captureChainDB, header, blockBody),
    "blockTrace": traceBlock(captureChainDB, header, blockBody, {DisableState}),
    "receipts": toJson(vmState.receipts),
    "block": blockSummary
  }

  metaData.dumpMemoryDB(memoryDB)

  let jsonFileName = "debug" & $blockNumber & ".json"
  if launchDebugger:
    launchPremix(jsonFileName, metaData)
  else:
    writeFile(jsonFileName, metaData.pretty())
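
A minimal usage sketch, added for illustration and not part of the module: it assumes the caller already has a populated BaseChainDB together with the BlockHeader and BlockBody of the block of interest (how those are looked up is outside this file); only the traceTransaction, dumpBlockState and traceBlock signatures above are taken from the code.

when isMainModule:
  # Hypothetical driver for the procs above; `demoTrace` and its parameters
  # are assumptions made for this sketch, not part of the module's API.
  proc demoTrace(chainDB: BaseChainDB, header: BlockHeader, body: BlockBody) =
    # trace the first transaction only, leaving the captured state database
    # out of the result to keep the JSON small
    if body.transactions.len > 0:
      echo traceTransaction(chainDB, header, body, 0, {DisableState}).pretty

    # before/after account snapshot of the whole block
    echo dumpBlockState(chainDB, header, body).pretty

    # full VM trace of every transaction in the block
    echo traceBlock(chainDB, header, body, {DisableState}).pretty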