Re-adjust canonical head to parent of block to be inserted (#726)

* Re-adjust canonical head to parent of block to be inserted

  why:
    Of the failing tests that remain to be solved, 30 will succeed if the
    canonical database chain head is cleverly adjusted -- yes, it looks
    like a hack, indeed.

  details:
    At the moment, this hack works for the non-hive tests only and is
    triggered by a boolean argument passed on to the chain.persistBlocks()
    method.

* Use parent instead of canonical head for block to be inserted

  why:
    Side chains typically need to be inserted somewhere before the
    canonical head.

  details:
    The previous _hack_ was unnecessary and has been removed. It was
    inspired by a verification step in persistBlocks() which explicitly
    referenced the canonical head (which now might or might not refer to
    the newly inserted header). A sketch of the resulting logic follows
    below.

* Remove unnecessary code + comment
parent 2269d16c4c
commit 2d6bf34175
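The gist of the change, as a minimal self-contained Nim sketch rather than the
actual nimbus code: a block being persisted is anchored to the state root of
its own parent header instead of the canonical head, so side-chain blocks
attach correctly even when the canonical tip has moved on. All names below
(Hash, Header, headerDb, canonicalHead, persistOne) are toy stand-ins, not
nimbus APIs.

    import std/tables

    type
      Hash = string                        # toy stand-in for a 32-byte Keccak hash
      Header = object                      # toy stand-in for eth/common BlockHeader
        blockHash, parentHash: Hash
        stateRoot: Hash

    var
      headerDb = initTable[Hash, Header]() # toy header store keyed by block hash
      canonicalHead: Header                # toy canonical chain tip

    proc persistOne(h: Header): bool =
      ## Build the block's execution context from its *parent* header, not from
      ## the canonical head -- the parent of a side-chain block may sit well
      ## behind the canonical tip.
      if h.parentHash notin headerDb:
        return false                       # unknown parent, cannot attach
      let parent = headerDb[h.parentHash]
      let baseStateRoot = parent.stateRoot # <- parent state, not canonicalHead.stateRoot
      discard baseStateRoot                # ... a real chain would run the block here ...
      headerDb[h.blockHash] = h
      # The canonical head only advances when the new block extends it, so a
      # check like `canonicalHead.blockHash == h.blockHash` (the verification
      # removed by this commit) cannot hold for side-chain insertions.
      if canonicalHead.blockHash == h.parentHash:
        canonicalHead = h
      result = true

    when isMainModule:
      let genesis = Header(blockHash: "g", parentHash: "", stateRoot: "s0")
      headerDb["g"] = genesis
      canonicalHead = genesis
      echo persistOne(Header(blockHash: "a", parentHash: "g", stateRoot: "s1")) # true, extends head
      echo persistOne(Header(blockHash: "b", parentHash: "g", stateRoot: "s2")) # true, side chain off "g"

In the real persistBlocks() below, this corresponds to feeding
c.db.getBlockHeader(headers[i].parentHash) into newBaseVMState() and dropping
the canonical-head equality check.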
@@ -8,8 +8,8 @@
 # those terms.

 import
-  std/[os, parseopt, json],
-  eth/[common, p2p, trie/db], stew/byteutils,
+  std/[os, parseopt, strformat, json],
+  eth/[common, trie/db], stew/byteutils,
   ../../../nimbus/db/db_chain,
   ../../../nimbus/[genesis, config, conf_utils],
   ../sim_utils
@@ -42,7 +42,13 @@ proc main() =
                    else:
                      paramStr(1)

+  if not caseFolder.dirExists:
+    # Handy early error message and stop directive
+    let progname = getAppFilename().extractFilename
+    quit(&"*** {progname}: Not a case folder: {caseFolder}")
+
   runTest("Consensus", caseFolder):
+    # Variable `fileName` is injected by `runTest()`
     let node = parseFile(fileName)
     processNode(fileName, node["chainfile"].getStr,
                 node["lastblockhash"].getStr, testStatusIMPL)
@@ -19,7 +19,7 @@ type
   EthHeader = object
     header: BlockHeader

-proc importRlpBlock*(importFile: string, chainDB: BasechainDB): bool =
+proc importRlpBlock*(importFile: string; chainDB: BasechainDB): bool =
   let res = io2.readAllBytes(importFile)
   if res.isErr:
     error "failed to import", fileName = importFile
@@ -1,7 +1,17 @@
-import ../db/db_chain, eth/common, chronicles, ../vm_state,
-  stint, nimcrypto,
-  ../utils, eth/trie/db, ./executor, ../chain_config, ../genesis, ../utils,
-  stew/endians2, ./validate, ./validate/epoch_hash_cache
+import
+  ../chain_config,
+  ../db/db_chain,
+  ../genesis,
+  ../utils,
+  ../vm_state,
+  ./executor,
+  ./validate,
+  ./validate/epoch_hash_cache,
+  chronicles,
+  eth/[common, trie/db],
+  nimcrypto,
+  stew/endians2,
+  stint

 when not defined(release):
   import ../tracer
@@ -130,7 +140,8 @@ method getAncestorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader, ski
 method getBlockBody*(c: Chain, blockHash: KeccakHash): BlockBodyRef =
   result = nil

-method persistBlocks*(c: Chain, headers: openarray[BlockHeader], bodies: openarray[BlockBody]): ValidationResult {.gcsafe.} =
+method persistBlocks*(c: Chain; headers: openarray[BlockHeader];
+                      bodies: openarray[BlockBody]): ValidationResult {.gcsafe.} =
   # Run the VM here
   if headers.len != bodies.len:
     debug "Number of headers not matching number of bodies"
@@ -140,11 +151,15 @@ method persistBlocks*(c: Chain, headers: openarray[BlockHeader], bodies: openarr
   let transaction = c.db.db.beginTransaction()
   defer: transaction.dispose()

-  trace "Persisting blocks", fromBlock = headers[0].blockNumber, toBlock = headers[^1].blockNumber
+  trace "Persisting blocks",
+    fromBlock = headers[0].blockNumber,
+    toBlock = headers[^1].blockNumber
+
   for i in 0 ..< headers.len:
-    let head = c.db.getCanonicalHead()
-    let vmState = newBaseVMState(head.stateRoot, headers[i], c.db)
-    let validationResult = processBlock(c.db, headers[i], bodies[i], vmState)
+    let
+      head = c.db.getBlockHeader(headers[i].parentHash)
+      vmState = newBaseVMState(head.stateRoot, headers[i], c.db)
+      validationResult = processBlock(c.db, headers[i], bodies[i], vmState)

     when not defined(release):
       if validationResult == ValidationResult.Error and
@@ -167,10 +182,6 @@ method persistBlocks*(c: Chain, headers: openarray[BlockHeader], bodies: openarr
       return ValidationResult.Error

     discard c.db.persistHeaderToDb(headers[i])
-    if c.db.getCanonicalHead().blockHash != headers[i].blockHash:
-      debug "Stored block header hash doesn't match declared hash"
-      return ValidationResult.Error
-
     discard c.db.persistTransactions(headers[i].blockNumber, bodies[i].transactions)
     discard c.db.persistReceipts(vmState.receipts)
