2020-04-20 18:12:44 +00:00
|
|
|
import ../db/db_chain, eth/common, chronicles, ../vm_state, ../vm_types,
|
2020-07-04 06:23:09 +00:00
|
|
|
../vm/[computation, message], ../vm/interpreter/vm_forks, stint, nimcrypto,
|
2020-07-21 08:12:59 +00:00
|
|
|
../utils, eth/trie/db, ./executor, ../config, ../genesis, ../utils,
|
2020-07-04 06:23:09 +00:00
|
|
|
stew/endians2
|
2018-08-29 08:49:01 +00:00
|
|
|
|
2020-07-22 16:51:26 +00:00
|
|
|
when not defined(release):
|
|
|
|
import ../tracer
|
|
|
|
|
2018-08-29 08:49:01 +00:00
|
|
|
type
  # Chain's forks not always equals to EVM's forks
  ChainFork = enum
    ## Chain-level fork schedule used for EIP-2124/2364 fork-id
    ## computation.  Includes DAOFork, which is a chain split rather
    ## than an EVM rule change, so it is kept separate from the EVM's
    ## own fork enum (vm_forks).
    Frontier,
    Homestead,
    DAOFork,
    Tangerine,
    Spurious,
    Byzantium,
    Constantinople,
    Petersburg,
    Istanbul,
    MuirGlacier

  Chain* = ref object of AbstractChainDB
    ## Concrete AbstractChainDB implementation backed by a BaseChainDB.
    db: BaseChainDB               # underlying chain database (headers, bodies, state)
    forkIds: array[ChainFork, ForkID]  # precomputed EIP-2124 fork ids, one per fork
    blockZeroHash: KeccakHash     # cached genesis block hash for this network
|
|
|
|
|
|
|
|
func toChainFork(c: ChainConfig, number: BlockNumber): ChainFork =
  ## Map a block number to the chain-level fork active at that height.
  # Walk the schedule from the newest fork down; the first fork whose
  # activation block is at or below `number` is the active one.
  if number >= c.muirGlacierBlock:
    result = MuirGlacier
  elif number >= c.istanbulBlock:
    result = Istanbul
  elif number >= c.petersburgBlock:
    result = Petersburg
  elif number >= c.constantinopleBlock:
    result = Constantinople
  elif number >= c.byzantiumBlock:
    result = Byzantium
  elif number >= c.eip158Block:
    result = Spurious
  elif number >= c.eip150Block:
    result = Tangerine
  elif number >= c.daoForkBlock:
    result = DAOFork
  elif number >= c.homesteadBlock:
    result = Homestead
  else:
    # Anything before the first scheduled fork is Frontier.
    result = Frontier
|
|
|
|
|
|
|
|
func toNextFork(n: BlockNumber): uint64 =
  ## Encode a fork activation block for the EIP-2124 fork-id scheme.
  # high(BlockNumber) is used by the config as "fork not scheduled";
  # the fork-id wire encoding represents that as 0.
  if n != high(BlockNumber):
    n.truncate(uint64)
  else:
    0'u64
|
|
|
|
|
|
|
|
func getNextFork(c: ChainConfig, fork: ChainFork): uint64 =
  ## Return the activation block number (as uint64) of the next fork
  ## scheduled after `fork`, or 0 when there is none — the "FORK_NEXT"
  ## value of EIP-2124.
  # Activation block of each fork, in enum order.  Frontier is always 0.
  let next: array[ChainFork, uint64] = [
    0'u64,
    toNextFork(c.homesteadBlock),
    toNextFork(c.daoForkBlock),
    toNextFork(c.eip150Block),
    toNextFork(c.eip158Block),
    toNextFork(c.byzantiumBlock),
    toNextFork(c.constantinopleBlock),
    toNextFork(c.petersburgBlock),
    toNextFork(c.istanbulBlock),
    toNextFork(c.muirGlacierBlock),
  ]

  # The last known fork has no successor.
  if fork == high(ChainFork):
    result = 0
    return

  # Scan forward for the first fork with a *different* activation block.
  # Several forks can share an activation block (e.g. Petersburg shipped
  # together with Constantinople); those are skipped, because the first
  # iteration (x == fork) always matches `result` and later duplicates
  # match as well until a genuinely later block is found.
  result = next[fork]
  for x in fork..high(ChainFork):
    if result != next[x]:
      result = next[x]
      break
|
|
|
|
|
|
|
|
func calculateForkId(c: ChainConfig, fork: ChainFork, prevCRC: uint32, prevFork: uint64): ForkID =
  ## Build the EIP-2124 fork id for `fork`: the rolling CRC32 over the
  ## genesis hash and all past fork activation blocks, plus the next
  ## scheduled fork block.
  let upcoming = c.getNextFork(fork)
  result.nextFork = upcoming
  # Only fold the previous fork block into the checksum when this fork
  # actually advances the schedule; forks sharing an activation block
  # keep the same CRC.
  result.crc =
    if upcoming == prevFork:
      prevCRC
    else:
      crc32(prevCRC, toBytesBE(prevFork))
|
|
|
|
|
|
|
|
func calculateForkIds(c: ChainConfig, genesisCRC: uint32): array[ChainFork, ForkID] =
  ## Precompute the EIP-2124 fork id for every chain fork, chaining the
  ## rolling CRC from the genesis checksum through each fork in order.
  var crc = genesisCRC
  var lastNext = c.getNextFork(Frontier)

  for fork in ChainFork:
    let id = calculateForkId(c, fork, crc, lastNext)
    result[fork] = id
    # Carry this fork's checksum and next-fork block into the next step.
    lastNext = id.nextFork
    crc = id.crc
|
2018-08-29 08:49:01 +00:00
|
|
|
|
|
|
|
proc newChain*(db: BaseChainDB): Chain =
  ## Create a Chain wrapper around `db`, caching the genesis hash and
  ## the precomputed EIP-2124 fork ids for the configured network.
  result.new
  result.db = db

  # NOTE(review): this mutates the shared `db.config` in place so that
  # fork comparisons can treat the DAO fork like any other — confirm no
  # other component relies on daoForkBlock keeping its original value.
  if not db.config.daoForkSupport:
    db.config.daoForkBlock = db.config.homesteadBlock
  let chainId = PublicNetwork(db.config.chainId)
  let g = defaultGenesisBlockForNetwork(chainId)
  result.blockZeroHash = g.toBlock.blockHash
  # Genesis hash seeds the rolling CRC32 of the fork-id scheme.
  let genesisCRC = crc32(0, result.blockZeroHash.data)
  result.forkIds = calculateForkIds(db.config, genesisCRC)
|
|
|
|
|
2019-01-15 14:02:25 +00:00
|
|
|
method genesisHash*(c: Chain): KeccakHash {.gcsafe.} =
  ## Hash of block zero, cached at construction time in `newChain`.
  c.blockZeroHash
|
2018-08-29 08:49:01 +00:00
|
|
|
|
2019-01-15 14:02:25 +00:00
|
|
|
method getBlockHeader*(c: Chain, b: HashOrNum, output: var BlockHeader): bool {.gcsafe.} =
  ## Look up a header by hash or by number, writing it into `output`.
  ## Returns true when found.
  if b.isHash:
    c.db.getBlockHeader(b.hash, output)
  else:
    c.db.getBlockHeader(b.number, output)
|
|
|
|
|
2019-01-15 14:02:25 +00:00
|
|
|
method getBestBlockHeader*(c: Chain): BlockHeader {.gcsafe.} =
  ## Header of the current canonical chain head.
  c.db.getCanonicalHead()
|
|
|
|
|
2019-07-08 15:10:59 +00:00
|
|
|
method getSuccessorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader, skip = 0'u): bool {.gcsafe.} =
  ## Fetch the header `skip + 1` blocks after `h` into `output`.
  ## Returns false when the target would overflow or is not stored.
  let offset = 1 + skip.toBlockNumber
  # `not 0.toBlockNumber` is the maximum block number; the guard keeps
  # `h.blockNumber + offset` from wrapping around.
  if h.blockNumber <= (not 0.toBlockNumber) - offset:
    result = c.db.getBlockHeader(h.blockNumber + offset, output)
|
2019-07-08 15:10:59 +00:00
|
|
|
|
|
|
|
method getAncestorHeader*(c: Chain, h: BlockHeader, output: var BlockHeader, skip = 0'u): bool {.gcsafe.} =
  ## Fetch the header `skip + 1` blocks before `h` into `output`.
  ## Returns false when the target would underflow past genesis or is
  ## not stored.
  let offset = 1 + skip.toBlockNumber
  # Guard against unsigned underflow of `h.blockNumber - offset`.
  if h.blockNumber >= offset:
    result = c.db.getBlockHeader(h.blockNumber - offset, output)
|
2018-08-29 08:49:01 +00:00
|
|
|
|
|
|
|
method getBlockBody*(c: Chain, blockHash: KeccakHash): BlockBodyRef =
  ## Not implemented: always returns nil regardless of `blockHash`.
  ## Callers must handle the nil result.
  result = nil
|
|
|
|
|
2020-06-22 00:48:23 +00:00
|
|
|
method persistBlocks*(c: Chain, headers: openarray[BlockHeader], bodies: openarray[BlockBody]): ValidationResult {.gcsafe.} =
  ## Validate each (header, body) pair by executing it in the VM on top
  ## of the current canonical head, then persist the header, body
  ## transactions and receipts.  All writes happen inside a single DB
  ## transaction that is only committed when every block validates.
  # Run the VM here
  if headers.len != bodies.len:
    debug "Number of headers not matching number of bodies"
    return ValidationResult.Error

  # Nothing to do for an empty batch.  Without this guard the
  # `headers[^1]` / `headers[0]` accesses below would raise an index
  # defect on empty input.
  if headers.len == 0:
    return ValidationResult.OK

  c.db.highestBlock = headers[^1].blockNumber
  let transaction = c.db.db.beginTransaction()
  # Roll everything back unless `commit` below is reached.
  defer: transaction.dispose()

  trace "Persisting blocks", fromBlock = headers[0].blockNumber, toBlock = headers[^1].blockNumber
  for i in 0 ..< headers.len:
    # Execute the block on a VM state rooted at the current head.
    let head = c.db.getCanonicalHead()
    let vmState = newBaseVMState(head.stateRoot, headers[i], c.db)
    let validationResult = processBlock(c.db, headers[i], bodies[i], vmState)

    when not defined(release):
      # In debug builds, dump tracing metadata for genuine execution
      # failures (i.e. when the body matches its declared tx root, so
      # the failure is not just corrupted input).
      if validationResult == ValidationResult.Error and
         bodies[i].transactions.calcTxRoot == headers[i].txRoot:
        dumpDebuggingMetaData(c.db, headers[i], bodies[i], vmState)
        warn "Validation error. Debugging metadata dumped."

    if validationResult != ValidationResult.OK:
      # defer above disposes the transaction, discarding partial writes.
      return validationResult

    discard c.db.persistHeaderToDb(headers[i])
    if c.db.getCanonicalHead().blockHash != headers[i].blockHash:
      debug "Stored block header hash doesn't match declared hash"
      return ValidationResult.Error

    discard c.db.persistTransactions(headers[i].blockNumber, bodies[i].transactions)
    discard c.db.persistReceipts(vmState.receipts)

    # update currentBlock *after* we persist it
    # so the rpc return consistent result
    # between eth_blockNumber and eth_syncing
    c.db.currentBlock = headers[i].blockNumber

  transaction.commit()
|
2019-03-13 00:13:59 +00:00
|
|
|
|
2019-03-13 20:41:52 +00:00
|
|
|
method getTrieDB*(c: Chain): TrieDatabaseRef {.gcsafe.} =
  ## Expose the raw trie database backing this chain.
  c.db.db
|
|
|
|
|
2020-07-04 06:23:09 +00:00
|
|
|
method getForkId*(c: Chain, n: BlockNumber): ForkID {.gcsafe.} =
  ## Return the precomputed EIP-2124/2364 fork id for the fork active
  ## at block number `n`.
  c.forkIds[c.db.config.toChainFork(n)]
|