Silence compiler gossip after nim upgrade cont1 (#1455)
* Silence some compiler gossip -- part 5, common
  details: Mostly removing redundant imports and `Defect` tracer after switch to nim 1.6

* Silence some compiler gossip -- part 6, db, rpc, utils
  details: Mostly removing redundant imports and `Defect` tracer after switch to nim 1.6

* Silence some compiler gossip -- part 7, randomly collected source files
  details: Mostly removing redundant imports and `Defect` tracer after switch to nim 1.6

* Silence some compiler gossip -- part 8, assorted tests
  details: Mostly removing redundant imports and `Defect` tracer after switch to nim 1.6

* Clique update
  why: More impossible exceptions (undoes temporary fix from previous PR)
This commit is contained in:
parent 89ae9621c4
commit 6b9f3c9ac5
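The mechanical change behind most hunks below: with the switch to Nim 1.6, `Defect` is no longer part of the tracked exception effects, so it drops out of `{.push raises: [...].}` pragmas and per-proc `raises` lists, leaving only the genuinely catchable exceptions. A minimal sketch of the pattern (the `decodeItem` proc and its body are illustrative only, not taken from this commit):

    import eth/rlp

    # Nim 1.6: Defect is out of the tracked exception set, so the module-wide
    # pragma shrinks from `{.push raises: [Defect].}` to:
    {.push raises: [].}

    # Likewise, a proc that previously declared {.raises: [Defect, RlpError].}
    # now lists only the catchable exception it can actually raise:
    proc decodeItem(data: var Rlp): uint64 {.gcsafe, raises: [RlpError].} =
      data.read(uint64)

    {.pop.}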
@@ -19,7 +19,7 @@ import
export
hardforks

-{.push raises: [Defect].}
+{.push raises: [].}

type
Genesis* = ref object

@@ -82,16 +82,16 @@ const
# ------------------------------------------------------------------------------

proc read(rlp: var Rlp, x: var AddressBalance, _: type EthAddress): EthAddress
-{.gcsafe, raises: [Defect,RlpError].} =
+{.gcsafe, raises: [RlpError].} =
let val = rlp.read(UInt256).toByteArrayBE()
result[0 .. ^1] = val.toOpenArray(12, val.high)

proc read(rlp: var Rlp, x: var AddressBalance, _: type GenesisAccount): GenesisAccount
-{.gcsafe, raises: [Defect,RlpError].} =
+{.gcsafe, raises: [RlpError].} =
GenesisAccount(balance: rlp.read(UInt256))

func decodePrealloc*(data: seq[byte]): GenesisAlloc
-{.gcsafe, raises: [Defect,RlpError].} =
+{.gcsafe, raises: [RlpError].} =
for tup in rlp.decode(data, seq[AddressBalance]):
result[tup.address] = tup.account

@@ -104,7 +104,7 @@ proc fromHex(c: char): int =
else: -1

proc readValue(reader: var JsonReader, value: var UInt256)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
## Mixin for `Json.loadFile()`. Note that this driver applies the same
## to `BlockNumber` fields as well as generic `UInt265` fields like the
## account `balance`.

@@ -148,35 +148,35 @@ proc readValue(reader: var JsonReader, value: var UInt256)
reader.lexer.next()

proc readValue(reader: var JsonReader, value: var ChainId)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = reader.readValue(int).ChainId

proc readValue(reader: var JsonReader, value: var Hash256)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = Hash256.fromHex(reader.readValue(string))

proc readValue(reader: var JsonReader, value: var BlockNonce)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = fromHex[uint64](reader.readValue(string)).toBlockNonce

proc readValue(reader: var JsonReader, value: var EthTime)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = fromHex[int64](reader.readValue(string)).fromUnix

proc readValue(reader: var JsonReader, value: var seq[byte])
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = hexToSeqByte(reader.readValue(string))

proc readValue(reader: var JsonReader, value: var GasInt)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = fromHex[GasInt](reader.readValue(string))

proc readValue(reader: var JsonReader, value: var EthAddress)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = parseAddress(reader.readValue(string))

proc readValue(reader: var JsonReader, value: var AccountNonce)
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
value = fromHex[uint64](reader.readValue(string))

template to(a: string, b: type EthAddress): EthAddress =

@@ -201,7 +201,7 @@ macro fillToBlockNumberArray(conf, arr: typed): untyped =
for fork, forkField in forkBlockNumber:
let
fieldIdent = newIdentNode(forkField)
-forkIdent = newIdentNode($HardFork(fork))
+forkIdent = newIdentNode($HardFork(fork.ord))
result.add quote do:
`arr`[`forkIdent`] = `conf`.`fieldIdent`

@@ -269,7 +269,7 @@ proc validateNetworkParams*(params: var NetworkParams): bool =
validateChainConfig(params.config)

proc loadNetworkParams*(fileName: string, params: var NetworkParams):
-bool {.raises: [Defect,SerializationError].} =
+bool =
try:
params = Json.loadFile(fileName, NetworkParams, allowUnknownFields = true)
except IOError as e:

@@ -299,7 +299,7 @@ proc decodeNetworkParams*(jsonString: string, params: var NetworkParams): bool =
validateNetworkParams(params)

proc parseGenesisAlloc*(data: string, ga: var GenesisAlloc): bool
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
try:
ga = Json.decode(data, GenesisAlloc, allowUnknownFields = true)
except JsonReaderError as e:

@@ -309,7 +309,7 @@ proc parseGenesisAlloc*(data: string, ga: var GenesisAlloc): bool
return true

proc parseGenesis*(data: string): Genesis
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
try:
result = Json.decode(data, Genesis, allowUnknownFields = true)
except JsonReaderError as e:

@@ -428,7 +428,7 @@ proc chainConfigForNetwork*(id: NetworkId): ChainConfig =
ChainConfig()

proc genesisBlockForNetwork*(id: NetworkId): Genesis
-{.gcsafe, raises: [Defect, ValueError, RlpError].} =
+{.gcsafe, raises: [ValueError, RlpError].} =
result = case id
of MainNet:
Genesis(

@@ -477,7 +477,7 @@ proc genesisBlockForNetwork*(id: NetworkId): Genesis
Genesis()

proc networkParams*(id: NetworkId): NetworkParams
-{.gcsafe, raises: [Defect, ValueError, RlpError].} =
+{.gcsafe, raises: [ValueError, RlpError].} =
result.genesis = genesisBlockForNetwork(id)
result.config = chainConfigForNetwork(id)
@@ -28,6 +28,8 @@ export
genesis,
utils

+{.push raises: [].}
+
type
SyncProgress = object
start : BlockNumber

@@ -85,7 +87,7 @@ type
# ------------------------------------------------------------------------------

proc hardForkTransition*(com: CommonRef,
-number: BlockNumber, td: Option[DifficultyInt]) {.gcsafe.}
+number: BlockNumber, td: Option[DifficultyInt]) {.gcsafe, raises: [CatchableError].}

func cliquePeriod*(com: CommonRef): int

@@ -95,6 +97,16 @@ func cliqueEpoch*(com: CommonRef): int
# Private helper functions
# ------------------------------------------------------------------------------

+template noExceptionOops(info: static[string]; code: untyped) =
+try:
+code
+except CatchableError as e:
+raiseAssert "Inconveivable (" & info & ": name=" & $e.name & " msg=" & e.msg
+#except Defect as e:
+# raise e
+except Exception as e:
+raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+
proc consensusTransition(com: CommonRef, fork: HardFork) =
if fork >= MergeFork:
com.consensusType = ConsensusType.POS

@@ -103,8 +115,7 @@ proc consensusTransition(com: CommonRef, fork: HardFork) =
# this could happen during reorg
com.consensusType = com.config.consensusType

-proc setForkId(com: CommonRef, blockZero: BlockHeader)
-{. raises: [Defect,CatchableError].} =
+proc setForkId(com: CommonRef, blockZero: BlockHeader) =
com.genesisHash = blockZero.blockHash
let genesisCRC = crc32(0, com.genesisHash.data)
com.forkIds = calculateForkIds(com.config, genesisCRC)

@@ -121,7 +132,7 @@ proc init(com : CommonRef,
pruneTrie: bool,
networkId: NetworkId,
config : ChainConfig,
-genesis : Genesis) =
+genesis : Genesis) {.gcsafe, raises: [CatchableError].} =

config.daoCheck()

@@ -163,7 +174,8 @@ proc new*(_: type CommonRef,
db: TrieDatabaseRef,
pruneTrie: bool = true,
networkId: NetworkId = MainNet,
-params = networkParams(MainNet)): CommonRef =
+params = networkParams(MainNet)): CommonRef
+{.gcsafe, raises: [CatchableError].} =

## If genesis data is present, the forkIds will be initialized
## empty data base also initialized with genesis block

@@ -179,7 +191,8 @@ proc new*(_: type CommonRef,
db: TrieDatabaseRef,
config: ChainConfig,
pruneTrie: bool = true,
-networkId: NetworkId = MainNet): CommonRef =
+networkId: NetworkId = MainNet): CommonRef
+{.gcsafe, raises: [CatchableError].} =

## There is no genesis data present
## Mainly used for testing without genesis

@@ -258,7 +271,7 @@ proc hardForkTransition(com: CommonRef,

proc hardForkTransition*(com: CommonRef, parentHash: Hash256,
number: BlockNumber)
-{.gcsafe, raises: [Defect, CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =

# if mergeForkBlock is present, it has higher
# priority than TTD

@@ -288,7 +301,7 @@ proc hardForkTransition*(com: CommonRef, parentHash: Hash256,
doAssert(false, "unreachable code")

proc hardForkTransition*(com: CommonRef, header: BlockHeader)
-{.gcsafe, raises: [Defect, CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =

com.hardForkTransition(header.parentHash, header.blockNumber)

@@ -312,7 +325,7 @@ func forkGTE*(com: CommonRef, fork: HardFork): bool =

# TODO: move this consensus code to where it belongs
proc minerAddress*(com: CommonRef; header: BlockHeader): EthAddress
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
if com.consensusType != ConsensusType.POA:
# POW and POS return header.coinbase
return header.coinbase

@@ -334,7 +347,7 @@ func isEIP155*(com: CommonRef, number: BlockNumber): bool =
com.config.eip155Block.isSome and number >= com.config.eip155Block.get

proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
if com.config.terminalTotalDifficulty.isNone:
return false

@@ -345,14 +358,14 @@ proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool
ptd >= ttd and td >= ttd

proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType
-{.gcsafe, raises: [Defect,CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
if com.isBlockAfterTtd(header):
return ConsensusType.POS

return com.config.consensusType

proc initializeEmptyDb*(com: CommonRef)
-{.raises: [Defect, CatchableError].} =
+{.raises: [CatchableError].} =
let trieDB = com.db.db
if canonicalHeadHashKey().toOpenArray notin trieDB:
trace "Writing genesis to DB"

@@ -365,6 +378,7 @@ proc initializeEmptyDb*(com: CommonRef)
proc syncReqNewHead*(com: CommonRef; header: BlockHeader) =
## Used by RPC to update the beacon head for snap sync
if not com.syncReqNewHead.isNil:
+noExceptionOops("syncReqNewHead"):
com.syncReqNewHead(header)

# ------------------------------------------------------------------------------
@@ -15,6 +15,8 @@ import

export manager

+{.push raises: [].}
+
type
EthContext* = ref object
am*: AccountsManager

@@ -40,7 +42,8 @@ proc containsOnlyHexDigits(hex: string): bool =
return false
true

-proc getNetKeys*(ctx: EthContext, netKey, dataDir: string): Result[KeyPair, string] =
+proc getNetKeys*(ctx: EthContext, netKey, dataDir: string): Result[KeyPair, string]
+{.gcsafe, raises: [OSError]} =
if netKey.len == 0 or netKey == "random":
let privateKey = ctx.randomPrivateKey()
return ok(privateKey.toKeyPair())

@@ -1,21 +1,29 @@
import
std/tables,
eth/[common, rlp, eip1559],
-chronicles, eth/trie/[db, trie_defs],
+eth/trie/[db, trie_defs],
../db/state_db,
../constants,
./chain_config

-{.push raises: [Defect].}
+{.push raises: [].}

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
-proc newStateDB*(db: TrieDatabaseRef, pruneTrie: bool): AccountStateDB =
+proc newStateDB*(
+db: TrieDatabaseRef;
+pruneTrie: bool;
+): AccountStateDB
+{.gcsafe, raises: [].}=
newAccountStateDB(db, emptyRlpHash, pruneTrie)

-proc toGenesisHeader*(g: Genesis, sdb: AccountStateDB, fork: HardFork): BlockHeader
-{.raises: [Defect, RlpError].} =
+proc toGenesisHeader*(
+g: Genesis;
+sdb: AccountStateDB;
+fork: HardFork;
+): BlockHeader
+{.gcsafe, raises: [RlpError].} =
## Initialise block chain DB accounts derived from the `genesis.alloc` table
## of the `db` descriptor argument.
##

@@ -86,21 +94,29 @@ proc toGenesisHeader*(g: Genesis, sdb: AccountStateDB, fork: HardFork): BlockHea
if g.difficulty.isZero and fork <= London:
result.difficulty = GENESIS_DIFFICULTY

-proc toGenesisHeader*(genesis: Genesis, fork: HardFork, db: TrieDatabaseRef = nil): BlockHeader
-{.raises: [Defect, RlpError].} =
+proc toGenesisHeader*(
+genesis: Genesis;
+fork: HardFork;
+db = TrieDatabaseRef(nil);
+): BlockHeader
+{.gcsafe, raises: [RlpError].} =
## Generate the genesis block header from the `genesis` and `config` argument value.
let
db = if db.isNil: newMemoryDB() else: db
sdb = newStateDB(db, pruneTrie = true)
toGenesisHeader(genesis, sdb, fork)

-proc toGenesisHeader*(params: NetworkParams, db: TrieDatabaseRef = nil): BlockHeader
-{.raises: [Defect, RlpError].} =
+proc toGenesisHeader*(
+params: NetworkParams;
+db = TrieDatabaseRef(nil);
+): BlockHeader
+{.raises: [RlpError].} =
## Generate the genesis block header from the `genesis` and `config` argument value.
let map = toForkToBlockNumber(params.config)
let fork = map.toHardFork(0.toBlockNumber)
toGenesisHeader(params.genesis, fork, db)

+# End
@@ -14,6 +14,8 @@ import
../utils/utils,
./evmforks

+{.push raises: [].}
+
type
ConsensusType* {.pure.} = enum
# Proof of Work

@@ -265,22 +267,22 @@ type
BlockToForks* = array[HardFork, BlockToFork]

func forkTrue(data, number, td: UInt256): bool
-{.gcsafe, nimcall, raises: [Defect, CatchableError].} =
+{.gcsafe, nimcall, raises: [].} =
# frontier always return true
true

func forkFalse(data, number, td: UInt256): bool
-{.gcsafe, nimcall, raises: [Defect, CatchableError].} =
+{.gcsafe, nimcall, raises: [].} =
# forkBlock.isNone always return false
false

func forkMaybe(data, number, td: UInt256): bool
-{.gcsafe, nimcall, raises: [Defect, CatchableError].} =
+{.gcsafe, nimcall, raises: [].} =
# data is a blockNumber
number >= data

func mergeMaybe(data, number, td: UInt256): bool
-{.gcsafe, nimcall, raises: [Defect, CatchableError].} =
+{.gcsafe, nimcall, raises: [].} =
# data is a TTD
td >= data

@@ -10,11 +10,12 @@
import
std/[os, json, tables, strutils],
stew/[byteutils, results],
-eth/[keyfile, common, keys],
-chronicles
+eth/[keyfile, common, keys]

from nimcrypto/utils import burnMem

+{.push raises: [].}
+
type
NimbusAccount* = object
privateKey*: PrivateKey

@@ -27,7 +28,8 @@ type
proc init*(_: type AccountsManager): AccountsManager =
discard

-proc loadKeystores*(am: var AccountsManager, path: string): Result[void, string] =
+proc loadKeystores*(am: var AccountsManager, path: string): Result[void, string]
+{.gcsafe, raises: [OSError].}=
try:
createDir(path)
except OSError, IOError:
@@ -534,7 +534,7 @@ proc parseCmdArg(T: type NetworkParams, p: TaintedString): T =
try:
if not loadNetworkParams(p.string, result):
raise newException(ValueError, "failed to load customNetwork")
-except Exception as exc:
+except Exception: # as exc: -- notused
# on linux/mac, nim compiler refuse to compile
# with unlisted exception error
raise newException(ValueError, "failed to load customNetwork")

@@ -3,7 +3,7 @@
import
eth/common/eth_types

-proc default(t: typedesc): t = discard
+# proc default(t: typedesc): t = discard -- notused

# constants
const

@@ -97,8 +97,7 @@ proc newCliqueCfg*(db: ChainDBRef): CliqueCfg =
proc ecRecover*(
cfg: CliqueCfg;
header: BlockHeader;
-): auto
-{.gcsafe, raises: [CatchableError].} =
+): auto =
cfg.signatures.ecRecover(header)

# ------------------------------------------------------------------------------

@@ -94,8 +94,7 @@ proc inTurn*(s: Snapshot; number: BlockNumber, signer: EthAddress): bool =
# ------------------------------------------------------------------------------

# clique/clique.go(463): func (c *Clique) verifySeal(chain [..]
-proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult
-{.gcsafe, raises: [CatchableError].} =
+proc verifySeal(c: Clique; header: BlockHeader): CliqueOkResult =
## Check whether the signature contained in the header satisfies the
## consensus protocol requirements. The method accepts an optional list of
## parent headers that aren't yet part of the local blockchain to generate
@@ -1,4 +1,4 @@
-{.push raises: [Defect].}
+{.push raises: [].}

import
std/os,

@@ -40,7 +40,7 @@ proc init*(
readOnly = false): KvResult[T] =
let
dataDir = basePath / name / "data"
-tmpDir = basePath / name / "tmp"
+# tmpDir = basePath / name / "tmp" -- notused
backupsDir = basePath / name / "backups"

try:

@@ -27,7 +27,6 @@ import
./oph_gen_handlers,
./oph_helpers,
eth/common,
-eth/common,
sequtils,
stint,
strformat

@@ -144,7 +144,7 @@ proc beforeExecCreate(c: Computation): bool =
c.snapshot()

if c.vmState.readOnlyStateDB().hasCodeOrNonce(c.msg.contractAddress):
-let blurb = c.msg.contractAddress.toHex
+# let blurb = c.msg.contractAddress.toHex -- notused
c.setError("Address collision when creating contract address={blurb}", true)
c.rollback()
return true
@@ -8,12 +8,14 @@
# those terms.

import
-std/[strutils, tables],
+std/strutils,
nimcrypto/utils, eth/common as eth_common,
stint, json_rpc/server, json_rpc/errors,
eth/p2p, eth/p2p/enode,
../config, ./hexstrings

+{.push raises: [].}
+
type
NodePorts = object
discovery: string

@@ -16,6 +16,8 @@ import
httputils,
websock/websock as ws

+{.push raises: [].}
+
proc sameOrigin(a, b: Uri): bool =
a.hostname == b.hostname and
a.scheme == b.scheme and

@@ -13,6 +13,8 @@ import
hexstrings, ../tracer, ../vm_types,
../common/common

+{.push raises: [].}
+
type
TraceOptions = object
disableStorage: Option[bool]

@@ -9,8 +9,8 @@

import
std/[typetraits, times, strutils],
-stew/[objects, results, byteutils],
+stew/[results, byteutils],
-json_rpc/[rpcserver, errors],
+json_rpc/rpcserver,
web3/[conversions, engine_api_types],
eth/rlp,
../common/common,

@@ -24,7 +24,10 @@ import
# if chronicles import is in the middle
chronicles

-proc latestValidHash(db: ChainDBRef, parent: EthBlockHeader, ttd: DifficultyInt): Hash256 =
+{.push raises: [].}
+
+proc latestValidHash(db: ChainDBRef, parent: EthBlockHeader, ttd: DifficultyInt): Hash256
+{.gcsafe, raises: [RlpError].} =
let ptd = db.getScore(parent.parentHash)
if ptd >= ttd:
parent.blockHash

@@ -33,7 +36,8 @@ proc latestValidHash(db: ChainDBRef, parent: EthBlockHeader, ttd: DifficultyInt)
# latestValidHash MUST be set to ZERO
Hash256()

-proc invalidFCU(com: CommonRef, header: EthBlockHeader): ForkchoiceUpdatedResponse =
+proc invalidFCU(com: CommonRef, header: EthBlockHeader): ForkchoiceUpdatedResponse
+{.gcsafe, raises: [RlpError].} =
var parent: EthBlockHeader
if not com.db.getBlockHeader(header.parentHash, parent):
return invalidFCU(Hash256())
@@ -5,8 +5,6 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
-
import
std/options,
eth/common/[eth_types, eth_types_rlp],

@@ -17,6 +15,8 @@ import

export rpc_types

+{.push raises: [].}
+
proc topicToDigest(t: seq[Topic]): seq[Hash256] =
var resSeq: seq[Hash256] = @[]
for top in t:

@@ -25,7 +25,7 @@ import
stew/[byteutils, objects, results],
../config

-{.push raises: [Defect].}
+{.push raises: [].}

logScope:
topics = "Jwt/HS256 auth"

@@ -95,7 +95,7 @@ proc base64urlEncode(x: auto): string =
base64.encode(x, safe = true).replace("=", "")

proc base64urlDecode(data: string): string
-{.gcsafe, raises: [Defect, CatchableError].} =
+{.gcsafe, raises: [CatchableError].} =
## Decodes a JWT specific base64url, optionally encoding with stripped
## padding.
let l = data.len mod 4

@@ -254,7 +254,7 @@ proc jwtSharedSecret*(rndSecret: JwtGenSecret; config: NimbusConf):

proc jwtSharedSecret*(rng: ref rand.HmacDrbgContext; config: NimbusConf):
Result[JwtSharedKey, JwtError]
-{.gcsafe, raises: [Defect,JwtExcept].} =
+{.gcsafe, raises: [JwtExcept].} =
## Variant of `jwtSharedSecret()` with explicit random generator argument.
safeExecutor("jwtSharedSecret"):
result = rng.jwtGenSecret.jwtSharedSecret(config)

@@ -22,6 +22,8 @@ import
../utils/utils,
./filters

+{.push raises: [].}
+
#[
Note:
* Hexstring types (HexQuantitySt, HexDataStr, EthAddressStr, EthHashStr)

@@ -42,7 +44,8 @@ proc setupEthRpc*(
let ac = newAccountStateDB(chainDB.db, header.stateRoot, com.pruneTrie)
result = ReadOnlyStateDB(ac)

-proc stateDBFromTag(tag: string, readOnly = true): ReadOnlyStateDB =
+proc stateDBFromTag(tag: string, readOnly = true): ReadOnlyStateDB
+{.gcsafe, raises: [CatchableError].} =
result = getStateDB(chainDB.headerFromTag(tag))

server.rpc("eth_protocolVersion") do() -> Option[string]:

@@ -440,7 +443,8 @@ proc setupEthRpc*(
chain: ChainDBRef,
hash: Hash256,
header: BlockHeader,
-opts: FilterOptions): seq[FilterLog] =
+opts: FilterOptions): seq[FilterLog]
+{.gcsafe, raises: [RlpError,ValueError].} =
if headerBloomFilter(header, opts.address, opts.topics):
let blockBody = chain.getBlockBody(hash)
let receipts = chain.getReceipts(header.receiptRoot)

@@ -458,7 +462,8 @@ proc setupEthRpc*(
chain: ChainDBRef,
start: UInt256,
finish: UInt256,
-opts: FilterOptions): seq[FilterLog] =
+opts: FilterOptions): seq[FilterLog]
+{.gcsafe, raises: [RlpError,ValueError].} =
var logs = newSeq[FilterLog]()
var i = start
while i <= finish:
@@ -1,13 +1,15 @@
import
json_rpc/jsonmarshal,
stew/byteutils,
-hexstrings, options, eth/[common, rlp], json
+hexstrings, options, eth/common, json

from
web3/ethtypes import FixedBytes

export FixedBytes, common

+{.push raises: [].}
+
#[
Notes:
* Some of the types suppose 'null' when there is no appropriate value.

@@ -130,8 +132,10 @@ type
topics*: seq[Option[seq[Hash256]]] # (optional) list of DATA topics. Topics are order-dependent. Each topic can also be a list of DATA with "or" options.
blockHash*: Option[Hash256] # (optional) hash of the block. If its present, fromBlock and toBlock, should be none. Introduced in EIP234

-proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions) =
-proc getOptionString(argName: string): Option[string] =
+proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions)
+{.gcsafe, raises: [KeyError,ValueError].} =
+proc getOptionString(argName: string): Option[string]
+{.gcsafe, raises: [KeyError,ValueError].} =
let s = n.getOrDefault(argName)
if s == nil:
return none[string]()

@@ -141,7 +145,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions) =
s.kind.expect(JString, argName)
return some[string](s.getStr())

-proc getAddress(): seq[EthAddress] =
+proc getAddress(): seq[EthAddress] {.gcsafe, raises: [ValueError].} =
## Address can by provided in two formats:
## 1. {"address": "hexAddress"}
## 2. {"address": ["hexAddress1", "hexAddress2" ...]}

@@ -170,7 +174,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions) =
else:
raise newException(ValueError, "Parameter 'address` should be either string or of array of strings")

-proc getTopics(): seq[Option[seq[Hash256]]] =
+proc getTopics(): seq[Option[seq[Hash256]]] {.gcsafe, raises: [ValueError].} =
## Topics can be provided in many forms:
## [] "anything"
## [A] "A in first position (and anything after)"

@@ -222,7 +226,7 @@ proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions) =
raise newException(ValueError, msg)
return filterArr

-proc getBlockHash(): Option[Hash256] =
+proc getBlockHash(): Option[Hash256] {.gcsafe, raises: [KeyError,ValueError].} =
let s = getOptionString("blockHash")
if s.isNone():
return none[Hash256]()

@@ -9,22 +9,28 @@

import hexstrings, eth/[common, rlp, keys, trie/db], stew/byteutils,
../db/db_chain, strutils, algorithm, options, times, json,
-../constants, stint, hexstrings, rpc_types,
+../constants, stint, rpc_types,
../utils/utils, ../transaction,
../transaction/call_evm, ../common/evmforks

+{.push raises: [].}
+
const
defaultTag = "latest"

-func toAddress*(value: EthAddressStr): EthAddress = hexToPaddedByteArray[20](value.string)
+func toAddress*(value: EthAddressStr): EthAddress
+{.gcsafe, raises: [ValueError].} =
+hexToPaddedByteArray[20](value.string)

-func toHash*(value: array[32, byte]): Hash256 {.inline.} =
+func toHash*(value: array[32, byte]): Hash256 =
result.data = value

-func toHash*(value: EthHashStr): Hash256 {.inline.} =
+func toHash*(value: EthHashStr): Hash256
+{.gcsafe, raises: [ValueError].} =
result = hexToPaddedByteArray[32](value.string).toHash

-func hexToInt*(s: string, T: typedesc[SomeInteger]): T =
+func hexToInt*(s: string, T: typedesc[SomeInteger]): T
+{.gcsafe, raises: [ValueError].} =
var i = 0
if s[i] == '0' and (s[i+1] in {'x', 'X'}): inc(i, 2)
if s.len - i > sizeof(T) * 2:

@@ -33,7 +39,8 @@ func hexToInt*(s: string, T: typedesc[SomeInteger]): T =
result = result shl 4 or readHexChar(s[i]).T
inc(i)

-proc headerFromTag*(chain: ChainDBRef, blockTag: string): BlockHeader =
+proc headerFromTag*(chain: ChainDBRef, blockTag: string): BlockHeader
+{.gcsafe, raises: [CatchableError].} =
let tag = blockTag.toLowerAscii
case tag
of "latest": result = chain.getCanonicalHead()

@@ -49,13 +56,15 @@ proc headerFromTag*(chain: ChainDBRef, blockTag: string): BlockHeader =
let blockNum = stint.fromHex(UInt256, tag)
result = chain.getBlockHeader(blockNum.toBlockNumber)

-proc headerFromTag*(chain: ChainDBRef, blockTag: Option[string]): BlockHeader =
+proc headerFromTag*(chain: ChainDBRef, blockTag: Option[string]): BlockHeader
+{.gcsafe, raises: [CatchableError].} =
if blockTag.isSome():
return chain.headerFromTag(blockTag.unsafeGet())
else:
return chain.headerFromTag(defaultTag)

-proc calculateMedianGasPrice*(chain: ChainDBRef): GasInt =
+proc calculateMedianGasPrice*(chain: ChainDBRef): GasInt
+{.gcsafe, raises: [CatchableError].} =
var prices = newSeqOfCap[GasInt](64)
let header = chain.getCanonicalHead()
for encodedTx in chain.getBlockTransactionData(header.txRoot):

@@ -72,7 +81,8 @@ proc calculateMedianGasPrice*(chain: ChainDBRef): GasInt =
else:
result = prices[middle]

-proc unsignedTx*(tx: TxSend, chain: ChainDBRef, defaultNonce: AccountNonce): Transaction =
+proc unsignedTx*(tx: TxSend, chain: ChainDBRef, defaultNonce: AccountNonce): Transaction
+{.gcsafe, raises: [CatchableError].} =
if tx.to.isSome:
result.to = some(toAddress(tx.to.get))

@@ -114,7 +124,8 @@ template optionalBytes(src, dst: untyped) =
if src.isSome:
dst = hexToSeqByte(src.get.string)

-proc callData*(call: EthCall): RpcCallData =
+proc callData*(call: EthCall): RpcCallData
+{.gcsafe, raises: [ValueError].} =
optionalAddress(call.source, result.source)
optionalAddress(call.to, result.to)
optionalGas(call.gas, result.gasLimit)

@@ -124,7 +135,8 @@ proc callData*(call: EthCall): RpcCallData =
optionalU256(call.value, result.value)
optionalBytes(call.data, result.data)

-proc populateTransactionObject*(tx: Transaction, header: BlockHeader, txIndex: int): TransactionObject =
+proc populateTransactionObject*(tx: Transaction, header: BlockHeader, txIndex: int): TransactionObject
+{.gcsafe, raises: [ValidationError].} =
result.blockHash = some(header.hash)
result.blockNumber = some(encodeQuantity(header.blockNumber))
result.`from` = tx.getSender()

@@ -140,7 +152,8 @@ proc populateTransactionObject*(tx: Transaction, header: BlockHeader, txIndex: i
result.r = encodeQuantity(tx.R)
result.s = encodeQuantity(tx.S)

-proc populateBlockObject*(header: BlockHeader, chain: ChainDBRef, fullTx: bool, isUncle = false): BlockObject =
+proc populateBlockObject*(header: BlockHeader, chain: ChainDBRef, fullTx: bool, isUncle = false): BlockObject
+{.gcsafe, raises: [CatchableError].} =
let blockHash = header.blockHash

result.number = some(encodeQuantity(header.blockNumber))

@@ -182,7 +195,8 @@ proc populateBlockObject*(header: BlockHeader, chain: ChainDBRef, fullTx: bool,
result.transactions.add %(x)

proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction,
-txIndex: int, header: BlockHeader, fork: EVMFork): ReceiptObject =
+txIndex: int, header: BlockHeader, fork: EVMFork): ReceiptObject
+{.gcsafe, raises: [ValidationError].} =
result.transactionHash = tx.rlpHash
result.transactionIndex = encodeQuantity(txIndex.uint)
result.blockHash = header.hash
@@ -51,7 +51,7 @@ type

proc hostToComputationMessage*(msg: EvmcMessage): Message =
Message(
-kind: CallKind(msg.kind),
+kind: CallKind(msg.kind.ord),
depth: msg.depth,
gas: msg.gas,
sender: msg.sender.fromEvmc,

@@ -27,7 +27,7 @@ import
export
utils_defs, results

-{.push raises: [Defect].}
+{.push raises: [].}

const
INMEMORY_SIGNATURES* = ##\

@@ -153,7 +153,7 @@ proc len*(er: var EcRecover): int =
# ------------------------------------------------------------------------------

proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult
-{.gcsafe, raises: [Defect,CatchableError].} =
+=
## Extract account address from `extraData` field (last 65 bytes) of the
## argument header. The result is kept in a LRU cache to re-purposed for
## improved result delivery avoiding calculations.

@@ -169,13 +169,13 @@ proc ecRecover*(er: var EcRecover; header: var BlockHeader): EcAddrResult
err(rc.error)

proc ecRecover*(er: var EcRecover; header: BlockHeader): EcAddrResult
-{.gcsafe, raises: [Defect,CatchableError].} =
+=
## Variant of `ecRecover()` for call-by-value header
var hdr = header
er.ecRecover(hdr)

proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult
-{.gcsafe, raises: [Defect,CatchableError].} =
+=
## Variant of `ecRecover()` for hash only. Will only succeed it the
## argument hash is uk the LRU queue.
let rc = er.q.lruFetch(hash.data)

@@ -188,7 +188,7 @@ proc ecRecover*(er: var EcRecover; hash: Hash256): EcAddrResult
# ------------------------------------------------------------------------------

proc append*(rw: var RlpWriter; data: EcRecover)
-{.raises: [Defect,KeyError].} =
+{.raises: [KeyError].} =
## Generic support for `rlp.encode()`
rw.append((data.size,data.q))

@@ -202,8 +202,7 @@ proc read*(rlp: var Rlp; Q: type EcRecover): Q
# Debugging
# ------------------------------------------------------------------------------

-iterator keyItemPairs*(er: var EcRecover): (EcKey,EthAddress)
-{.gcsafe, raises: [Defect,CatchableError].} =
+iterator keyItemPairs*(er: var EcRecover): (EcKey,EthAddress) =
var rc = er.q.first
while rc.isOk:
yield (rc.value.key, rc.value.data)

@@ -21,12 +21,14 @@ import
eth/rlp,
stew/keyed_queue

+{.push raises: [].}
+
# ------------------------------------------------------------------------------
# Public functions, RLP support
# ------------------------------------------------------------------------------

proc append*[K,V](rw: var RlpWriter; kq: KeyedQueue[K,V])
-{.raises: [Defect,KeyError].} =
+{.raises: [KeyError].} =
## Generic support for `rlp.encode(kq)` for serialising a queue.
##
## :CAVEAT:

@@ -14,6 +14,8 @@
import
std/[math, strutils]

+{.push raises: [].}
+
proc toSI*(num: SomeUnsignedInt): string =
## Prints `num` argument value greater than 99 as rounded SI unit.
const
@ -4,7 +4,10 @@ import
|
||||||
|
|
||||||
export eth_types_rlp
|
export eth_types_rlp
|
||||||
|
|
||||||
proc calcRootHash[T](items: openArray[T]): Hash256 =
|
{.push raises: [].}
|
||||||
|
|
||||||
|
proc calcRootHash[T](items: openArray[T]): Hash256
|
||||||
|
{.gcsafe, raises: [RlpError]} =
|
||||||
var tr = initHexaryTrie(newMemoryDB())
|
var tr = initHexaryTrie(newMemoryDB())
|
||||||
for i, t in items:
|
for i, t in items:
|
||||||
tr.put(rlp.encode(i), rlp.encode(t))
|
tr.put(rlp.encode(i), rlp.encode(t))
|
||||||
|
|
|
@ -13,6 +13,8 @@
|
||||||
## ===================================
|
## ===================================
|
||||||
##
|
##
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
type
|
type
|
||||||
UtilsErrorType* = enum
|
UtilsErrorType* = enum
|
||||||
resetUtilsError = ##\
|
resetUtilsError = ##\
|
||||||
|
@ -50,8 +52,7 @@ const
|
||||||
## No-error constant
|
## No-error constant
|
||||||
(resetUtilsError, "")
|
(resetUtilsError, "")
|
||||||
|
|
||||||
|
proc `$`*(e: UtilsError): string =
|
||||||
proc `$`*(e: UtilsError): string {.inline.} =
|
|
||||||
## Join text fragments
|
## Join text fragments
|
||||||
result = $e[0]
|
result = $e[0]
|
||||||
if e[1] != "":
|
if e[1] != "":
|
||||||
|
|
|
@@ -6,6 +6,8 @@ import
   ./witness_types, ../nimbus/constants,
   ../nimbus/db/storage_types, ./multi_keys

+{.push raises: [].}
+
 type
   DB = TrieDatabaseRef

@@ -31,7 +33,7 @@ proc initWitnessBuilder*(db: DB, rootHash: KeccakHash, flags: WitnessFlags = {})
 template extensionNodeKey(r: Rlp): auto =
   hexPrefixDecode r.listElem(0).toBytes

-proc expectHash(r: Rlp): seq[byte] =
+proc expectHash(r: Rlp): seq[byte] {.gcsafe, raises: [RlpError].} =
   result = r.toBytes
   if result.len != 32:
     raise newException(RlpTypeMismatch,

@@ -41,7 +43,7 @@ template getNode(elem: untyped): untyped =
   if elem.isList: @(elem.rawData)
   else: @(get(wb.db, elem.expectHash))

-proc rlpListToBitmask(r: var Rlp): uint =
+proc rlpListToBitmask(r: var Rlp): uint {.gcsafe, raises: [RlpError].} =
   # only bit 1st to 16th are valid
   # the 1st bit is the rightmost bit
   var i = 0

@@ -64,7 +66,8 @@ when defined(debugHash):
 template writeByte(wb: var WitnessBuilder, x: untyped) =
   wb.write(byte(x))

-proc writeUVarint(wb: var WitnessBuilder, x: SomeUnsignedInt) =
+proc writeUVarint(wb: var WitnessBuilder, x: SomeUnsignedInt)
+    {.gcsafe, raises: [IOError].} =
   # LEB128 varint encoding
   var value = x
   while true:

@@ -78,7 +81,8 @@ proc writeUVarint(wb: var WitnessBuilder, x: SomeUnsignedInt) =
 template writeUVarint32(wb: var WitnessBuilder, x: untyped) =
   wb.writeUVarint(uint32(x))

-proc writeUVarint(wb: var WitnessBuilder, x: UInt256) =
+proc writeUVarint(wb: var WitnessBuilder, x: UInt256)
+    {.gcsafe, raises: [IOError].} =
   # LEB128 varint encoding
   var value = x
   while true:
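Aside on the `writeUVarint` routines above: they emit plain unsigned LEB128, i.e. seven payload bits per byte with the top bit flagging continuation. A standalone sketch of the same scheme (illustrative only; the real writers stream bytes through the `WitnessBuilder` instead of returning a seq):

proc toUVarint(x: uint64): seq[byte] =
  ## Unsigned LEB128 encoding of `x`.
  var value = x
  while true:
    var b = byte(value and 0x7F)   # low seven bits
    value = value shr 7
    if value != 0:
      b = b or 0x80                # more bytes follow
    result.add b
    if value == 0:
      break

doAssert toUVarint(300'u64) == @[0xAC'u8, 0x02'u8]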
@@ -90,7 +94,8 @@ proc writeUVarint(wb: var WitnessBuilder, x: UInt256) =
     wb.writeByte(b)
     if value == 0: break

-proc writeNibbles(wb: var WitnessBuilder; n: NibblesSeq, withLen: bool = true) =
+proc writeNibbles(wb: var WitnessBuilder; n: NibblesSeq, withLen: bool = true)
+    {.gcsafe, raises: [IOError].} =
   # convert the NibblesSeq into left aligned byte seq
   # perhaps we can optimize it if the NibblesSeq already left aligned
   let nibblesLen = n.len

@@ -109,7 +114,8 @@ proc writeNibbles(wb: var WitnessBuilder; n: NibblesSeq, withLen: bool = true) =
   # write nibbles
   wb.write(bytes.toOpenArray(0, numBytes-1))

-proc writeExtensionNode(wb: var WitnessBuilder, n: NibblesSeq, depth: int, node: openArray[byte]) =
+proc writeExtensionNode(wb: var WitnessBuilder, n: NibblesSeq, depth: int, node: openArray[byte])
+    {.gcsafe, raises: [IOError].} =
   # write type
   wb.writeByte(ExtensionNodeType)
   # write nibbles

@@ -121,7 +127,8 @@ proc writeExtensionNode(wb: var WitnessBuilder, n: NibblesSeq, depth: int, node:
   when defined(debugHash):
     wb.write(keccakHash(node).data)

-proc writeBranchNode(wb: var WitnessBuilder, mask: uint, depth: int, node: openArray[byte]) =
+proc writeBranchNode(wb: var WitnessBuilder, mask: uint, depth: int, node: openArray[byte])
+    {.gcsafe, raises: [IOError].} =
   # write type
   # branch node 17th elem should always empty
   doAssert mask.branchMaskBitIsSet(16) == false

@@ -137,7 +144,8 @@ proc writeBranchNode(wb: var WitnessBuilder, mask: uint, depth: int, node: openA
   when defined(debugHash):
     wb.write(keccakHash(node).data)

-proc writeHashNode(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool) =
+proc writeHashNode(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool)
+    {.gcsafe, raises: [IOError].} =
   # usually a hash node means the recursion will not go deeper
   # and the information can be represented by the hash
   # for chunked witness, a hash node can be a root to another

@@ -147,16 +155,18 @@ proc writeHashNode(wb: var WitnessBuilder, node: openArray[byte], depth: int, st
     wb.writeByte(ShortRlpPrefix)
   wb.write(node)

-proc writeShortRlp(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool) =
+proc writeShortRlp(wb: var WitnessBuilder, node: openArray[byte], depth: int, storageMode: bool)
+    {.gcsafe, raises: [IOError].} =
   doAssert(node.len < 32 and depth >= 9 and storageMode)
   wb.writeByte(HashNodeType)
   wb.writeByte(ShortRlpPrefix)
   wb.writeByte(node.len)
   wb.write(node)

-proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) {.raises: [ContractCodeError, IOError, Defect, CatchableError, Exception].}
+proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) {.gcsafe, raises: [CatchableError].}

-proc writeByteCode(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int) =
+proc writeByteCode(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int)
+    {.gcsafe, raises: [IOError,ContractCodeError].} =
   if not kd.codeTouched:
     # the account have code but not touched by the EVM
     # in current block execution

@@ -182,7 +192,8 @@ proc writeByteCode(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int
   wb.writeUVarint32(code.len)
   wb.write(code)

-proc writeStorage(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int) =
+proc writeStorage(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int)
+    {.gcsafe, raises: [CatchableError].} =
   if kd.storageKeys.isNil:
     # the account have storage but not touched by EVM
     wb.writeHashNode(acc.storageRoot.data, depth, true)

@@ -201,7 +212,7 @@ proc writeStorage(wb: var WitnessBuilder, kd: KeyData, acc: Account, depth: int)
     wb.writeHashNode(emptyRlpHash.data, depth, true)

 proc writeAccountNode(wb: var WitnessBuilder, kd: KeyData, acc: Account,
-  node: openArray[byte], depth: int) {.raises: [ContractCodeError, IOError, Defect, CatchableError, Exception].} =
+  node: openArray[byte], depth: int) {.raises: [ContractCodeError, IOError, CatchableError].} =

   # write type
   wb.writeByte(AccountNodeType)

@@ -228,7 +239,8 @@ proc writeAccountNode(wb: var WitnessBuilder, kd: KeyData, acc: Account,
   #0x00 address:<Address> balance:<Bytes32> nonce:<Bytes32>
   #0x01 address:<Address> balance:<Bytes32> nonce:<Bytes32> bytecode:<Bytecode> storage:<Tree_Node(0,1)>

-proc writeAccountStorageLeafNode(wb: var WitnessBuilder, key: openArray[byte], val: UInt256, node: openArray[byte], depth: int) =
+proc writeAccountStorageLeafNode(wb: var WitnessBuilder, key: openArray[byte], val: UInt256, node: openArray[byte], depth: int)
+    {.gcsafe, raises: [IOError].} =
   wb.writeByte(StorageLeafNodeType)

   when defined(debugHash):

@@ -324,7 +336,7 @@ proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) =
     "HexaryTrie node with an unexpected number of children")

 proc buildWitness*(wb: var WitnessBuilder, keys: MultikeysRef): seq[byte]
-    {.raises: [ContractCodeError, IOError, Defect, CatchableError, Exception].} =
+    {.raises: [CatchableError].} =

   # witness version
   wb.writeByte(BlockWitnessVersion)
@@ -1,4 +1,6 @@
-import nimcrypto/hash, stew/bitops2
+import stew/bitops2

+{.push raises: [].}
+
 type
   TrieNodeType* = enum

@@ -35,15 +37,16 @@ const
   BlockWitnessVersion* = 0x01
   ShortRlpPrefix* = 0.byte

-proc setBranchMaskBit*(x: var uint, i: int) {.inline.} =
+proc setBranchMaskBit*(x: var uint, i: int) =
   assert(i >= 0 and i < 17)
   x = x or (1 shl i).uint

-func branchMaskBitIsSet*(x: uint, i: int): bool {.inline.} =
+func branchMaskBitIsSet*(x: uint, i: int): bool =
   assert(i >= 0 and i < 17)
   result = ((x shr i.uint) and 1'u) == 1'u

-func constructBranchMask*(b1, b2: byte): uint {.inline.} =
+func constructBranchMask*(b1, b2: byte): uint
+    {.gcsafe, raises: [ParsingError].} =
   result = uint(b1) shl 8 or uint(b2)
   if countOnes(result) < 2 or ((result and (not 0x1FFFF'u)) != 0):
     raise newException(ParsingError, "Invalid branch mask pattern " & $result)
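Reading aid for the branch-mask helpers above: the mask is a 17-bit set, one bit per slot of a hexary branch node (16 children plus the value slot at bit 16, which must stay empty). A usage sketch, assuming these helpers live in the `witness_types` module named in the import list earlier in this diff:

import ./witness_types       # assumed module path

var mask: uint
mask.setBranchMaskBit(3)     # child present at nibble 0x3
mask.setBranchMaskBit(10)    # child present at nibble 0xa
doAssert mask.branchMaskBitIsSet(3)
doAssert not mask.branchMaskBitIsSet(16)   # the value slot stays empty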
@@ -39,7 +39,7 @@ type
     lnList: seq[string]
     lnInx: int

-{.push raises: [Defect].}
+{.push raises: [].}

 # ------------------------------------------------------------------------------
 # Private deflate helpers:

@@ -101,7 +101,7 @@ proc explode(state: var GUnzip; data: openArray[char];
 # ------------------------------------------------------------------------------

 proc open*(state: var GUnzip; fileName: string):
-    Result[void,ZError] {.gcsafe, raises: [Defect,IOError].} =
+    Result[void,ZError] {.gcsafe, raises: [IOError].} =
   ## Open gzipped file with path `fileName` and prepare for deflating and
   ## extraction.

@@ -142,7 +142,7 @@ proc open*(state: var GUnzip; fileName: string):
   return ok()


-proc close*(state: var GUnzip) {.gcsafe.} =
+proc close*(state: var GUnzip) =
   ## Close any open files and free resources
   if state.gzOpenOK:
     state.gzIn.close

@@ -150,7 +150,7 @@ proc close*(state: var GUnzip) {.gcsafe.} =


 proc nextChunk*(state: var GUnzip):
-    Result[string,ZError] {.gcsafe, raises: [Defect,IOError].} =
+    Result[string,ZError] {.gcsafe, raises: [IOError].} =
   ## Fetch next unzipped data chunk, return and empty string if input
   ## is exhausted.
   var strBuf = 4096.newString

@@ -170,14 +170,14 @@ proc nextChunk*(state: var GUnzip):
     return


-proc nextChunkOk*(state: var GUnzip): bool {.inline,gcsafe.} =
+proc nextChunkOk*(state: var GUnzip): bool =
   ## True if there is another chunk of data so that `nextChunk()` might
   ## fetch another non-empty unzipped data chunk.
   state.gzCount < state.gzMax


 proc nextLine*(state: var GUnzip):
-    Result[string,ZError] {.gcsafe, raises: [Defect,IOError].} =
+    Result[string,ZError] {.gcsafe, raises: [IOError].} =
   ## Assume that the `state` argument descriptor referes to a gzipped text
   ## file with lines separated by a newline character. Then fetch the next
   ## unzipped line and return it.

@@ -216,13 +216,13 @@ proc nextLine*(state: var GUnzip):
   state.lnInx = 1


-proc nextLineOk*(state: var GUnzip): bool {.inline,gcsafe.} =
+proc nextLineOk*(state: var GUnzip): bool =
   ## True if there is another unzipped line available with `nextLine()`.
   state.nextChunkOk or state.lnInx + 1 < state.lnList.len


 iterator gunzipLines*(state: var GUnzip):
-    (int,string) {.gcsafe, raises: [Defect,IOError].} =
+    (int,string) {.gcsafe, raises: [IOError].} =
   ## Iterate over all lines of gzipped text file `fileName` and return
   ## the pair `(line-number,line-text)`
   var lno = 0

@@ -235,7 +235,7 @@ iterator gunzipLines*(state: var GUnzip):


 iterator gunzipLines*(fileName: string):
-    (int,string) {.gcsafe, raises: [Defect,IOError].} =
+    (int,string) {.gcsafe, raises: [IOError].} =
   ## Open a gzipped text file, iterate over its lines (using the other
   ## version of `gunzipLines()`) and close it.
   var state: GUnzip
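Usage sketch for the iterator pair above (illustrative only; the module path and `sample.gz` are placeholders):

import ./gunzip              # assumed module path

var count = 0
for lno, line in gunzipLines("sample.gz"):
  count.inc
  echo lno, ": ", line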
@@ -10,6 +10,7 @@

 import
   std/[os, sequtils, strformat, strutils, tables],
+  chronicles,
   ../nimbus/db/accounts_cache,
   ../nimbus/common/common,
   ../nimbus/core/chain,

@@ -78,6 +79,16 @@ proc pp*(tx: Transaction; vmState: BaseVMState): string =
     "," & $vmState.readOnlyStateDB.getBalance(address) &
     ")"

+proc setTraceLevel =
+  discard
+  when defined(chronicles_runtime_filtering) and loggingEnabled:
+    setLogLevel(LogLevel.TRACE)
+
+proc setErrorLevel =
+  discard
+  when defined(chronicles_runtime_filtering) and loggingEnabled:
+    setLogLevel(LogLevel.ERROR)
+
 # ------------------------------------------------------------------------------
 # Private functions
 # ------------------------------------------------------------------------------

@@ -203,7 +214,7 @@ proc runTrial3crash(vmState: BaseVMState; inx: int; noisy = false) =

   try:
     vmState.stateDB.persist(clearCache = false)
-  except AssertionError as e:
+  except AssertionDefect as e:
     if noisy:
       let msg = e.msg.rsplit($DirSep,1)[^1]
       echo &"*** runVmExec({eAddr.pp}): {e.name}: {msg}"
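Background for the `AssertionError` -> `AssertionDefect` rename: since Nim 1.6 a failed `assert`/`doAssert` raises `AssertionDefect`, and `AssertionError` survives only as a deprecated alias, which is exactly the compiler gossip this change silences. Roughly (sketch; catching a defect like this relies on the default `--panics:off` mode):

try:
  doAssert 1 + 1 == 3, "arithmetic is broken"
except AssertionDefect as e:
  echo "caught: ", e.msg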
@@ -339,7 +350,7 @@ proc runner(noisy = true; capture = goerliCapture) =
     defer: dbTx.dispose()
     for n in txi:
       let vmState = com.getVmState(xdb.getCanonicalHead.blockNumber)
-      expect AssertionError:
+      expect AssertionDefect:
         vmState.runTrial3crash(n, noisy)

   test &"Run {txi.len} tree-step trials without rollback":

@@ -367,6 +378,7 @@ when isMainModule:
   var noisy = defined(debug)
   #noisy = true

+  setErrorLevel()
   noisy.runner # mainCapture
   # noisy.runner goerliCapture2
@@ -10,7 +10,7 @@

 import
   std/[algorithm, sequtils, strformat, strutils, tables, times],
-  eth/[keys, rlp],
+  eth/keys,
   ethash,
   secp256k1/abi,
   stew/objects,
@@ -12,11 +12,10 @@
 ## ====================================

 import
-  std/[base64, json, options, os, strutils, tables, times],
+  std/[base64, json, options, os, strutils, times],
   ../nimbus/config,
   ../nimbus/rpc/jwt_auth,
   ./replay/pp,
-  confutils/defs,
   chronicles,
   chronos/apps/http/httpclient as chronoshttpclient,
   chronos/apps/http/httptable,