Merge pull request #683 from status-im/devel

Testnet release 2020-01-21
This commit is contained in:
zah 2020-01-22 21:20:44 +02:00 committed by GitHub
commit 56876298d7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
67 changed files with 2069 additions and 649 deletions

2
.gitignore vendored
View File

@ -29,3 +29,5 @@ build/
# State sim # TODO - move in another folder
0000-*.json
/local_testnet_data

3
.gitmodules vendored
View File

@ -136,3 +136,6 @@
url = https://github.com/status-im/nim-bearssl.git
ignore = dirty
branch = master
[submodule "vendor/lmdb"]
path = vendor/lmdb
url = https://github.com/status-im/lmdb.git

View File

@ -7,7 +7,6 @@ cache:
directories:
- vendor/nimbus-build-system/vendor/Nim/bin
- vendor/go/bin
- rocksdbCache
- jsonTestsCache
git:
@ -27,7 +26,6 @@ matrix:
before_install:
- export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib"
- sudo apt-get -q update
- sudo apt-get install -y librocksdb-dev
- os: linux
arch: arm64
sudo: required
@ -37,14 +35,10 @@ matrix:
before_install:
- export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib"
- sudo apt-get -q update
- sudo apt-get install -y libpcre3-dev librocksdb-dev
- sudo apt-get install -y libpcre3-dev
- os: osx
env:
- NPROC=2
before_install:
- launchctl setenv LIBRARY_PATH /usr/local/lib # for RocksDB
# build our own rocksdb to test with a fixed version that we think works
- vendor/nimbus-build-system/scripts/build_rocksdb.sh rocksdbCache
install:

View File

@ -52,7 +52,6 @@ Nimbus has 4 external dependencies:
* Go 1.12 (for compiling libp2p daemon - being phased out)
* Developer tools (C compiler, Make, Bash, Git)
* [RocksDB](https://github.com/facebook/rocksdb/)
* PCRE
Nim is not an external dependency, Nimbus will build its own local copy.
@ -62,13 +61,13 @@ Nim is not an external dependency, Nimbus will build its own local copy.
On common Linux distributions the dependencies can be installed with:
```sh
# Debian and Ubuntu
sudo apt-get install build-essential git golang-go librocksdb-dev libpcre3-dev
sudo apt-get install build-essential git golang-go libpcre3-dev
# Fedora
dnf install @development-tools go rocksdb-devel pcre
dnf install @development-tools go pcre
# Archlinux, using an AUR manager for pcre-static
yourAURmanager -S base-devel go rocksdb pcre-static
yourAURmanager -S base-devel go pcre-static
```
### MacOS
@ -76,14 +75,14 @@ yourAURmanager -S base-devel go rocksdb pcre-static
Assuming you use [Homebrew](https://brew.sh/) to manage packages
```sh
brew install go rocksdb pcre
brew install go pcre
```
### Windows
* install [Go](https://golang.org/doc/install#windows)
You can install the developer tools by following the instruction in our [Windows dev environment section](#windows-dev-environment).
It also provides a downloading script for prebuilt PCRE and RocksDB.
It also provides a downloading script for prebuilt PCRE.
If you choose to install Go from source, both Go and Nimbus requires the same initial steps of installing Mingw.
@ -220,7 +219,7 @@ Variables -> Path -> Edit -> New -> C:\mingw-w64\mingw64\bin (it's "C:\mingw-w64
Install [Git for Windows](https://gitforwindows.org/) and use a "Git Bash" shell to clone and build nim-beacon-chain.
If you don't want to compile RocksDB and SQLite separately, you can fetch pre-compiled DLLs with:
If you don't want to compile PCRE separately, you can fetch pre-compiled DLLs with:
```bash
mingw32-make # this first invocation will update the Git submodules
mingw32-make fetch-dlls # this will place the right DLLs for your architecture in the "build/" directory
@ -286,19 +285,6 @@ sudo apt-get install git libgflags-dev libsnappy-dev libpcre3-dev
mkdir status
cd status
# Install rocksdb
git clone https://github.com/facebook/rocksdb.git
cd rocksdb
make shared_lib
sudo make install-shared
cd ..
# Raspberry pi doesn't include /usr/local/lib in library search path
# Add it to your profile
echo '# Local compiles (nimbus - rocksdb)' >> ~/.profile
echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> ~/.profile
echo '' >> ~/.profile
# Install Go at least 1.12 (Buster only includes up to 1.11)
# Raspbian is 32-bit, so the package is go1.XX.X.linux-armv6l.tar.gz (and not arm64)
curl -O https://storage.googleapis.com/golang/go1.13.3.linux-armv6l.tar.gz

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2019 Status Research & Development GmbH
# Copyright (c) 2019-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
@ -75,7 +75,7 @@ proc aggregate_attestations*(
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#construct-aggregate
for attestation in getAttestationsForBlock(pool, state, slot):
if attestation.data == attestation_data:
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregateandproof
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/validator.md#aggregateandproof
return some(AggregateAndProof(
aggregator_index: index,
aggregate: attestation,

View File

@ -1,14 +1,14 @@
import
json, tables, options,
chronicles, serialization, json_serialization, eth/common/eth_types_json_serialization,
options,
serialization,
spec/[datatypes, digest, crypto],
eth/trie/db, ssz
kvstore, ssz
type
BeaconChainDB* = ref object
## Database storing resolved blocks and states - resolved blocks are such
## blocks that form a chain back to the tail block.
backend: TrieDatabaseRef
backend: KVStoreRef
DbKeyKind = enum
kHashToState
@ -61,15 +61,12 @@ func subkey(root: Eth2Digest, slot: Slot): auto =
ret
proc init*(T: type BeaconChainDB, backend: TrieDatabaseRef): BeaconChainDB =
proc init*(T: type BeaconChainDB, backend: KVStoreRef): BeaconChainDB =
T(backend: backend)
proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: SignedBeaconBlock) =
db.backend.put(subkey(type value, key), SSZ.encode(value))
proc putHead*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?
proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
# TODO prune old states - this is less easy than it seems as we never know
# when or if a particular state will become finalized.
@ -92,29 +89,28 @@ proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
proc delState*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.del(subkey(BeaconState, key))
proc delStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot) =
db.backend.del(subkey(root, slot))
proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?
db.backend.put(subkey(kHeadBlock), key.data)
proc putTailBlock*(db: BeaconChainDB, key: Eth2Digest) =
db.backend.put(subkey(kTailBlock), key.data)
proc get(db: BeaconChainDB, key: auto, T: typedesc): Option[T] =
let res = db.backend.get(key)
if res.len != 0:
var res: Option[T]
discard db.backend.get(key, proc (data: openArray[byte]) =
try:
some(SSZ.decode(res, T))
res = some(SSZ.decode(data, T))
except SerializationError:
none(T)
else:
none(T)
discard
)
res
proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Option[SignedBeaconBlock] =
db.get(subkey(SignedBeaconBlock, key), SignedBeaconBlock)
proc getBlock*(db: BeaconChainDB, slot: Slot): Option[SignedBeaconBlock] =
# TODO implement this
discard
proc getState*(db: BeaconChainDB, key: Eth2Digest): Option[BeaconState] =
db.get(subkey(BeaconState, key), BeaconState)

View File

@ -1,12 +1,12 @@
import
# Standard library
os, net, tables, random, strutils, times,
os, net, tables, random, strutils, times, sequtils,
# Nimble packages
stew/[objects, bitseqs, byteutils], stew/ranges/ptr_arith,
stew/[objects, bitseqs, byteutils],
chronos, chronicles, confutils, metrics,
json_serialization/std/[options, sets], serialization/errors,
eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,
kvstore, kvstore_lmdb, eth/async_utils, eth/p2p/discoveryv5/enr,
# Local modules
spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network],
@ -48,6 +48,7 @@ type
networkIdentity: Eth2NodeIdentity
requestManager: RequestManager
bootstrapNodes: seq[BootstrapAddr]
bootstrapEnrs: seq[enr.Record]
db: BeaconChainDB
config: BeaconNodeConf
attachedValidators: ValidatorPool
@ -66,9 +67,7 @@ proc saveValidatorKey(keyName, key: string, conf: BeaconNodeConf) =
writeFile(outputFile, key)
info "Imported validator key", file = outputFile
proc getStateFromSnapshot(node: BeaconNode, state: var BeaconState): bool =
template conf: untyped = node.config
proc getStateFromSnapshot(conf: BeaconNodeConf, state: var BeaconState): bool =
var
genesisPath = conf.dataDir/genesisFile
snapshotContents: TaintedString
@ -92,7 +91,8 @@ proc getStateFromSnapshot(node: BeaconNode, state: var BeaconState): bool =
dataDir = conf.dataDir.string, snapshot = snapshotPath
quit 1
else:
debug "No genesis file in data directory", genesisPath
debug "No previous genesis state. Importing snapshot",
genesisPath, dataDir = conf.dataDir.string
writeGenesisFile = true
genesisPath = snapshotPath
else:
@ -121,89 +121,146 @@ proc getStateFromSnapshot(node: BeaconNode, state: var BeaconState): bool =
result = true
proc commitGenesisState(node: BeaconNode, tailState: BeaconState) =
info "Got genesis state", hash = hash_tree_root(tailState)
node.forkVersion = tailState.fork.current_version
proc addBootstrapAddr(v: var seq[BootstrapAddr], add: TaintedString) =
try:
let tailBlock = get_initial_beacon_block(tailState)
BlockPool.preInit(node.db, tailState, tailBlock)
v.add BootstrapAddr.initAddress(string add)
except CatchableError as e:
stderr.write "Failed to initialize database\n"
stderr.write e.msg, "\n"
warn "Skipping invalid address", err = e.msg
proc loadBootstrapFile(bootstrapFile: string): seq[BootstrapAddr] =
if fileExists(bootstrapFile):
for line in lines(bootstrapFile):
result.addBootstrapAddr(line)
proc addEnrBootstrapNode(enrBase64: string,
bootNodes: var seq[BootstrapAddr],
enrs: var seq[enr.Record]) =
var enrRec: enr.Record
if enrRec.fromURI(enrBase64):
try:
let
ip = IpAddress(family: IpAddressFamily.IPv4,
address_v4: cast[array[4, uint8]](enrRec.get("ip", int)))
tcpPort = Port enrRec.get("tcp", int)
# udpPort = Port enrRec.get("udp", int)
bootNodes.add BootstrapAddr.initAddress(ip, tcpPort)
enrs.add enrRec
except CatchableError as err:
warn "Invalid ENR record", enrRec
else:
warn "Failed to parse ENR record", value = enrRec
proc useEnrBootstrapFile(bootstrapFile: string,
bootNodes: var seq[BootstrapAddr],
enrs: var seq[enr.Record]) =
let ext = splitFile(bootstrapFile).ext
if cmpIgnoreCase(ext, ".txt") == 0:
for ln in lines(bootstrapFile):
addEnrBootstrapNode(string ln, bootNodes, enrs)
elif cmpIgnoreCase(ext, ".yaml") == 0:
# TODO. This is very ugly, but let's try to negotiate the
# removal of YAML metadata.
for ln in lines(bootstrapFile):
addEnrBootstrapNode(string(ln[3..^2]), bootNodes, enrs)
else:
error "Unknown bootstrap file format", ext
quit 1
proc addBootstrapNode(node: BeaconNode, bootstrapNode: BootstrapAddr) =
if not bootstrapNode.isSameNode(node.networkIdentity):
node.bootstrapNodes.add bootstrapNode
proc loadBootstrapFile(node: BeaconNode, bootstrapFile: string) =
for ln in lines(bootstrapFile):
node.addBootstrapNode BootstrapAddr.initAddress(string ln)
proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async.} =
new result
result.config = conf
result.networkIdentity = getPersistentNetIdentity(conf)
result.nickname = if conf.nodeName == "auto": shortForm(result.networkIdentity)
else: conf.nodeName
let
networkId = getPersistentNetIdentity(conf)
nickname = if conf.nodeName == "auto": shortForm(networkId)
else: conf.nodeName
db = BeaconChainDB.init(kvStore LmdbStoreRef.init(conf.databaseDir))
for bootNode in conf.bootstrapNodes:
result.addBootstrapNode BootstrapAddr.init(bootNode)
var mainchainMonitor: MainchainMonitor
let bootstrapFile = string conf.bootstrapNodesFile
if bootstrapFile.len > 0:
result.loadBootstrapFile(bootstrapFile)
if not BlockPool.isInitialized(db):
# Fresh start - need to load a genesis state from somewhere
var genesisState = new BeaconState
let siteLocalBootstrapFile = conf.dataDir / "bootstrap_nodes.txt"
if fileExists(siteLocalBootstrapFile):
result.loadBootstrapFile(siteLocalBootstrapFile)
result.attachedValidators = ValidatorPool.init
let trieDB = trieDB newChainDb(conf.databaseDir)
result.db = BeaconChainDB.init(trieDB)
# TODO this is problably not the right place to ensure that db is sane..
# TODO does it really make sense to load from DB if a state snapshot has been
# specified on command line? potentially, this should be the other way
# around...
var eth1MonitorStartBlock: Eth2Digest
if result.db.getHeadBlock().isNone():
var state = new BeaconState
# TODO getStateFromSnapshot never returns false - it quits..
if not result.getStateFromSnapshot(state[]):
# Try file from command line first
if not conf.getStateFromSnapshot(genesisState[]):
# Didn't work, try creating a genesis state using main chain monitor
# TODO Could move this to a separate "GenesisMonitor" process or task
# that would do only this - see
if conf.depositWeb3Url.len != 0:
result.mainchainMonitor = MainchainMonitor.init(
conf.depositWeb3Url, conf.depositContractAddress, eth1MonitorStartBlock)
result.mainchainMonitor.start()
mainchainMonitor = MainchainMonitor.init(
conf.depositWeb3Url, conf.depositContractAddress, Eth2Digest())
mainchainMonitor.start()
else:
stderr.write "No state snapshot (or web3 URL) provided\n"
error "No initial state, need genesis state or deposit contract address"
quit 1
state[] = await result.mainchainMonitor.getGenesis()
else:
eth1MonitorStartBlock = state.eth1Data.block_hash
result.commitGenesisState(state[])
genesisState[] = await mainchainMonitor.getGenesis()
if result.mainchainMonitor.isNil and conf.depositWeb3Url.len != 0:
result.mainchainMonitor = MainchainMonitor.init(
conf.depositWeb3Url, conf.depositContractAddress, eth1MonitorStartBlock)
result.mainchainMonitor.start()
if genesisState[].slot != GENESIS_SLOT:
# TODO how to get a block from a non-genesis state?
error "Starting from non-genesis state not supported",
stateSlot = genesisState[].slot,
stateRoot = hash_tree_root(genesisState[])
quit 1
result.blockPool = BlockPool.init(result.db)
result.attestationPool = AttestationPool.init(result.blockPool)
let tailBlock = get_initial_beacon_block(genesisState[])
result.network = await createEth2Node(conf, result.bootstrapNodes)
result.requestManager.init result.network
try:
BlockPool.preInit(db, genesisState[], tailBlock)
doAssert BlockPool.isInitialized(db), "preInit should have initialized db"
except CatchableError as e:
error "Failed to initialize database", err = e.msg
quit 1
# TODO check that genesis given on command line (if any) matches database
let
blockPool = BlockPool.init(db)
if mainchainMonitor.isNil and conf.depositWeb3Url.len != 0:
mainchainMonitor = MainchainMonitor.init(
conf.depositWeb3Url, conf.depositContractAddress,
blockPool.headState.data.data.eth1_data.block_hash)
mainchainMonitor.start()
var
bootNodes: seq[BootstrapAddr]
enrs: seq[enr.Record]
for node in conf.bootstrapNodes: bootNodes.addBootstrapAddr(node)
bootNodes.add(loadBootstrapFile(string conf.bootstrapNodesFile))
bootNodes.add(loadBootstrapFile(conf.dataDir / "bootstrap_nodes.txt"))
let enrBootstrapFile = string conf.enrBootstrapNodesFile
if enrBootstrapFile.len > 0:
useEnrBootstrapFile(enrBootstrapFile, bootNodes, enrs)
bootNodes = filterIt(bootNodes, not it.isSameNode(networkId))
let
network = await createEth2Node(conf, bootNodes, enrs)
let addressFile = string(conf.dataDir) / "beacon_node.address"
network.saveConnectionAddressFile(addressFile)
var res = BeaconNode(
nickname: nickname,
network: network,
forkVersion: blockPool.headState.data.data.fork.current_version,
networkIdentity: networkId,
requestManager: RequestManager.init(network),
bootstrapNodes: bootNodes,
bootstrapEnrs: enrs,
db: db,
config: conf,
attachedValidators: ValidatorPool.init(),
blockPool: blockPool,
attestationPool: AttestationPool.init(blockPool),
mainchainMonitor: mainchainMonitor,
beaconClock: BeaconClock.init(blockPool.headState.data.data),
)
# TODO sync is called when a remote peer is connected - is that the right
# time to do so?
let sync = result.network.protocolState(BeaconSync)
let node = result
sync.init(
result.blockPool, result.forkVersion,
let sync = network.protocolState(BeaconSync)
sync.init(blockPool, res.forkVersion,
proc(signedBlock: SignedBeaconBlock) =
if signedBlock.message.slot mod SLOTS_PER_EPOCH == 0:
# TODO this is a hack to make sure that lmd ghost is run regularly
@ -220,19 +277,11 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
# of the block pool
# TODO is it a problem that someone sending us a block can force
# a potentially expensive head resolution?
discard node.updateHead()
discard res.updateHead()
onBeaconBlock(result, signedBlock))
onBeaconBlock(res, signedBlock))
let addressFile = string(conf.dataDir) / "beacon_node.address"
result.network.saveConnectionAddressFile(addressFile)
result.beaconClock = BeaconClock.init(result.blockPool.headState.data.data)
when useInsecureFeatures:
if conf.metricsServer:
let metricsAddress = conf.metricsServerAddress
info "Starting metrics HTTP server", address = metricsAddress, port = conf.metricsServerPort
metrics.startHttpServer(metricsAddress, Port(conf.metricsServerPort))
return res
proc connectToNetwork(node: BeaconNode) {.async.} =
if node.bootstrapNodes.len > 0:
@ -1109,7 +1158,15 @@ when isMainModule:
createPidFile(config.dataDir.string / "beacon_node.pid")
var node = waitFor BeaconNode.init(config)
when hasPrompt: initPrompt(node)
when hasPrompt:
initPrompt(node)
when useInsecureFeatures:
if config.metricsServer:
let metricsAddress = config.metricsServerAddress
info "Starting metrics HTTP server",
address = metricsAddress, port = config.metricsServerPort
metrics.startHttpServer(metricsAddress, Port(config.metricsServerPort))
if node.nickname != "":
dynamicLogScope(node = node.nickname): node.start()

View File

@ -119,8 +119,6 @@ type
## Tree of blocks pointing back to a finalized block on the chain we're
## interested in - we call that block the tail
blocksBySlot*: Table[Slot, seq[BlockRef]]
tail*: BlockRef ##\
## The earliest finalized block we know about
@ -173,8 +171,9 @@ type
data*: HashedBeaconState
blck*: BlockRef ##\
## The block associated with the state found in data - in particular,
## blck.state_root == rdata.root
## The block associated with the state found in data - normally
## `blck.state_root == data.root` but the state might have been advanced
## further with empty slots invalidating this condition.
BlockSlot* = object
## Unique identifier for a particular fork and time in the block chain -

View File

@ -89,7 +89,7 @@ func getAncestorAt*(blck: BlockRef, slot: Slot): BlockRef =
blck = blck.parent
func get_ancestor*(blck: BlockRef, slot: Slot): BlockRef =
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_fork-choice.md#get_ancestor
## https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#get_ancestor
## Return ancestor at slot, or nil if queried block is older
var blck = blck
@ -159,7 +159,7 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
var
blocks = {tailRef.root: tailRef}.toTable()
latestStateRoot = Option[Eth2Digest]()
latestStateRoot = Option[tuple[stateRoot: Eth2Digest, blckRef: BlockRef]]()
headRef: BlockRef
if headRoot != tailRoot:
@ -183,40 +183,31 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
trace "Populating block pool", key = curRef.root, val = curRef
if latestStateRoot.isNone() and db.containsState(blck.message.state_root):
latestStateRoot = some(blck.message.state_root)
latestStateRoot = some((blck.message.state_root, curRef))
doAssert curRef == tailRef,
"head block does not lead to tail, database corrupt?"
else:
headRef = tailRef
var blocksBySlot = initTable[Slot, seq[BlockRef]]()
for _, b in tables.pairs(blocks):
let slot = db.getBlock(b.root).get().message.slot
blocksBySlot.mgetOrPut(slot, @[]).add(b)
if latestStateRoot.isNone():
doAssert db.containsState(tailBlock.message.state_root),
"state data missing for tail block, database corrupt?"
latestStateRoot = some((tailBlock.message.state_root, tailRef))
# TODO can't do straight init because in mainnet config, there are too
# many live beaconstates on the stack...
var tmpState = new Option[BeaconState]
# We're only saving epoch boundary states in the database right now, so when
# we're loading the head block, the corresponding state does not necessarily
# exist in the database - we'll load this latest state we know about and use
# that as finalization point.
tmpState[] = db.getState(latestStateRoot.get().stateRoot)
let
# The head state is necessary to find out what we considered to be the
# finalized epoch last time we saved something.
headStateRoot =
if latestStateRoot.isSome():
latestStateRoot.get()
else:
db.getBlock(tailRef.root).get().message.state_root
# TODO right now, because we save a state at every epoch, this *should*
# be the latest justified state or newer, meaning it's enough for
# establishing what we consider to be the finalized head. This logic
# will need revisiting however
tmpState[] = db.getState(headStateRoot)
let
finalizedHead =
headRef.findAncestorBySlot(
tmpState[].get().finalized_checkpoint.epoch.compute_start_slot_at_epoch())
finalizedSlot =
tmpState[].get().current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
finalizedHead = headRef.findAncestorBySlot(finalizedSlot)
justifiedSlot =
tmpState[].get().current_justified_checkpoint.epoch.compute_start_slot_at_epoch()
justifiedHead = headRef.findAncestorBySlot(justifiedSlot)
@ -229,13 +220,12 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
debug "Block pool initialized",
head = head.blck, finalizedHead, tail = tailRef,
totalBlocks = blocks.len, totalKnownSlots = blocksBySlot.len
totalBlocks = blocks.len
let res = BlockPool(
pending: initTable[Eth2Digest, SignedBeaconBlock](),
missing: initTable[Eth2Digest, MissingBlock](),
blocks: blocks,
blocksBySlot: blocksBySlot,
tail: tailRef,
head: head,
finalizedHead: finalizedHead,
@ -244,8 +234,11 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
)
res.headState = StateData(
data: HashedBeaconState(data: tmpState[].get(), root: headStateRoot),
blck: headRef)
data: HashedBeaconState(
data: tmpState[].get(), root: latestStateRoot.get().stateRoot),
blck: latestStateRoot.get().blckRef)
res.updateStateData(res.headState, BlockSlot(blck: head.blck, slot: head.blck.slot))
res.tmpState = res.headState
tmpState[] = db.getState(justifiedStateRoot)
@ -253,25 +246,8 @@ proc init*(T: type BlockPool, db: BeaconChainDB): BlockPool =
data: HashedBeaconState(data: tmpState[].get(), root: justifiedStateRoot),
blck: justifiedHead.blck)
res
proc addSlotMapping(pool: BlockPool, br: BlockRef) =
proc addIfMissing(s: var seq[BlockRef], v: BlockRef) =
if v notin s:
s.add(v)
pool.blocksBySlot.mgetOrPut(br.slot, @[]).addIfMissing(br)
proc delSlotMapping(pool: BlockPool, br: BlockRef) =
var blks = pool.blocksBySlot.getOrDefault(br.slot)
if blks.len != 0:
let i = blks.find(br)
if i >= 0: blks.del(i)
if blks.len == 0:
pool.blocksBySlot.del(br.slot)
else:
pool.blocksBySlot[br.slot] = blks
proc addResolvedBlock(
pool: var BlockPool, state: var StateData, blockRoot: Eth2Digest,
signedBlock: SignedBeaconBlock, parent: BlockRef): BlockRef =
@ -283,8 +259,6 @@ proc addResolvedBlock(
pool.blocks[blockRoot] = blockRef
trace "Populating block pool", key = blockRoot, val = blockRef
pool.addSlotMapping(blockRef)
# Resolved blocks should be stored in database
pool.db.putBlock(blockRoot, signedBlock)
@ -322,6 +296,7 @@ proc addResolvedBlock(
blockRoot = shortLog(blockRoot),
justifiedRoot = shortLog(foundHead.get().justified.blck.root),
justifiedSlot = shortLog(foundHead.get().justified.slot),
heads = pool.heads.len(),
cat = "filtering"
# Now that we have the new block, we should see if any of the previously
@ -394,8 +369,8 @@ proc add*(
return
# The block might have been in either of these - we don't want any more
# work done on its behalf
# The block might have been in either of pending or missing - we don't want
# any more work done on its behalf
pool.pending.del(blockRoot)
# The block is resolved, now it's time to validate it to ensure that the
@ -445,11 +420,6 @@ proc add*(
# filter.
# TODO when we receive the block, we don't know how many others we're missing
# from that branch, so right now, we'll just do a blind guess
debug "Unresolved block (parent missing)",
blck = shortLog(blck),
blockRoot = shortLog(blockRoot),
cat = "filtering"
let parentSlot = blck.slot - 1
pool.missing[blck.parent_root] = MissingBlock(
@ -464,6 +434,14 @@ proc add*(
(parentSlot.uint64 mod SLOTS_PER_EPOCH.uint64))
)
debug "Unresolved block (parent missing)",
blck = shortLog(blck),
blockRoot = shortLog(blockRoot),
pending = pool.pending.len,
missing = pool.missing.len,
cat = "filtering"
func getRef*(pool: BlockPool, root: Eth2Digest): BlockRef =
## Retrieve a resolved block reference, if available
pool.blocks.getOrDefault(root, nil)
@ -566,10 +544,6 @@ func getOrResolve*(pool: var BlockPool, root: Eth2Digest): BlockRef =
if result.isNil:
pool.missing[root] = MissingBlock(slots: 1)
iterator blockRootsForSlot*(pool: BlockPool, slot: Slot): Eth2Digest =
for br in pool.blocksBySlot.getOrDefault(slot, @[]):
yield br.root
func checkMissing*(pool: var BlockPool): seq[FetchRecord] =
## Return a list of blocks that we should try to resolve from other client -
## to be called periodically but not too often (once per slot?)
@ -608,12 +582,13 @@ proc maybePutState(pool: BlockPool, state: HashedBeaconState, blck: BlockRef) =
# TODO we save state at every epoch start but never remove them - we also
# potentially save multiple states per slot if reorgs happen, meaning
# we could easily see a state explosion
# TODO this is out of sync with epoch def now, I think -- (slot + 1) mod foo.
logScope: pcs = "save_state_at_epoch_start"
if state.data.slot mod SLOTS_PER_EPOCH == 0:
if not pool.db.containsState(state.root):
info "Storing state",
blockRoot = shortLog(blck.root),
blockSlot = shortLog(blck.slot),
stateSlot = shortLog(state.data.slot),
stateRoot = shortLog(state.root),
cat = "caching"
@ -639,6 +614,14 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
var
stateRoot = pool.db.getStateRoot(bs.blck.root, bs.slot)
curBs = bs
# TODO this can happen when state root is saved but state is gone - this would
# indicate a corrupt database, but since we're not atomically
# writing and deleting state+root mappings in a single transaction, it's
# likely to happen and we guard against it here.
if stateRoot.isSome() and not pool.db.containsState(stateRoot.get()):
stateRoot = none(type(stateRoot.get()))
while stateRoot.isNone():
let parBs = curBs.parent()
if parBs.blck.isNil:
@ -661,6 +644,8 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
# tail state in there!)
error "Couldn't find ancestor state root!",
blockRoot = shortLog(bs.blck.root),
blockSlot = shortLog(bs.blck.slot),
slot = shortLog(bs.slot),
cat = "crash"
doAssert false, "Oh noes, we passed big bang!"
@ -675,6 +660,8 @@ proc rewindState(pool: BlockPool, state: var StateData, bs: BlockSlot):
# tail state in there!)
error "Couldn't find ancestor state or block parent missing!",
blockRoot = shortLog(bs.blck.root),
blockSlot = shortLog(bs.blck.slot),
slot = shortLog(bs.slot),
cat = "crash"
doAssert false, "Oh noes, we passed big bang!"
@ -743,43 +730,11 @@ proc loadTailState*(pool: BlockPool): StateData =
blck: pool.tail
)
proc delBlockAndState(pool: BlockPool, blockRoot: Eth2Digest) =
if (let blk = pool.db.getBlock(blockRoot); blk.isSome):
pool.db.delState(blk.get.message.stateRoot)
pool.db.delBlock(blockRoot)
proc delFinalizedStateIfNeeded(pool: BlockPool, b: BlockRef) =
# Delete finalized state for block `b` from the database, that doesn't need
# to be kept for replaying.
# TODO: Currently the protocol doesn't provide a way to request states,
# so we don't need any of the finalized states, and thus remove all of them
# (except the most recent)
if (let blk = pool.db.getBlock(b.root); blk.isSome):
pool.db.delState(blk.get.message.stateRoot)
proc setTailBlock(pool: BlockPool, newTail: BlockRef) =
## Advance tail block, pruning all the states and blocks with older slots
let oldTail = pool.tail
let fromSlot = oldTail.slot.uint64
let toSlot = newTail.slot.uint64 - 1
assert(toSlot > fromSlot)
for s in fromSlot .. toSlot:
for b in pool.blocksBySlot.getOrDefault(s.Slot, @[]):
pool.delBlockAndState(b.root)
b.children = @[]
b.parent = nil
pool.blocks.del(b.root)
pool.pending.del(b.root)
pool.missing.del(b.root)
pool.blocksBySlot.del(s.Slot)
pool.db.putTailBlock(newTail.root)
pool.tail = newTail
pool.addSlotMapping(newTail)
info "Tail block updated",
slot = newTail.slot,
root = shortLog(newTail.root)
proc delState(pool: BlockPool, bs: BlockSlot) =
# Delete state state and mapping for a particular block+slot
if (let root = pool.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()):
pool.db.delState(root.get())
pool.db.delStateRoot(bs.blck.root, bs.slot)
proc updateHead*(pool: BlockPool, newHead: BlockRef) =
## Update what we consider to be the current head, as given by the fork
@ -851,53 +806,64 @@ proc updateHead*(pool: BlockPool, newHead: BlockRef) =
"Block graph should always lead to a finalized block"
if finalizedHead != pool.finalizedHead:
block: # Remove states, walking slot by slot
discard
# TODO this is very aggressive - in theory all our operations start at
# the finalized block so all states before that can be wiped..
# TODO this is disabled for now because the logic for initializing the
# block pool and potentially a few other places depend on certain
# states (like the tail state) being present. It's also problematic
# because it is not clear what happens when tail and finalized states
# happen on an empty slot..
# var cur = finalizedHead
# while cur != pool.finalizedHead:
# cur = cur.parent
# pool.delState(cur)
block: # Clean up block refs, walking block by block
var cur = finalizedHead.blck
while cur != pool.finalizedHead.blck:
# Finalization means that we choose a single chain as the canonical one -
# it also means we're no longer interested in any branches from that chain
# up to the finalization point.
# The new finalized head should not be cleaned! We start at its parent and
# clean everything including the old finalized head.
cur = cur.parent
# TODO what about attestations? we need to drop those too, though they
# *should* be pretty harmless
if cur.parent != nil: # This happens for the genesis / tail block
for child in cur.parent.children:
if child != cur:
# TODO also remove states associated with the unviable forks!
# TODO the easiest thing to do here would probably be to use
# pool.heads to find unviable heads, then walk those chains
# and remove everything.. currently, if there's a child with
# children of its own, those children will not be pruned
# correctly from the database
pool.blocks.del(child.root)
pool.db.delBlock(child.root)
cur.parent.children = @[cur]
pool.finalizedHead = finalizedHead
let hlen = pool.heads.len
for i in 0..<hlen:
let n = hlen - i - 1
if not pool.finalizedHead.blck.isAncestorOf(pool.heads[n].blck):
# Any heads that are not derived from the newly finalized block are no
# longer viable candidates for future head selection
pool.heads.del(n)
info "Finalized block",
finalizedBlockRoot = shortLog(finalizedHead.blck.root),
finalizedBlockSlot = shortLog(finalizedHead.slot),
headBlockRoot = shortLog(newHead.root),
headBlockSlot = shortLog(newHead.slot),
heads = pool.heads.len,
cat = "fork_choice"
pool.finalizedHead = finalizedHead
var cur = finalizedHead.blck
while cur != pool.finalizedHead.blck:
# Finalization means that we choose a single chain as the canonical one -
# it also means we're no longer interested in any branches from that chain
# up to the finalization point
# TODO technically, if we remove from children the gc should free the block
# because it should become orphaned, via mark&sweep if nothing else,
# though this needs verification
# TODO what about attestations? we need to drop those too, though they
# *should* be pretty harmless
# TODO remove from database as well.. here, or using some GC-like setup
# that periodically cleans it up?
for child in cur.parent.children:
if child != cur:
pool.blocks.del(child.root)
pool.delBlockAndState(child.root)
pool.delSlotMapping(child)
else:
pool.delFinalizedStateIfNeeded(child)
cur.parent.children = @[cur]
cur = cur.parent
let hlen = pool.heads.len
for i in 0..<hlen:
let n = hlen - i - 1
if pool.heads[n].blck.slot < pool.finalizedHead.blck.slot:
# By definition, the current head should be newer than the finalized
# head, so we'll never delete it here
pool.heads.del(n)
# Calculate new tail block and set it
# New tail should be WEAK_SUBJECTIVITY_PERIOD * 2 older than finalizedHead
const tailSlotInterval = WEAK_SUBJECTVITY_PERIOD * 2
if finalizedEpochStartSlot - GENESIS_SLOT > tailSlotInterval:
let tailSlot = finalizedEpochStartSlot - tailSlotInterval
let newTail = finalizedHead.blck.findAncestorBySlot(tailSlot)
pool.setTailBlock(newTail.blck)
# TODO prune everything before weak subjectivity period
func latestJustifiedBlock*(pool: BlockPool): BlockSlot =
## Return the most recent block that is justified and at least as recent
@ -914,6 +880,26 @@ func latestJustifiedBlock*(pool: BlockPool): BlockSlot =
if head.justified.slot > result.slot:
result = head.justified
proc isInitialized*(T: type BlockPool, db: BeaconChainDB): bool =
  ## Check whether `db` already holds the minimum data needed to construct
  ## a BlockPool: a head block root, a tail block root, both corresponding
  ## blocks, and the state referenced by the tail block.
  let
    headRoot = db.getHeadBlock()
    tailRoot = db.getTailBlock()

  if headRoot.isNone() or tailRoot.isNone():
    return false

  let
    headBlock = db.getBlock(headRoot.get())
    tailBlock = db.getBlock(tailRoot.get())

  if headBlock.isNone() or tailBlock.isNone():
    return false

  # The tail state must be present so the pool can rewind/replay from it
  db.containsState(tailBlock.get().message.state_root)
proc preInit*(
T: type BlockPool, db: BeaconChainDB, state: BeaconState,
signedBlock: SignedBeaconBlock) =
@ -925,6 +911,7 @@ proc preInit*(
let
blockRoot = hash_tree_root(signedBlock.message)
doAssert signedBlock.message.state_root == hash_tree_root(state)
notice "New database from snapshot",
blockRoot = shortLog(blockRoot),
stateRoot = shortLog(signedBlock.message.state_root),
@ -936,5 +923,4 @@ proc preInit*(
db.putBlock(signedBlock)
db.putTailBlock(blockRoot)
db.putHeadBlock(blockRoot)
db.putStateRoot(
blockRoot, signedBlock.message.slot, signedBlock.message.state_root)
db.putStateRoot(blockRoot, state.slot, signedBlock.message.state_root)

View File

@ -90,6 +90,11 @@ type
desc: "Specifies a line-delimited file of bootsrap Ethereum network addresses."
name: "bootstrap-file" }: InputFile
enrBootstrapNodesFile* {.
defaultValue: ""
desc: "Specifies a line-delimited file of bootstrap ENR records"
name: "enr-bootstrap-file" }: InputFile
tcpPort* {.
defaultValue: defaultPort(config)
desc: "TCP listening port."
@ -100,6 +105,11 @@ type
desc: "UDP listening port."
name: "udp-port" }: int
maxPeers* {.
defaultValue: 10
desc: "The maximum number of peers to connect to"
name: "max-peers" }: int
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>."

View File

@ -0,0 +1,26 @@
import
net,
eth/keys, eth/trie/db,
eth/p2p/discoveryv5/[protocol, node, discovery_db, types],
conf
type
Eth2DiscoveryProtocol* = protocol.Protocol
Eth2DiscoveryId* = NodeId
export
Eth2DiscoveryProtocol, open, start, close
proc new*(T: type Eth2DiscoveryProtocol,
          conf: BeaconNodeConf,
          rawPrivKeyBytes: openarray[byte]): T =
  ## Construct a discv5 protocol instance bound to the configured UDP port,
  ## backed by an in-memory discovery database.
  # TODO
  # Implement more configuration options:
  # * for setting up a specific key
  # * for using a persistent database
  let
    privKey = initPrivateKey(rawPrivKeyBytes)
    discoveryDb = DiscoveryDB.init(newMemoryDB())
  newProtocol(privKey, discoveryDb, Port conf.udpPort)

View File

@ -1,6 +1,7 @@
import
options, tables,
chronos, json_serialization, strutils, chronicles, metrics, eth/net/nat,
chronos, json_serialization, strutils, chronicles, metrics,
eth/net/nat, eth/p2p/discoveryv5/enr,
version, conf
const
@ -126,6 +127,10 @@ when networkBackend == rlpx:
proc initAddress*(T: type BootstrapAddr, str: string): T =
initENode(str)
proc initAddress*(T: type BootstrapAddr, ip: IpAddress, tcpPort: Port): T =
# TODO
discard
func peersCount*(node: Eth2Node): int =
node.peerPool.len
@ -178,6 +183,12 @@ else:
raise newException(MultiAddressError,
"Invalid bootstrap node multi-address")
template tcpEndPoint(address, port): auto =
MultiAddress.init(address, Protocol.IPPROTO_TCP, port)
proc initAddress*(T: type BootstrapAddr, ip: IpAddress, tcpPort: Port): T =
tcpEndPoint(ip, tcpPort)
proc ensureNetworkIdFile(conf: BeaconNodeConf): string =
result = conf.dataDir / networkKeyFilename
if not fileExists(result):
@ -198,15 +209,13 @@ else:
result = KeyPair(seckey: privKey, pubkey: privKey.getKey())
template tcpEndPoint(address, port): auto =
MultiAddress.init(address, Protocol.IPPROTO_TCP, port)
proc allMultiAddresses(nodes: seq[BootstrapAddr]): seq[string] =
for node in nodes:
result.add $node
proc createEth2Node*(conf: BeaconNodeConf,
bootstrapNodes: seq[BootstrapAddr]): Future[Eth2Node] {.async.} =
bootstrapNodes: seq[BootstrapAddr],
bootstrapEnrs: seq[enr.Record]): Future[Eth2Node] {.async.} =
var
(extIp, extTcpPort, _) = setupNat(conf)
hostAddress = tcpEndPoint(globalListeningAddr, Port conf.tcpPort)
@ -222,8 +231,11 @@ else:
# TODO nim-libp2p still doesn't have support for announcing addresses
# that are different from the host address (this is relevant when we
# are running behind a NAT).
result = Eth2Node.init newStandardSwitch(some keys.seckey, hostAddress,
triggerSelf = true, gossip = true)
var switch = newStandardSwitch(some keys.seckey, hostAddress,
triggerSelf = true, gossip = true)
result = Eth2Node.init(conf, switch, keys.seckey)
for enr in bootstrapEnrs:
result.addKnownPeer(enr)
await result.start()
else:
let keyFile = conf.ensureNetworkIdFile
@ -311,16 +323,16 @@ else:
proc subscribe*[MsgType](node: Eth2Node,
topic: string,
msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} =
template execMsgHandler(gossipBytes, gossipTopic) =
template execMsgHandler(peerExpr, gossipBytes, gossipTopic) =
inc gossip_messages_received
trace "Incoming gossip bytes",
peer = msg.peer, len = gossipBytes.len, topic = gossipTopic
peer = peerExpr, len = gossipBytes.len, topic = gossipTopic
msgHandler SSZ.decode(gossipBytes, MsgType)
when networkBackend == libp2p:
let incomingMsgHandler = proc(topic: string,
data: seq[byte]) {.async, gcsafe.} =
execMsgHandler data, topic
execMsgHandler "unknown", data, topic
await node.switch.subscribe(topic, incomingMsgHandler)
@ -328,7 +340,7 @@ else:
let incomingMsgHandler = proc(api: DaemonAPI,
ticket: PubsubTicket,
msg: PubSubMessage): Future[bool] {.async, gcsafe.} =
execMsgHandler msg.data, msg.topics[0]
execMsgHandler msg.peer, msg.data, msg.topics[0]
return true
discard await node.daemon.pubsubSubscribe(topic, incomingMsgHandler)

View File

@ -39,7 +39,7 @@ const eth1BlockHash* = block:
for v in x.data.mitems: v = 0x42
x
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_deposit-contract.md#withdrawal-credentials
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2hash(k.getBytes())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8

94
beacon_chain/kvstore.nim Normal file
View File

@ -0,0 +1,94 @@
# Simple Key-Value store database interface
import
tables, hashes, sets
type
  MemoryStoreRef* = ref object of RootObj
    ## Trivial in-memory key-value store; useful for tests and as a
    ## reference implementation of the KVStoreRef interface.
    records: Table[seq[byte], seq[byte]]

  DataProc* = proc(val: openArray[byte])
    ## Callback invoked by ``get`` with the found value; the buffer is only
    ## valid for the duration of the call.

  # Type-erased function pointers backing the KVStoreRef interface; the
  # RootRef parameter is downcast to the concrete store type by the shims.
  PutProc = proc (db: RootRef, key, val: openArray[byte]) {.gcsafe.}
  GetProc = proc (db: RootRef, key: openArray[byte], onData: DataProc): bool {.gcsafe.}
  DelProc = proc (db: RootRef, key: openArray[byte]) {.gcsafe.}
  ContainsProc = proc (db: RootRef, key: openArray[byte]): bool {.gcsafe.}

  KVStoreRef* = ref object
    ## Key-Value store virtual interface: a concrete store object plus the
    ## dispatch procs created for it by ``kvStore``.
    obj: RootRef
    putProc: PutProc
    getProc: GetProc
    delProc: DelProc
    containsProc: ContainsProc
template put*(db: KVStoreRef, key, val: openArray[byte]) =
  ## Store ``val`` at ``key`` - overwrites existing value if already present
  db.putProc(db.obj, key, val)

template get*(db: KVStoreRef, key: openArray[byte], onData: untyped): bool =
  ## Retrieve value at ``key`` and call ``onData`` with the value. The data is
  ## valid for the duration of the callback.
  ## ``onData``: ``proc(data: openArray[byte])``
  ## returns true if found and false otherwise.
  db.getProc(db.obj, key, onData)

template del*(db: KVStoreRef, key: openArray[byte]) =
  ## Remove value at ``key`` from store - do nothing if the value is not present
  db.delProc(db.obj, key)

template contains*(db: KVStoreRef, key: openArray[byte]): bool =
  ## Return true iff ``key`` has a value in store
  db.containsProc(db.obj, key)
proc get*(db: MemoryStoreRef, key: openArray[byte], onData: DataProc): bool =
  ## Fetch the value stored under `key`, passing it to `onData` on a hit.
  ## Returns true when the key was found, false otherwise.
  let lookup = @key
  db.records.withValue(lookup, stored):
    onData(stored[])
    return true

proc del*(db: MemoryStoreRef, key: openArray[byte]) =
  ## Remove `key` from the store; no-op when the key is absent.
  # TODO: the @key copy won't be necessary once
  # https://github.com/nim-lang/Nim/issues/7457 is developed.
  db.records.del(@key)

proc contains*(db: MemoryStoreRef, key: openArray[byte]): bool =
  ## True when `key` currently has a value in the store.
  @key in db.records

proc put*(db: MemoryStoreRef, key, val: openArray[byte]) =
  ## Insert or overwrite the value stored at `key`.
  # TODO: the @key copy won't be necessary once
  # https://github.com/nim-lang/Nim/issues/7457 is developed.
  db.records[@key] = @val

proc init*(T: type MemoryStoreRef): T =
  ## Create an empty in-memory store.
  T(records: initTable[seq[byte], seq[byte]]())
proc putImpl[T](db: RootRef, key, val: openArray[byte]) =
  ## Type-erasure shim: downcast `db` to the concrete store T and dispatch.
  mixin put
  put(T(db), key, val)

proc getImpl[T](db: RootRef, key: openArray[byte], onData: DataProc): bool =
  ## Type-erasure shim: downcast `db` to the concrete store T and dispatch.
  mixin get
  get(T(db), key, onData)

proc delImpl[T](db: RootRef, key: openArray[byte]) =
  ## Type-erasure shim: downcast `db` to the concrete store T and dispatch.
  mixin del
  del(T(db), key)

proc containsImpl[T](db: RootRef, key: openArray[byte]): bool =
  ## Type-erasure shim: downcast `db` to the concrete store T and dispatch.
  mixin contains
  contains(T(db), key)
func kvStore*[T: RootRef](x: T): KVStoreRef =
  ## Wrap the concrete store `x` in the type-erased KVStoreRef interface,
  ## binding the generic put/get/del/contains shims instantiated for T.
  ## T must provide `put`, `get`, `del` and `contains` with the expected
  ## signatures (enforced at instantiation time via `mixin`).
  mixin del, get, put, contains
  KVStoreRef(
    obj: x,
    putProc: putImpl[T],
    getProc: getImpl[T],
    delProc: delImpl[T],
    containsProc: containsImpl[T]
  )

View File

@ -0,0 +1,160 @@
## Implementation of KVStore based on LMDB
## TODO: crashes on win32, investigate
import os
import ./kvstore
{.compile: "../vendor/lmdb/libraries/liblmdb/mdb.c".}
{.compile: "../vendor/lmdb/libraries/liblmdb/midl.c".}
const
  # Subset of LMDB's flag/status constants (from lmdb.h) needed here
  MDB_NOSUBDIR = 0x4000   # treat the path as a single file, not a directory
  MDB_RDONLY = 0x20000    # open the environment read-only
  MDB_NOTFOUND = -30798   # key/data pair not found

when defined(cpu64):
  # 1024^3 * 10 bytes = 10 GiB map size (the original "10TB" comment was
  # off by three orders of magnitude)
  const LMDB_MAP_SIZE = 1024'u * 1024'u * 1024'u * 10'u # 10 GiB enough?
else:
  # 1 GiB - larger values would overflow the 32-bit `uint` / address space
  const LMDB_MAP_SIZE = 1024'u * 1024'u * 1024'u # 32bit limitation
type
MDB_Env = distinct pointer
MDB_Txn = distinct pointer
MDB_Dbi = distinct cuint
MDB_val = object
mv_size: uint
mv_data: pointer
LmdbError* = object of CatchableError
# Used subset of the full LMDB API
proc mdb_env_create(env: var MDB_Env): cint {.importc, cdecl.}
proc mdb_env_open(env: MDB_Env, path: cstring, flags: cuint, mode: cint): cint {.importc, cdecl.}
proc mdb_txn_begin(env: MDB_Env, parent: MDB_Txn, flags: cuint, txn: var MDB_Txn): cint {.importc, cdecl.}
proc mdb_txn_commit(txn: MDB_Txn): cint {.importc, cdecl.}
proc mdb_txn_abort(txn: MDB_Txn) {.importc, cdecl.}
proc mdb_dbi_open(txn: MDB_Txn, name: cstring, flags: cuint, dbi: var MDB_Dbi): cint {.importc, cdecl.}
proc mdb_env_close(env: MDB_Env) {.importc, cdecl.}
proc mdb_strerror(err: cint): cstring {.importc, cdecl.}
proc mdb_get(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val): cint {.importc, cdecl.}
proc mdb_del(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: ptr MDB_val): cint {.importc, cdecl.}
proc mdb_put(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val, flags: cuint): cint {.importc, cdecl.}
proc mdb_env_set_mapsize(env: MDB_Env, size: uint64): cint {.importc, cdecl.}
func raiseLmdbError(err: cint) {.noreturn.} =
  ## Translate an LMDB status code into a raised LmdbError, using the
  ## message LMDB itself reports for the code.
  raise (ref LmdbError)(msg: $mdb_strerror(err))
type
LmdbStoreRef* = ref object of RootObj
env: MDB_Env
template init(T: type MDB_Val, val: openArray[byte]): T =
  ## View `val` as an MDB_val. No copy is made - `val` must stay alive while
  ## the MDB_val is in use.
  ## NOTE(review): `unsafeAddr val[0]` requires `val` to be non-empty -
  ## callers guard keys with `key.len == 0` checks, but values are not
  ## guarded; confirm empty values never reach this point.
  T(
    mv_size: val.len.uint,
    mv_data: unsafeAddr val[0]
  )
proc begin(db: LmdbStoreRef, flags: cuint): tuple[txn: MDB_Txn, dbi: MDB_Dbi] =
  ## Start a transaction with the given flags and open the unnamed database
  ## inside it. Raises LmdbError on failure; the transaction is aborted if
  ## opening the database handle fails, so no transaction leaks.
  var
    txn: MDB_Txn
    dbi: MDB_Dbi

  if (let x = mdb_txn_begin(db.env, nil, flags, txn); x != 0):
    raiseLmdbError(x)

  if (let x = mdb_dbi_open(txn, nil, 0, dbi); x != 0):
    mdb_txn_abort(txn)
    raiseLmdbError(x)

  (txn, dbi)
proc get*(db: LmdbStoreRef, key: openarray[byte], onData: DataProc): bool =
  ## Look up `key` in a read-only transaction. When found, `onData` (if
  ## non-nil) is called with the value and true is returned; the value
  ## buffer lives inside the LMDB transaction and is only valid for the
  ## duration of the callback. Returns false for empty keys and misses.
  if key.len == 0:
    return
  var
    (txn, dbi) = db.begin(MDB_RDONLY)
    dbKey = MDB_Val.init(key)
    dbVal: MDB_val

  # abort ok for read-only and easier for exception safety
  defer: mdb_txn_abort(txn)

  if (let x = mdb_get(txn, dbi, dbKey, dbVal); x != 0):
    if x == MDB_NOTFOUND:
      return false
    raiseLmdbError(x)

  if not onData.isNil:
    # Reinterpret LMDB's raw pointer/length pair as a byte view for the callback
    onData(toOpenArrayByte(cast[cstring](dbVal.mv_data), 0, dbVal.mv_size.int - 1))

  true
proc put*(db: LmdbStoreRef, key, value: openarray[byte]) =
  ## Store `value` at `key`, overwriting any existing value. Empty keys are
  ## silently ignored. Raises LmdbError on failure.
  ## NOTE(review): an empty `value` would hit `unsafeAddr val[0]` inside
  ## MDB_Val.init - confirm callers never store empty values.
  if key.len == 0: return
  var
    (txn, dbi) = db.begin(0)
    dbKey = MDB_Val.init(key)
    dbVal = MDB_Val.init(value)

  if (let x = mdb_put(txn, dbi, dbKey, dbVal, 0); x != 0):
    mdb_txn_abort(txn)
    raiseLmdbError(x)

  if (let x = mdb_txn_commit(txn); x != 0):
    raiseLmdbError(x)
proc contains*(db: LmdbStoreRef, key: openarray[byte]): bool =
  ## True iff `key` is present. Reuses `get` with a nil callback, so the
  ## value itself is never copied out of the transaction.
  db.get(key, nil)
proc del*(db: LmdbStoreRef, key: openarray[byte]) =
  ## Delete the value stored at `key`; a missing key is a no-op (the
  ## transaction is aborted, not committed, in that case). Empty keys are
  ## silently ignored. Raises LmdbError on any other failure.
  if key.len == 0: return
  var
    (txn, dbi) = db.begin(0)
    dbKey = MDB_Val.init(key)

  if (let x = mdb_del(txn, dbi, dbKey, nil); x != 0):
    mdb_txn_abort(txn)
    if x != MDB_NOTFOUND:
      raiseLmdbError(x)
    return

  if (let x = mdb_txn_commit(txn); x != 0):
    raiseLmdbError(x)
proc close*(db: LmdbStoreRef) =
  ## Close the LMDB environment; the store must not be used afterwards.
  mdb_env_close(db.env)
proc init*(T: type LmdbStoreRef, basePath: string, readOnly = false): T =
  ## Open (creating as needed) the LMDB environment stored in the single
  ## file `basePath/nimbus.lmdb`. Raises OSError if the directory cannot be
  ## created and LmdbError on any LMDB failure; on LMDB failure the
  ## partially-created environment is closed before raising.
  # Create the directory *before* mdb_env_create: previously a failing
  # createDir would leak the already-created environment handle.
  createDir(basePath)
  let dataDir = basePath / "nimbus.lmdb"

  var env: MDB_Env
  if (let x = mdb_env_create(env); x != 0):
    raiseLmdbError(x)

  if (let x = mdb_env_set_mapsize(env, LMDB_MAP_SIZE); x != 0):
    mdb_env_close(env)
    raiseLmdbError(x)

  # Single-file layout; add read-only when requested
  let openFlags =
    if readOnly: MDB_NOSUBDIR or MDB_RDONLY
    else: MDB_NOSUBDIR

  # file mode ignored on windows
  if (let x = mdb_env_open(env, dataDir, openFlags.cuint, 0o664.cint); x != 0):
    mdb_env_close(env)
    raiseLmdbError(x)

  T(env: env)

View File

@ -1,8 +1,9 @@
import
algorithm, typetraits,
algorithm, typetraits, net,
stew/[varints,base58], stew/shims/[macros, tables], chronos, chronicles,
faststreams/output_stream, serialization,
stint, faststreams/output_stream, serialization,
json_serialization/std/options, eth/p2p/p2p_protocol_dsl,
eth/p2p/discoveryv5/enr,
# TODO: create simpler to use libp2p modules that use re-exports
libp2p/[switch, multistream, connection,
multiaddress, peerinfo, peer,
@ -11,7 +12,10 @@ import
libp2p/protocols/secure/[secure, secio],
libp2p/protocols/pubsub/[pubsub, floodsub],
libp2p/transports/[transport, tcptransport],
libp2p_json_serialization, ssz
libp2p_json_serialization, eth2_discovery, conf, ssz
import
eth/p2p/discoveryv5/protocol as discv5_protocol
export
p2pProtocol, libp2p_json_serialization, ssz
@ -22,7 +26,10 @@ type
# TODO Is this really needed?
Eth2Node* = ref object of RootObj
switch*: Switch
discovery*: Eth2DiscoveryProtocol
wantedPeers*: int
peers*: Table[PeerID, Peer]
peersByDiscoveryId*: Table[Eth2DiscoveryId, Peer]
protocolStates*: seq[RootRef]
libp2pTransportLoops*: seq[Future[void]]
@ -32,6 +39,7 @@ type
network*: Eth2Node
info*: PeerInfo
wasDialed*: bool
discoveryId*: Eth2DiscoveryId
connectionState*: ConnectionState
protocolStates*: seq[RootRef]
maxInactivityAllowed*: Duration
@ -91,6 +99,9 @@ type
TransmissionError* = object of CatchableError
const
TCP = net.Protocol.IPPROTO_TCP
template `$`*(peer: Peer): string = id(peer.info)
chronicles.formatIt(Peer): $it
@ -139,10 +150,59 @@ include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing
include libp2p_backends_common
proc init*(T: type Eth2Node, switch: Switch): T =
proc toPeerInfo*(r: enr.TypedRecord): PeerInfo =
  ## Convert a typed ENR record into libp2p PeerInfo.
  ## Returns the default (nil) PeerInfo when the record has no secp256k1
  ## key or no usable TCP address (only TCP endpoints are collected here).
  if r.secp256k1.isSome:
    var peerId = PeerID.init r.secp256k1.get
    var addresses = newSeq[MultiAddress]()

    # IPv4 endpoint, when both address and TCP port are advertised
    if r.ip.isSome and r.tcp.isSome:
      let ip = IpAddress(family: IpAddressFamily.IPv4,
                         address_v4: r.ip.get)
      addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get)

    # IPv6 endpoint, preferring the dedicated tcp6 port and falling back
    # to the shared tcp port
    if r.ip6.isSome:
      let ip = IpAddress(family: IpAddressFamily.IPv6,
                         address_v6: r.ip6.get)
      if r.tcp6.isSome:
        addresses.add MultiAddress.init(ip, TCP, Port r.tcp6.get)
      elif r.tcp.isSome:
        addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get)
      else:
        discard

    if addresses.len > 0:
      return PeerInfo.init(peerId, addresses)
proc toPeerInfo(r: Option[enr.TypedRecord]): PeerInfo =
if r.isSome:
return r.get.toPeerInfo
proc dialPeer*(node: Eth2Node, enr: enr.Record) {.async.} =
  ## Dial the peer described by `enr` and run the protocol-level connection
  ## initialization. Silently does nothing when the ENR cannot be converted
  ## to a usable PeerInfo (no key / no TCP address).
  let peerInfo = enr.toTypedRecord.toPeerInfo
  if peerInfo != nil:
    discard await node.switch.dial(peerInfo)
    var peer = node.getPeer(peerInfo)
    peer.wasDialed = true
    await initializeConnection(peer)
proc runDiscoveryLoop(node: Eth2Node) {.async.} =
  ## Forever loop: while below the wanted peer count, look up random nodes
  ## via discv5 and dial any we are not yet connected to, then sleep a
  ## second before the next round.
  while true:
    if node.peersByDiscoveryId.len < node.wantedPeers:
      let discoveredPeers = await node.discovery.lookupRandom()
      for peer in discoveredPeers:
        if peer.id notin node.peersByDiscoveryId:
          # TODO do this in parallel
          try:
            await node.dialPeer(peer.record)
          except CatchableError as err:
            # A single unreachable peer must not kill the discovery loop
            # (previously any dial failure terminated it permanently)
            debug "Failed to connect to discovered peer", err = err.msg

    await sleepAsync seconds(1)
proc init*(T: type Eth2Node, conf: BeaconNodeConf,
switch: Switch, privKey: PrivateKey): T =
new result
result.switch = switch
result.peers = initTable[PeerID, Peer]()
result.discovery = Eth2DiscoveryProtocol.new(conf, privKey.getBytes)
result.wantedPeers = conf.maxPeers
newSeq result.protocolStates, allProtocols.len
for proto in allProtocols:
@ -153,7 +213,12 @@ proc init*(T: type Eth2Node, switch: Switch): T =
if msg.protocolMounter != nil:
msg.protocolMounter result
proc addKnownPeer*(node: Eth2Node, peerEnr: enr.Record) =
  ## Seed the discv5 routing table with a known peer's ENR (e.g. bootstrap
  ## nodes) so discovery has somewhere to start.
  node.discovery.addNode peerEnr

proc start*(node: Eth2Node) {.async.} =
  ## Open and start the discv5 discovery service, then start the libp2p
  ## switch, keeping its transport loops for later supervision.
  node.discovery.open()
  node.discovery.start()
  node.libp2pTransportLoops = await node.switch.start()
proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer =

120
beacon_chain/nimquery.nim Normal file
View File

@ -0,0 +1,120 @@
import
strutils, strformat, parseutils
type
  TokenKind* = enum
    ## Lexical token categories produced by the Lexer.
    tIdent = "ident"
    tNumber = "number"
    tDot = "dot"
    tOpenBracket = "["
    tCloseBracket = "]"
    tEof = "end of file"
    tError = "error"

  Token* = object
    ## A single lexical token; payload fields depend on the kind.
    case kind*: TokenKind
    of tIdent:
      name*: string
    of tNumber:
      val*: uint64
    of tError:
      # NOTE(review): unlike Node.errMsg this field is not exported -
      # confirm whether callers ever need to read lexer error messages
      errMsg: string
    else:
      discard

  Lexer* = object
    ## Tokenizer state: the current token plus position in the input.
    tok*: Token
    input: string
    pos: int

  Parser* = object
    ## Recursive-descent parser state (just the lexer).
    lexer: Lexer

  NodeKind* = enum
    ## Expression-tree node categories.
    Ident
    Number
    Dot
    ArrayAccess
    Error

  Node* = ref object {.acyclic.}
    ## Parsed expression tree; Error nodes carry a message instead of
    ## raising, so parse failures are values.
    case kind*: NodeKind
    of Dot:
      objVal*, field*: Node
    of ArrayAccess:
      arrayVal*, index*: Node
    of Ident:
      name*: string
    of Number:
      numVal*: uint64
    of Error:
      errMsg*: string
func advance(lexer: var Lexer) =
  ## Scan the next token from the input into `lexer.tok`, moving `pos` past
  ## it. Whitespace is skipped (by recursing), end of input yields tEof, and
  ## any unrecognized character yields a tError token with a message.
  if lexer.pos >= lexer.input.len:
    lexer.tok = Token(kind: tEof)
    return

  let c = lexer.input[lexer.pos]
  if c in IdentStartChars:
    lexer.tok = Token(kind: tIdent)
    lexer.pos = parseIdent(lexer.input, lexer.tok.name, lexer.pos)
  elif c in Whitespace:
    lexer.pos = skipWhitespace(lexer.input, lexer.pos)
    advance lexer
  elif c in Digits:
    lexer.tok = Token(kind: tNumber)
    lexer.pos = parseBiggestUInt(lexer.input, lexer.tok.val, lexer.pos)
  elif c == '[':
    lexer.tok = Token(kind: tOpenBracket)
    inc lexer.pos
  elif c == ']':
    lexer.tok = Token(kind: tCloseBracket)
    inc lexer.pos
  elif c == '.':
    lexer.tok = Token(kind: tDot)
    inc lexer.pos
  else:
    lexer.tok = Token(
      kind: tError,
      errMsg: &"Unexpected character '{c}' at position {lexer.pos}")
func init*(T: type Lexer, src: string): Lexer =
  ## Create a lexer over `src`, pre-loaded with the first token.
  result = Lexer(input: src, pos: 0)
  advance result

func init*(T: type Parser, src: string): Parser =
  ## Create a parser (and its underlying lexer) over `src`.
  result = Parser(lexer: Lexer.init(src))
func expr(parser: var Parser): Node =
  ## Parse one expression: an identifier or number primary, followed by any
  ## number of `[index]` accesses and an optional `.field` chain (the dot
  ## recurses, so `a.b.c` nests to the right). Syntax errors are returned
  ## as Error nodes rather than raised.
  template unexpectedToken =
    return Node(kind: Error, errMsg: &"Unexpected {parser.lexer.tok.kind} token")

  case parser.lexer.tok.kind
  of tIdent:
    result = Node(kind: Ident, name: parser.lexer.tok.name)
  of tNumber:
    # BUG FIX: previously this branch returned *without* consuming the
    # number token, so an index such as `a[2]` left the lexer stuck on `2`
    # and the caller's closing-bracket check failed with an Error node.
    result = Node(kind: Number, numVal: parser.lexer.tok.val)
  else:
    unexpectedToken

  advance parser.lexer

  # Loop over postfix suffixes so chains like `a[1][2]` and `a[1].b` are
  # fully consumed (previously only the first suffix was parsed and the
  # rest of the input was silently dropped).
  while true:
    case parser.lexer.tok.kind
    of tOpenBracket:
      advance parser.lexer
      result = Node(kind: ArrayAccess, arrayVal: result, index: parser.expr)
      if parser.lexer.tok.kind != tCloseBracket:
        unexpectedToken
      advance parser.lexer
    of tDot:
      advance parser.lexer
      return Node(kind: Dot, objVal: result, field: parser.expr)
    else:
      break
func parse*(input: string): Node =
  ## Parse `input` into an expression tree (Error node on bad syntax).
  var parser = Parser.init(input)
  expr parser

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -12,7 +12,7 @@ import
./crypto, ./datatypes, ./digest, ./helpers, ./validator,
../../nbench/bench_lab
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_valid_merkle_branch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_valid_merkle_branch
func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}=
## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
## ``branch``.
@ -30,13 +30,13 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], de
value = eth2hash(buf)
value == root
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#increase_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
# Increase the validator balance at index ``index`` by ``delta``.
state.balances[index] += delta
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#decrease_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(
state: var BeaconState, index: ValidatorIndex, delta: Gwei) =
## Decrease the validator balance at index ``index`` by ``delta``, with
@ -103,13 +103,13 @@ func process_deposit*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_activation_exit_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
func compute_activation_exit_epoch(epoch: Epoch): Epoch =
## Return the epoch during which validator activations and exits initiated in
## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_validator_churn_limit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit(state: BeaconState): uint64 =
# Return the validator churn limit for the current epoch.
let active_validator_indices =
@ -117,7 +117,7 @@ func get_validator_churn_limit(state: BeaconState): uint64 =
max(MIN_PER_EPOCH_CHURN_LIMIT,
len(active_validator_indices) div CHURN_LIMIT_QUOTIENT).uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#initiate_validator_exit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#initiate_validator_exit
func initiate_validator_exit*(state: var BeaconState,
index: ValidatorIndex) =
# Initiate the exit of the validator with index ``index``.
@ -128,7 +128,6 @@ func initiate_validator_exit*(state: var BeaconState,
return
# Compute exit queue epoch
# TODO try zero-functional here
var exit_epochs = mapIt(
filterIt(state.validators, it.exit_epoch != FAR_FUTURE_EPOCH),
it.exit_epoch)
@ -190,7 +189,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
increase_balance(
state, whistleblower_index, whistleblowing_reward - proposer_reward)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#genesis
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#genesis
func initialize_beacon_state_from_eth1*(
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
@ -275,7 +274,7 @@ func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock =
# parent_root, randao_reveal, eth1_data, signature, and body automatically
# initialized to default values.
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_block_root_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: BeaconState,
slot: Slot): Eth2Digest =
# Return the block root at a recent ``slot``.
@ -284,12 +283,12 @@ func get_block_root_at_slot*(state: BeaconState,
doAssert slot < state.slot
state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT]
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_block_root
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_block_root
func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest =
# Return the block root at the start of a recent ``epoch``.
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_total_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_total_balance
func get_total_balance*(state: BeaconState, validators: auto): Gwei =
## Return the combined effective balance of the ``indices``. (1 Gwei minimum
## to avoid divisions by zero.)
@ -299,13 +298,13 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei =
# XXX: Move to state_transition_epoch.nim?
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_eligible_for_activation_queue
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
func is_eligible_for_activation_queue(validator: Validator): bool =
# Check if ``validator`` is eligible to be placed into the activation queue.
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_eligible_for_activation
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_eligible_for_activation
func is_eligible_for_activation(state: BeaconState, validator: Validator):
bool =
# Check if ``validator`` is eligible for activation.
@ -315,7 +314,7 @@ func is_eligible_for_activation(state: BeaconState, validator: Validator):
# Has not yet been activated
validator.activation_epoch == FAR_FUTURE_EPOCH
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#registry-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#registry-updates
proc process_registry_updates*(state: var BeaconState) {.nbench.}=
## Process activation eligibility and ejections
## Try to avoid caching here, since this could easily become undefined
@ -400,7 +399,7 @@ proc is_valid_indexed_attestation*(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*(state: BeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
@ -413,7 +412,7 @@ func get_attesting_indices*(state: BeaconState,
if bits[i]:
result.incl index
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_indexed_attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_indexed_attestation
func get_indexed_attestation(state: BeaconState, attestation: Attestation,
stateCache: var StateCache): IndexedAttestation =
# Return the indexed attestation corresponding to ``attestation``.

View File

@ -122,13 +122,10 @@ func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey =
else:
pk.getKey
func init(T: type VerKey): VerKey =
func init*(T: type VerKey): VerKey =
result.point.inf()
func init(T: type SigKey): SigKey =
result.point.inf()
func init(T: type Signature): Signature =
func init*(T: type Signature): Signature =
result.point.inf()
func combine*[T](values: openarray[BlsValue[T]]): BlsValue[T] =

View File

@ -19,7 +19,7 @@
import
macros, hashes, json, strutils, tables,
stew/[byteutils, bitseqs], chronicles, eth/common,
stew/[byteutils, bitseqs], chronicles,
../version, ../ssz/types, ./crypto, ./digest
# TODO Data types:
@ -49,16 +49,18 @@ elif const_preset == "minimal":
import ./presets/minimal
export minimal
else:
{.fatal: "Preset \"" & const_preset ".nim\" is not supported.".}
type
Slot* = distinct uint64
Epoch* = distinct uint64
import ./presets/custom
loadCustomPreset const_preset
const
SPEC_VERSION* = "0.9.4" ## \
SPEC_VERSION* = "0.10.0" ## \
## Spec version we're aiming to be compatible with, right now
## TODO: improve this scheme once we can negotiate versions in protocol
# Initial values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#initial-values
GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
## compute_epoch_at_slot(GENESIS_SLOT)
@ -80,7 +82,7 @@ type
# Domains
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#domain-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#domain-types
DomainType* {.pure.} = enum
DOMAIN_BEACON_PROPOSER = 0
DOMAIN_BEACON_ATTESTER = 1
@ -88,10 +90,10 @@ type
DOMAIN_DEPOSIT = 3
DOMAIN_VOLUNTARY_EXIT = 4
# Phase 1 - Custody game
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_custody-game.md#signature-domain-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase1/custody-game.md#signature-domain-types
DOMAIN_CUSTODY_BIT_CHALLENGE = 6
# Phase 1 - Sharding
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_shard-data-chains.md#signature-domain-types
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase1/shard-data-chains.md#signature-domain-types
DOMAIN_SHARD_PROPOSER = 128
DOMAIN_SHARD_ATTESTER = 129
@ -107,18 +109,18 @@ type
BitList*[maxLen: static int] = distinct BitSeq
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#proposerslashing
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#proposerslashing
ProposerSlashing* = object
proposer_index*: uint64
signed_header_1*: SignedBeaconBlockHeader
signed_header_2*: SignedBeaconBlockHeader
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attesterslashing
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#attesterslashing
AttesterSlashing* = object
attestation_1*: IndexedAttestation
attestation_2*: IndexedAttestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#indexedattestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#indexedattestation
IndexedAttestation* = object
# TODO ValidatorIndex, but that doesn't serialize properly
attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
@ -127,18 +129,18 @@ type
CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#attestation
Attestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
signature*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#checkpoint
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#checkpoint
Checkpoint* = object
epoch*: Epoch
root*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#AttestationData
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#AttestationData
AttestationData* = object
slot*: Slot
index*: uint64
@ -150,34 +152,34 @@ type
source*: Checkpoint
target*: Checkpoint
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#deposit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#deposit
Deposit* = object
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
## Merkle path to deposit data list root
## Merkle path to deposit root
data*: DepositData
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#depositdata
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#depositmessage
DepositMessage* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: Gwei
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#depositdata
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#depositdata
DepositData* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: uint64
signature*: ValidatorSig # signing over DepositMessage
signature*: ValidatorSig # Signing over DepositMessage
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#voluntaryexit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#voluntaryexit
VoluntaryExit* = object
epoch*: Epoch ##\
## Earliest epoch when voluntary exit can be processed
validator_index*: uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblock
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -195,14 +197,14 @@ type
body*: BeaconBlockBody
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblockheader
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object
slot*: Slot
parent_root*: Eth2Digest
state_root*: Eth2Digest
body_root*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconblockbody
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -215,7 +217,7 @@ type
deposits*: List[Deposit, MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#beaconstate
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@ -267,7 +269,7 @@ type
current_justified_checkpoint*: Checkpoint
finalized_checkpoint*: Checkpoint
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#validator
Validator* = object
pubkey*: ValidatorPubKey
@ -289,7 +291,7 @@ type
withdrawable_epoch*: Epoch ##\
## When validator can withdraw or transfer funds
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#pendingattestation
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@ -299,12 +301,12 @@ type
proposer_index*: uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#historicalbatch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#fork
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#fork
Fork* = object
# TODO: Spec introduced an alias for Version = array[4, byte]
# and a default parameter to compute_domain
@ -314,37 +316,47 @@ type
epoch*: Epoch ##\
## Epoch of latest fork
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#eth1data
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#eth1data
Eth1Data* = object
deposit_root*: Eth2Digest
deposit_count*: uint64
block_hash*: Eth2Digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedvoluntaryexit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signingroot
SigningRoot* = object
object_root*: Eth2Digest
domain*: uint64
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
signature*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#signedvoluntaryexit
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblockheader
SignedBeaconBlockHeader* = object
message*: BeaconBlockHeader
signature*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/validator/0_beacon-chain-validator.md#aggregateandproof
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object
aggregator_index*: uint64
aggregate*: Attestation
selection_proof*: ValidatorSig
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/validator.md#eth1block
Eth1Block* = object
timestamp*: uint64
# All other eth1 block fields
# TODO to be replaced with some magic hash caching
HashedBeaconState* = object
data*: BeaconState
root*: Eth2Digest # hash_tree_root (not signing_root!)
root*: Eth2Digest # hash_tree_root(data)
StateCache* = object
beacon_committee_cache*:
@ -377,6 +389,7 @@ template foreachSpecType*(op: untyped) =
op BeaconState
op Deposit
op DepositData
op Eth1Block
op Eth1Data
op Fork
op HistoricalBatch
@ -386,6 +399,7 @@ template foreachSpecType*(op: untyped) =
op SignedBeaconBlock
op SignedBeaconBlockHeader
op SignedVoluntaryExit
op SigningRoot
op Validator
op VoluntaryExit

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -7,7 +7,7 @@
# Serenity hash function / digest
#
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#hash
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#hash
#
# In Phase 0 the beacon chain is deployed with SHA256 (SHA2-256).
# Note that it is different from Keccak256 (often mistakenly called SHA3-256)
@ -21,11 +21,11 @@
import
chronicles,
nimcrypto/[sha2, hash, utils], eth/common/eth_types_json_serialization,
nimcrypto/[sha2, hash, utils],
hashes
export
eth_types_json_serialization, hash.`$`
hash.`$`
type
Eth2Digest* = MDigest[32 * 8] ## `hash32` from spec

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -15,7 +15,7 @@ import
# Internal
./datatypes, ./digest
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#integer_squareroot
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
# Return the largest integer ``x`` such that ``x**2 <= n``.
doAssert n >= 0'u64
@ -28,7 +28,7 @@ func integer_squareroot*(n: SomeInteger): SomeInteger =
y = (x + n div x) div 2
x
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_epoch_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#compute_epoch_at_slot
func compute_epoch_at_slot*(slot: Slot|uint64): Epoch =
  ## Return the epoch that ``slot`` belongs to
  ## (integer division by ``SLOTS_PER_EPOCH``).
  Epoch(slot div SLOTS_PER_EPOCH)
@ -36,17 +36,17 @@ func compute_epoch_at_slot*(slot: Slot|uint64): Epoch =
template epoch*(slot: Slot): Epoch =
  ## Shorthand for ``compute_epoch_at_slot(slot)``.
  compute_epoch_at_slot(slot)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_start_slot_at_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func compute_start_slot_at_epoch*(epoch: Epoch): Slot =
  ## Return the first slot of ``epoch``
  ## (multiplication by ``SLOTS_PER_EPOCH``).
  Slot(epoch * SLOTS_PER_EPOCH)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_active_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_active_validator
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
  ## Check if ``validator`` is active at ``epoch``: activation has happened
  ## (``activation_epoch <= epoch``) and exit has not (``epoch < exit_epoch``).
  validator.activation_epoch <= epoch and epoch < validator.exit_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_active_validator_indices
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_active_validator_indices
func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
seq[ValidatorIndex] =
# Return the sequence of active validator indices at ``epoch``.
@ -54,7 +54,7 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
if is_active_validator(val, epoch):
result.add idx.ValidatorIndex
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_committee_count_at_slot
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_committee_count_at_slot
func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# Return the number of committees at ``slot``.
let epoch = compute_epoch_at_slot(slot)
@ -67,13 +67,13 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
# Otherwise, get_beacon_committee(...) cannot access some committees.
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_current_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: BeaconState): Epoch =
  ## Return the current epoch, derived from ``state.slot``.
  ## Pre-condition: the state is at or past genesis; the assertion message
  ## prints the offending slot on failure.
  doAssert state.slot >= GENESIS_SLOT, $state.slot
  compute_epoch_at_slot(state.slot)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_randao_mix
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_randao_mix
func get_randao_mix*(state: BeaconState,
epoch: Epoch): Eth2Digest =
## Returns the randao mix at a recent ``epoch``.
@ -114,7 +114,7 @@ func int_to_bytes4*(x: uint64): array[4, byte] =
result[2] = ((x shr 16) and 0xff).byte
result[3] = ((x shr 24) and 0xff).byte
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#compute_domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#compute_domain
func compute_domain*(
domain_type: DomainType,
fork_version: array[4, byte] = [0'u8, 0, 0, 0]): Domain =
@ -122,7 +122,7 @@ func compute_domain*(
result[0..3] = int_to_bytes4(domain_type.uint64)
result[4..7] = fork_version
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_domain
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
fork: Fork, domain_type: DomainType, epoch: Epoch): Domain =
## Return the signature domain (fork version concatenated with domain type)
@ -144,7 +144,7 @@ func get_domain*(
func get_domain*(state: BeaconState, domain_type: DomainType): Domain =
get_domain(state, domain_type, get_current_epoch(state))
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_seed
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_seed
func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2Digest =
# Return the seed at ``epoch``.

View File

@ -0,0 +1,128 @@
import
macros, strutils, tables
# Every beacon-chain constant name that a custom preset file may mention;
# `loadCustomPreset` matches each `Name: value` line against this enum.
type
  BeaconChainConstants* = enum
    BASE_REWARDS_PER_EPOCH
    BASE_REWARD_FACTOR
    BLS_WITHDRAWAL_PREFIX
    CHURN_LIMIT_QUOTIENT
    DEPOSIT_CONTRACT_TREE_DEPTH
    DOMAIN_BEACON_ATTESTER
    DOMAIN_BEACON_PROPOSER
    DOMAIN_DEPOSIT
    DOMAIN_RANDAO
    DOMAIN_VOLUNTARY_EXIT
    EFFECTIVE_BALANCE_INCREMENT
    EJECTION_BALANCE
    EPOCHS_PER_HISTORICAL_VECTOR
    EPOCHS_PER_SLASHINGS_VECTOR
    ETH1_FOLLOW_DISTANCE
    GENESIS_EPOCH
    GENESIS_SLOT
    HISTORICAL_ROOTS_LIMIT
    INACTIVITY_PENALTY_QUOTIENT
    JUSTIFICATION_BITS_LENGTH
    MAX_ATTESTATIONS
    MAX_ATTESTER_SLASHINGS
    MAX_COMMITTEES_PER_SLOT
    MAX_DEPOSITS
    MAX_EFFECTIVE_BALANCE
    MAX_PROPOSER_SLASHINGS
    MAX_SEED_LOOKAHEAD
    MAX_VALIDATORS_PER_COMMITTEE
    MAX_VOLUNTARY_EXITS
    MIN_ATTESTATION_INCLUSION_DELAY
    MIN_DEPOSIT_AMOUNT
    MIN_EPOCHS_TO_INACTIVITY_PENALTY
    MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    MIN_GENESIS_TIME
    MIN_PER_EPOCH_CHURN_LIMIT
    MIN_SEED_LOOKAHEAD
    MIN_SLASHING_PENALTY_QUOTIENT
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    PERSISTENT_COMMITTEE_PERIOD
    PROPOSER_REWARD_QUOTIENT
    SAFE_SLOTS_TO_UPDATE_JUSTIFIED
    SECONDS_PER_DAY
    SECONDS_PER_SLOT
    SHUFFLE_ROUND_COUNT
    SLOTS_PER_EPOCH
    SLOTS_PER_ETH1_VOTING_PERIOD
    SLOTS_PER_HISTORICAL_ROOT
    TARGET_COMMITTEE_SIZE
    VALIDATOR_REGISTRY_LIMIT
    WHISTLEBLOWER_REWARD_QUOTIENT
const
  # These constants cannot really be overridden in a preset.
  # If we encounter them, we'll just ignore the preset value.
  dubiousConstants = {
    # They are derived from other constants:
    GENESIS_EPOCH,
    SECONDS_PER_DAY,

    # These are defined as an enum in datatypes.nim:
    DOMAIN_BEACON_ATTESTER,
    DOMAIN_BEACON_PROPOSER,
    DOMAIN_DEPOSIT,
    DOMAIN_RANDAO,
    DOMAIN_VOLUNTARY_EXIT,
  }
const
  # Constants whose generated `const` declaration needs a Nim type other
  # than the default `int`; the string value is the type name that
  # `loadCustomPreset` splices into the generated declaration.
  customTypes = {
    GENESIS_SLOT: "Slot",
    BLS_WITHDRAWAL_PREFIX: "byte",
    BASE_REWARD_FACTOR: "uint64",
    EFFECTIVE_BALANCE_INCREMENT: "uint64",
    EJECTION_BALANCE: "uint64",
    EPOCHS_PER_SLASHINGS_VECTOR: "uint64",
    INACTIVITY_PENALTY_QUOTIENT: "uint64",
    MAX_EFFECTIVE_BALANCE: "uint64",
    MIN_DEPOSIT_AMOUNT: "uint64",
    MIN_EPOCHS_TO_INACTIVITY_PENALTY: "uint64",
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY: "uint64",
    PERSISTENT_COMMITTEE_PERIOD: "uint64",
    PROPOSER_REWARD_QUOTIENT: "uint64",
    SECONDS_PER_SLOT: "uint64",
    WHISTLEBLOWER_REWARD_QUOTIENT: "uint64",
  }.toTable
template entireSet(T: type enum): untyped =
  ## The set containing every value of the enum ``T``.
  {T.low .. T.high}
macro loadCustomPreset*(path: static string): untyped =
  ## Reads the preset file at ``path`` at compile time and, for each
  ## recognized ``ConstName: value`` line, emits a
  ## ``const ConstName* {.intdefine.} = Type(value)`` declaration.
  ## Constants in ``dubiousConstants`` are silently skipped; unknown names
  ## produce a compile-time warning and are skipped; a final warning lists
  ## any ``BeaconChainConstants`` the preset left undefined.
  result = newStmtList()

  var
    presetContents = staticRead(path)   # compile-time file read
    presetConstants = dubiousConstants  # pre-seeded with the always-ignored set
    lineNum = 0

  for line in splitLines(presetContents):
    inc lineNum
    # Skip blank lines and full-line comments.
    if line.len == 0 or line[0] == '#': continue

    # "path(line) " prefix for diagnostics.
    template lineinfo: string =
      "$1($2) " % [path, $lineNum]

    var constParts = line.split(":")
    if constParts.len != 2:
      error lineinfo & "Invalid syntax: A preset file should include only assignments in the form 'ConstName: Value'"

    try:
      let constant = parseEnum[BeaconChainConstants](constParts[0])
      if constant in dubiousConstants: continue
      # Append the target type as a third element, referenced as $3 below.
      constParts.add customTypes.getOrDefault(constant, "int")
      presetConstants.incl constant
    except ValueError:
      warning lineinfo & "Unrecognized constant in a preset: " & constParts[0]
      continue

    result.add parseStmt("const $1* {.intdefine.} = $3($2)" % constParts)

  let missingConstants = BeaconChainConstants.entireSet - presetConstants
  if missingConstants.card > 0:
    warning "Missing constants in preset: " & $missingConstants

View File

@ -20,7 +20,7 @@ type
const
# Misc
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L6
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/mainnet.yaml#L6
MAX_COMMITTEES_PER_SLOT* {.intdefine.} = 64
@ -45,7 +45,7 @@ const
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT* {.intdefine.} = 16384
# Constants (TODO: not actually configurable)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L110
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#constants
BASE_REWARDS_PER_EPOCH* = 4
DEPOSIT_CONTRACT_TREE_DEPTH* = 32
@ -69,14 +69,15 @@ const
# Initial values
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L62
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/mainnet.yaml#L64
GENESIS_SLOT* = 0.Slot
GENESIS_FORK_VERSION* = 0x00000000
BLS_WITHDRAWAL_PREFIX* = 0'u8
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L69
MIN_GENESIS_DELAY* = 86400 # 86400 seconds (1 day)
SECONDS_PER_SLOT*{.intdefine.} = 12'u64 # Compile with -d:SECONDS_PER_SLOT=1 for 12x faster slots
## TODO consistent time unit across projects, similar to C++ chrono?
@ -144,7 +145,7 @@ const
# Max operations per block
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/mainnet.yaml#L124
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/mainnet.yaml#L128
MAX_PROPOSER_SLASHINGS* = 2^4
MAX_ATTESTER_SLASHINGS* = 2^0
MAX_ATTESTATIONS* = 2^7
@ -153,20 +154,21 @@ const
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/mainnet.yaml#L26
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/mainnet.yaml#L26
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 8 # 96 seconds
# Validators
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/mainnet.yaml#L32
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/mainnet.yaml#L32
ETH1_FOLLOW_DISTANCE* = 1024 # blocks ~ 4 hours
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # estimate from Eth1 mainnet)
# Phase 1 - Sharding
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_shard-data-chains.md#time-parameters
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase1/shard-data-chains.md#time-parameters
# TODO those are included in minimal.yaml but not mainnet.yaml
# Why?
# SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
@ -176,9 +178,10 @@ const
# Phase 1 - Custody game
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/1_custody-game.md#constants
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase1/custody-game.md#constants
# TODO those are included in minimal.yaml but not mainnet.yaml
# Why?
# EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
# EPOCHS_PER_CUSTODY_PERIOD* = 4
# CUSTODY_PERIOD_TO_RANDAO_PADDING* = 4

View File

@ -38,7 +38,7 @@ const
# Constants
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#constants
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#constants
# TODO "The following values are (non-configurable) constants" ...
# Unchanged
BASE_REWARDS_PER_EPOCH* = 4
@ -61,11 +61,16 @@ const
# Unchanged
GENESIS_SLOT* = 0.Slot
GENESIS_FORK_VERSION* = 0x01000000
BLS_WITHDRAWAL_PREFIX* = 0'u8
# Time parameters
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/minimal.yaml#L69
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/minimal.yaml#L71
# Changed: Faster to spin up testnets, but does not give validator
# reasonable warning time for genesis
MIN_GENESIS_DELAY* = 300
# Unchanged
SECONDS_PER_SLOT*{.intdefine.} = 6'u64
@ -91,10 +96,6 @@ const
# Changed
MIN_EPOCHS_TO_INACTIVITY_PENALTY* = 2'u64^2
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
EPOCHS_PER_CUSTODY_PERIOD* = 4
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 4
# State vector lengths
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/minimal.yaml#L101
@ -107,7 +108,7 @@ const
# Reward and penalty quotients
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/configs/minimal.yaml#L113
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/minimal.yaml#L117
BASE_REWARD_FACTOR* = 2'u64^6
WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9
@ -127,7 +128,7 @@ const
# Fork choice
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/minimal.yaml#L26
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/minimal.yaml#L26
# Changed
SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 2
@ -143,13 +144,24 @@ const
TARGET_AGGREGATORS_PER_COMMITTEE* = 16 # validators
RANDOM_SUBNETS_PER_VALIDATOR* = 1 # subnet
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION* = 256 # epochs ~ 27 hours
SECONDS_PER_ETH1_BLOCK* = 14 # estimate from Eth1 mainnet)
# Phase 1 - Sharding
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/configs/minimal.yaml#L153
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/configs/minimal.yaml#L157
# TODO those are included in minimal.yaml but not mainnet.yaml
# Why?
SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH
EPOCHS_PER_SHARD_PERIOD* = 4
PHASE_1_FORK_EPOCH* = 8
PHASE_1_FORK_SLOT* = 64
# Phase 1 - Custody game
# ---------------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/1_custody-game.md#constants
# TODO those are included in minimal.yaml but not mainnet.yaml
# Why?
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS* = 4096 # epochs
EPOCHS_PER_CUSTODY_PERIOD* = 4
CUSTODY_PERIOD_TO_RANDAO_PADDING* = 4

View File

@ -44,7 +44,7 @@ declareGauge beacon_previous_live_validators, "Number of active validators that
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#block-header
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#block-header
proc process_block_header*(
state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags,
stateCache: var StateCache): bool {.nbench.}=
@ -125,14 +125,14 @@ proc process_randao(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#eth1-data
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#eth1-data
func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
  ## Record the block's eth1 vote and adopt it as the canonical eth1 data
  ## once a strict majority of the voting-period slots agree on it.
  let vote = body.eth1_data
  state.eth1_data_votes.add vote
  let supporting = state.eth1_data_votes.count(vote)
  if supporting * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
    state.eth1_data = vote
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_slashable_validator
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_slashable_validator
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
# Check if ``validator`` is slashable.
(not validator.slashed) and
@ -201,7 +201,7 @@ proc processProposerSlashings(
true
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_slashable_attestation_data
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#is_slashable_attestation_data
func is_slashable_attestation_data(
data_1: AttestationData, data_2: AttestationData): bool =
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG

View File

@ -63,7 +63,7 @@ declareGauge epoch_transition_final_updates, "Epoch transition final updates tim
# Spec
# --------------------------------------------------------
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_total_active_balance
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_total_active_balance
func get_total_active_balance*(state: BeaconState): Gwei =
# Return the combined effective balance of the active validators.
# TODO it calls get_total_balance with set(g_a_v_i(...))
@ -140,11 +140,11 @@ proc process_justification_and_finalization*(
## matter -- in the next epoch, they'll be 2 epochs old, when BeaconState
## tracks current_epoch_attestations and previous_epoch_attestations only
## per
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#attestations
## https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#attestations
## and `get_matching_source_attestations(...)` via
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#helper-functions-1
## https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#helper-functions-1
## and
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#final-updates
## https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#final-updates
## after which the state.previous_epoch_attestations is replaced.
trace "Non-attesting indices in previous epoch",
missing_all_validators=
@ -338,7 +338,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
(rewards, penalties)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#rewards-and-penalties-1
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func process_rewards_and_penalties(
state: var BeaconState, cache: var StateCache) {.nbench.}=
if get_current_epoch(state) == GENESIS_EPOCH:
@ -351,7 +351,7 @@ func process_rewards_and_penalties(
decrease_balance(state, i.ValidatorIndex, penalties[i])
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#slashings
func process_slashings*(state: var BeaconState) =
func process_slashings*(state: var BeaconState) {.nbench.}=
let
epoch = get_current_epoch(state)
total_balance = get_total_active_balance(state)
@ -425,7 +425,7 @@ proc process_epoch*(state: var BeaconState) {.nbench.}=
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#rewards-and-penalties-1
process_rewards_and_penalties(state, per_epoch_cache)
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#registry-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#registry-updates
# Don't rely on caching here.
process_registry_updates(state)
@ -441,7 +441,7 @@ proc process_epoch*(state: var BeaconState) {.nbench.}=
# @update_period_committee
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#final-updates
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#final-updates
process_final_updates(state)
# @after_process_final_updates

View File

@ -78,7 +78,7 @@ func get_shuffled_seq*(seed: Eth2Digest,
result = shuffled_active_validator_indices
# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#get_previous_epoch
# https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(state: BeaconState): Epoch =
# Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
let current_epoch = get_current_epoch(state)

View File

@ -12,7 +12,7 @@ import
endians, stew/shims/macros, options, algorithm, options,
stew/[bitops2, bitseqs, objects, varints, ptrops, ranges/ptr_arith], stint,
faststreams/input_stream, serialization, serialization/testing/tracing,
nimcrypto/sha2, blscurve, eth/common,
nimcrypto/sha2, blscurve,
./spec/[crypto, datatypes, digest],
./ssz/[types, bytes_reader]

View File

@ -14,7 +14,7 @@ type
## which blocks are valid - in particular, blocks are not valid if they
## come from the future as seen from the local clock.
##
## https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_fork-choice.md#fork-choice
## https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/fork-choice.md#fork-choice
##
# TODO replace time in chronos with a proper unit type, then this code can
# follow:

View File

@ -3,7 +3,7 @@ FROM debian:bullseye-slim AS build
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install build-essential make wget librocksdb-dev libpcre3-dev golang-go git &>/dev/null \
&& apt-get -qq -y install build-essential make wget libpcre3-dev golang-go git &>/dev/null \
&& apt-get -qq clean
# let Docker cache this between Git revision and testnet version changes
@ -36,7 +36,7 @@ FROM debian:bullseye-slim
SHELL ["/bin/bash", "-c"]
RUN apt-get -qq update \
&& apt-get -qq -y install librocksdb-dev libpcre3 psmisc &>/dev/null \
&& apt-get -qq -y install libpcre3 psmisc &>/dev/null \
&& apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

View File

@ -24,7 +24,7 @@ Features
```
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
export SCENARIOS=tests/official/fixtures/tests-v0.9.3/mainnet/phase0
export SCENARIOS=tests/official/fixtures/tests-v0.9.4/mainnet/phase0
# Full state transition
build/nbench cmdFullStateTransition -d="${SCENARIOS}"/sanity/blocks/pyspec_tests/voluntary_exit/ -q=2
@ -32,6 +32,18 @@ build/nbench cmdFullStateTransition -d="${SCENARIOS}"/sanity/blocks/pyspec_tests
# Slot processing
build/nbench cmdSlotProcessing -d="${SCENARIOS}"/sanity/slots/pyspec_tests/slots_1
# Justification-Finalisation
build/nbench cmdEpochProcessing --epochProcessingCat=catJustificationFinalization -d="${SCENARIOS}"/epoch_processing/justification_and_finalization/pyspec_tests/234_ok_support/
# Registry updates
build/nbench cmdEpochProcessing --epochProcessingCat=catRegistryUpdates -d="${SCENARIOS}"/epoch_processing/registry_updates/pyspec_tests/activation_queue_efficiency/
# Slashings
build/nbench cmdEpochProcessing --epochProcessingCat=catSlashings -d="${SCENARIOS}"/epoch_processing/slashings/pyspec_tests/max_penalties/
# Final updates
build/nbench cmdEpochProcessing --epochProcessingCat=catFinalUpdates -d="${SCENARIOS}"/epoch_processing/final_updates/pyspec_tests/effective_balance_hysteresis/
# Block header processing
build/nbench cmdBlockProcessing --blockProcessingCat=catBlockHeader -d="${SCENARIOS}"/operations/block_header/pyspec_tests/proposer_slashed/
@ -59,7 +71,7 @@ Furthermore benchmarks are run in parallel and might interfere with each other.
```
nim c -d:const_preset=mainnet -d:nbench -d:release -o:build/nbench nbench/nbench.nim
nim c -o:build/nbench_tests nbench/nbench_official_fixtures.nim
nbench_tests --nbench=build/nbench --tests=tests/official/fixtures/tests-v0.9.4/mainnet/
build/nbench_tests --nbench=build/nbench --tests=tests/official/fixtures/tests-v0.9.4/mainnet/
```
## TODO Reporting

View File

@ -99,6 +99,28 @@ proc main() =
)
else:
quit "Unsupported"
of cmdEpochProcessing:
case scenario.epochProcessingCat
of catJustificationFinalization:
runProcessJustificationFinalization(
scenario.scenarioDir.string,
scenario.preState
)
of catRegistryUpdates:
runProcessRegistryUpdates(
scenario.scenarioDir.string,
scenario.preState
)
of catSlashings:
runProcessSlashings(
scenario.scenarioDir.string,
scenario.preState
)
of catFinalUpdates:
runProcessFinalUpdates(
scenario.scenarioDir.string,
scenario.preState
)
else:
quit "Unsupported"

View File

@ -28,9 +28,13 @@ proc collectTarget(cmds: var CmdLists, nbench, name, cmd, cat, path: string) =
var cat = cat
if cmd == "cmdBlockProcessing":
cat = "--blockProcessingCat=" & cat
elif cmd == "cmdEpochProcessing":
cat = "--epochProcessingCat=" & cat
cmds.add &"{nbench} {cmd} {cat} -d={path/folder}"
proc collectBenchTargets(nbench, basePath: string): CmdLists =
# State processing
# -------------------------------------------------------------------------
block: # Full state transitions
echo "----------------------------------------"
echo "Collecting full state transitions"
@ -42,9 +46,27 @@ proc collectBenchTargets(nbench, basePath: string): CmdLists =
inc countBlocks
echo "Found: ", folder, " with ", countBlocks, " blocks"
result.add &"{nbench} cmdFullStateTransition -d={path/folder} -q={$countBlocks}"
# Slot processing
# -------------------------------------------------------------------------
block: # Slot processing
let path = basePath/"phase0"/"sanity"/"slots"/"pyspec_tests"
result.collectTarget(nbench, "slot", "cmdSlotProcessing", "", path)
# Epoch processing
# -------------------------------------------------------------------------
block: # Justification-Finalization
let path = basePath/"phase0"/"epoch_processing"/"justification_and_finalization"/"pyspec_tests"
result.collectTarget(nbench, "justification_and_finalization", "cmdEpochProcessing", "catJustificationFinalization", path)
block: # Registry updates
  # Bug fix: this previously pointed at the "justification_and_finalization"
  # directory (copy-paste from the block above), so the registry-updates
  # benchmark silently ran the wrong test vectors. The target name and the
  # category ("catRegistryUpdates") make the intended directory unambiguous.
  let path = basePath/"phase0"/"epoch_processing"/"registry_updates"/"pyspec_tests"
  result.collectTarget(nbench, "registry_updates", "cmdEpochProcessing", "catRegistryUpdates", path)
block: # Slashings
let path = basePath/"phase0"/"epoch_processing"/"slashings"/"pyspec_tests"
result.collectTarget(nbench, "slashings", "cmdEpochProcessing", "catSlashings", path)
block: # Final updates (label fixed: this collects final_updates, not justification-finalization)
  let path = basePath/"phase0"/"epoch_processing"/"final_updates"/"pyspec_tests"
  result.collectTarget(nbench, "final_updates", "cmdEpochProcessing", "catFinalUpdates", path)
# Block processing
# -------------------------------------------------------------------------
block: # Attestation
let path = basePath/"phase0"/"operations"/"attestation"/"pyspec_tests"
result.collectTarget(nbench, "attestation", "cmdBlockProcessing", "catAttestations", path)
@ -66,5 +88,6 @@ proc collectBenchTargets(nbench, basePath: string): CmdLists =
cli do(nbench: string, tests: string):
let cmdLists = collectBenchTargets(nbench, tests)
echo "\n========================================================\n"
let err = execProcesses(cmdLists)
quit err

View File

@ -11,7 +11,7 @@ import
# Status libraries
confutils/defs, serialization,
# Beacon-chain
../beacon_chain/spec/[datatypes, crypto, beaconstate, validator, state_transition_block],
../beacon_chain/spec/[datatypes, crypto, beaconstate, validator, state_transition_block, state_transition_epoch],
../beacon_chain/[ssz, state_transition, extras]
# Nimbus Bench - Scenario configuration
@ -35,6 +35,13 @@ type
catDeposits
catVoluntaryExits
EpochProcessingCat* = enum
catFinalUpdates
catJustificationFinalization
catRegistryUpdates
catSlashings
# catRewardsPenalties # no upstream tests
ScenarioConf* = object
scenarioDir* {.
desc: "The directory of your benchmark scenario"
@ -114,7 +121,7 @@ type
name: "voluntary_exit"
defaultValue: "voluntary_exit".}: string
of cmdEpochProcessing:
discard
epochProcessingCat*: EpochProcessingCat
proc parseSSZ(path: string, T: typedesc): T =
try:
@ -157,7 +164,33 @@ proc runProcessSlots*(dir, preState: string, numSlots: uint64) =
process_slots(state[], state.slot + numSlots)
template processScenarioImpl(
template processEpochScenarioImpl(
    dir, preState: string,
    transitionFn: untyped,
    needCache: static bool): untyped =
  ## Benchmark driver for one epoch-processing scenario: loads the
  ## SSZ-encoded pre-state from ``dir/preState & ".ssz"`` and applies
  ## `transitionFn` to it once.
  ##
  ## `needCache` is resolved at compile time and selects the call shape:
  ## when true, a fresh per-epoch cache is created and passed as the second
  ## argument to `transitionFn`.
  let prePath = dir/preState & ".ssz"
  # Heap-allocate the state: BeaconState is too large to keep on the stack.
  var state: ref BeaconState
  new state
  echo "Running: ", prePath
  state[] = parseSSZ(prePath, BeaconState)
  when needCache:
    var cache = get_empty_per_epoch_cache()
  # Epoch transitions can't fail (TODO is this true?)
  when needCache:
    transitionFn(state[], cache)
  else:
    transitionFn(state[])
  # Unlike the block-processing path there is no success flag to report here.
  echo astToStr(transitionFn) & " status: ", "Done" # if success: "SUCCESS ✓" else: "FAILURE ⚠️"
template genProcessEpochScenario(name, transitionFn: untyped, needCache: static bool): untyped =
  ## Generates an exported proc ``name*(dir, preState: string)`` that runs
  ## `transitionFn` on the loaded pre-state via `processEpochScenarioImpl`.
  proc `name`*(dir, preState: string) =
    processEpochScenarioImpl(dir, preState, transitionFn, needCache)
template processBlockScenarioImpl(
dir, preState: string, skipBLS: bool,
transitionFn, paramName: untyped,
ConsensusObject: typedesc,
@ -192,18 +225,23 @@ template processScenarioImpl(
echo astToStr(transitionFn) & " status: ", if success: "SUCCESS ✓" else: "FAILURE ⚠️"
template genProcessScenario(name, transitionFn, paramName: untyped, ConsensusObject: typedesc, needFlags, needCache: static bool): untyped =
template genProcessBlockScenario(name, transitionFn, paramName: untyped, ConsensusObject: typedesc, needFlags, needCache: static bool): untyped =
when needFlags:
proc `name`*(dir, preState, `paramName`: string, skipBLS: bool) =
processScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ConsensusObject, needFlags, needCache)
processBlockScenarioImpl(dir, preState, skipBLS, transitionFn, paramName, ConsensusObject, needFlags, needCache)
else:
proc `name`*(dir, preState, `paramName`: string) =
# skipBLS is a dummy to avoid undeclared identifier
processScenarioImpl(dir, preState, skipBLS = false, transitionFn, paramName, ConsensusObject, needFlags, needCache)
processBlockScenarioImpl(dir, preState, skipBLS = false, transitionFn, paramName, ConsensusObject, needFlags, needCache)
genProcessScenario(runProcessBlockHeader, process_block_header, block_header, BeaconBlock, needFlags = true, needCache = true)
genProcessScenario(runProcessProposerSlashing, process_proposer_slashing, proposer_slashing, ProposerSlashing, needFlags = true, needCache = true)
genProcessScenario(runProcessAttestation, process_attestation, attestation, Attestation, needFlags = true, needCache = true)
genProcessScenario(runProcessAttesterSlashing, process_attester_slashing, att_slash, AttesterSlashing, needFlags = false, needCache = true)
genProcessScenario(runProcessDeposit, process_deposit, deposit, Deposit, needFlags = true, needCache = false)
genProcessScenario(runProcessVoluntaryExits, process_voluntary_exit, deposit, SignedVoluntaryExit, needFlags = true, needCache = false)
genProcessEpochScenario(runProcessJustificationFinalization, process_justification_and_finalization, needCache = true)
genProcessEpochScenario(runProcessRegistryUpdates, process_registry_updates, needCache = false)
genProcessEpochScenario(runProcessSlashings, process_slashings, needCache = false)
genProcessEpochScenario(runProcessFinalUpdates, process_final_updates, needCache = false)
genProcessBlockScenario(runProcessBlockHeader, process_block_header, block_header, BeaconBlock, needFlags = true, needCache = true)
genProcessBlockScenario(runProcessProposerSlashing, process_proposer_slashing, proposer_slashing, ProposerSlashing, needFlags = true, needCache = true)
genProcessBlockScenario(runProcessAttestation, process_attestation, attestation, Attestation, needFlags = true, needCache = true)
genProcessBlockScenario(runProcessAttesterSlashing, process_attester_slashing, att_slash, AttesterSlashing, needFlags = false, needCache = true)
genProcessBlockScenario(runProcessDeposit, process_deposit, deposit, Deposit, needFlags = true, needCache = false)
genProcessBlockScenario(runProcessVoluntaryExits, process_voluntary_exit, deposit, SignedVoluntaryExit, needFlags = true, needCache = false)

View File

@ -1,168 +0,0 @@
import
json, macros, sequtils, endians,
eth/common, stint, nimcrypto, stew/byteutils
type
  Validator {.packed.} = object
    ## Early PoC-era validator record. ``{.packed.}`` removes field padding
    ## so that ``sizeof(Validator)`` matches the raw serialized size that
    ## `serializeETH` asserts against.
    # The validator's public key
    pubkey: Uint256
    # What shard the validator's balance will be sent to
    # after withdrawal
    withdrawal_shard: int16
    # And what address
    withdrawal_address: EthAddress
    # The validator's current RANDAO beacon commitment
    randao_commitment: Hash256
    # Current balance
    balance: int64
    # Dynasty where the validator is inducted
    start_dynasty: int64
    # Dynasty where the validator leaves
    end_dynasty: int64
macro typeToJson*(T: typedesc): untyped =
  ## Transform a Nim type section in a Json schema.
  ## Produces a string literal: a JSON object that maps each field name of
  ## `T` to the string name of its declared type, in declaration order.
  ## TODO: Add the possibility to force in lexicographical order
  let impl = T.getTypeImpl[1].getTypeImpl # Access type implementation as a tree
  var typeAsJson: JsonNode = newJObject()
  # impl[2] is the object's record list; each entry is an IdentDefs node
  # whose [0] is the field name and [1] the field type.
  for field in impl[2]:
    let (fieldName, fieldType) = ($field[0], $field[1]) # convert name and type to string
    typeAsJson[fieldName] = %fieldType # % creates a JsonNode from the string that we assign to key = fieldName.
  result = newStrLitNode($typeAsJson)
proc appendBigEndianInt(dst: var seq[byte], src: SomeNumber) =
  ## Append `src` to `dst` in big-endian byte order.
  ## Only 2-, 4- and 8-byte integers are supported; anything else is a
  ## compile-time error at instantiation.
  const width = sizeof(src)
  let writeAt = dst.len
  dst.setLen(writeAt + width)
  # Dispatch on the integer width at compile time, writing straight into
  # the freshly grown tail of the sequence.
  when width == 2: # int16 / uint16
    bigEndian16(dst[writeAt].addr, src.unsafeAddr)
  elif width == 4: # int32 / uint32
    bigEndian32(dst[writeAt].addr, src.unsafeAddr)
  elif width == 8: # int64 / uint64
    bigEndian64(dst[writeAt].addr, src.unsafeAddr)
  else:
    static: doAssert false, "src must be a int16, int32 or int64 or unsigned int of the same size"
proc serializeETH[T](x: T): seq[byte] =
  ## Serialize an Ethereum type to the PoC serialization format.
  ## Output layout: magic ("\x7FETHEREUM") | 2-byte version | big-endian
  ## int64 offset of the raw data | 32-byte Blake2-256 hash of the raw data |
  ## JSON schema | raw field data (fields in declaration order).
  const
    magic = "\x7FETHEREUM"
    version = [byte 1, 0]
    schema = typeToJson(T) # JSON schema string, computed at compile time

  # Write magic string and version
  result = @[]
  for chr in magic:
    result.add byte(chr)
  result.add version

  # Offset of the raw data (stored as int64 even on 32-bit platform):
  # - 9 bytes of magic header
  # - 2 bytes for version
  # - 8 bytes for offset (int64)
  # - 32 bytes for Blake2 hash for raw data
  # - ??? bytes for schema
  let
    offset = int64(result.len + sizeof(int64) + sizeof(Hash256) + schema.len)
    metadataStart = result.len + sizeof(int64)

  # Write the offset as a Big Endian int64
  result.setLen(result.len + sizeof(int64))
  bigEndian64(result[result.len - sizeof(int64)].addr, offset.unsafeAddr)

  # Reserve space for Blake2 hash (256-bit / 32 bytes); it is back-filled
  # below once the raw data has been written.
  result.setLen(result.len + sizeof(Hash256))

  # Write the schema
  for chr in schema:
    result.add byte(chr)

  doAssert result.len == offset

  # Write raw data - this is similar to SimpleSerialize
  for field in fields(x):
    when field is UInt256:
      result.add field.toByteArrayBE
    elif field is (int16 or int64):
      result.appendBigEndianInt field
    elif field is EthAddress:
      result.add field
    elif field is Hash256:
      result.add field.data
    else:
      raise newException(ValueError, "Not implemented")

  # Relies on T being {.packed.}: raw size must equal sizeof(T).
  doAssert result.len == offset + sizeof(T)

  # Compute the hash of the raw data and write it into the reserved slot
  result[metadataStart ..< metadataStart + sizeof(Hash256)] = blake2_256.digest(result[offset ..< result.len]).data

  # Some reports
  echo "Schema: " & $schema
  echo "Schema size: " & $schema.len
  echo "Raw data offset (== metadata size including schema): " & $offset
  echo "Raw data size (bytes): " & $sizeof(T)
  echo "Total size (bytes): " & $result.len
when isMainModule:
  # Demo: serialize a sample Validator and print the result as bytes,
  # chars and hex (expected output is reproduced in the comments below).
  let x = Validator(
    pubkey: high(Uint256), # 0xFFFF...FFFF
    withdrawal_shard: 4455,
    withdrawal_address: hexToPaddedByteArray[20]("0x1234"),
    randao_commitment: Hash256(data: hexToPaddedByteArray[32]("0xAABBCCDDEEFF")),
    balance: 100000,
    start_dynasty: 1,
    end_dynasty: 2
  )

  let y = serializeETH(x)

  echo "\n##################### \n"
  echo "Byte representation \n"

  # Byte representation
  echo y

  echo "\n##################### \n"
  echo "Char representation \n"

  echo cast[seq[char]](y)

  echo "\n##################### \n"
  echo "Hex representation \n"

  echo byteutils.toHex y
#################################################################################
# Output
# Schema: {"pubkey":"UInt256","withdrawal_shard":"int16","withdrawal_address":"EthAddress","randao_commitment":"Hash256","balance":"int64","start_dynasty":"int64","end_dynasty":"int64"}
# Schema size: 175
# Raw data offset (== metadata size including schema): 226
# Raw data size (bytes): 110
# Total size (bytes): 336
#
# #####################
#
# Byte representation
#
# @[127, 69, 84, 72, 69, 82, 69, 85, 77, 1, 0, 0, 0, 0, 0, 0, 0, 0, 226, 57, 0, 86, 134, 122, 192, 114, 196, 207, 203, 93, 74, 188, 96, 189, 200, 234, 140, 195, 148, 28, 78, 203, 152, 116, 37, 74, 241, 189, 75, 40, 29, 123, 34, 112, 117, 98, 107, 101, 121, 34, 58, 34, 85, 73, 110, 116, 50, 53, 54, 34, 44, 34, 119, 105, 116, 104, 100, 114, 97, 119, 97, 108, 95, 115, 104, 97, 114, 100, 34, 58, 34, 105, 110, 116, 49, 54, 34, 44, 34, 119, 105, 116, 104, 100, 114, 97, 119, 97, 108, 95, 97, 100, 100, 114, 101, 115, 115, 34, 58, 34, 69, 116, 104, 65, 100, 100, 114, 101, 115, 115, 34, 44, 34, 114, 97, 110, 100, 97, 111, 95, 99, 111, 109, 109, 105, 116, 109, 101, 110, 116, 34, 58, 34, 72, 97, 115, 104, 50, 53, 54, 34, 44, 34, 98, 97, 108,97, 110, 99, 101, 34, 58, 34, 105, 110, 116, 54, 52, 34, 44, 34, 115, 116, 97, 114, 116, 95, 100, 121, 110, 97, 115, 116, 121, 34, 58, 34, 105, 110, 116, 54, 52, 34, 44, 34, 101, 110, 100, 95, 100, 121, 110, 97, 115, 116, 121, 34, 58, 34, 105, 110, 116, 54, 52, 34, 125, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 17, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 170, 187, 204, 221, 238, 255, 0, 0, 0, 0, 0, 1, 134, 160, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2]
#
# #####################
#
# Char representation
#
# @['\x7F', 'E', 'T', 'H', 'E', 'R', 'E', 'U', 'M', '\x01', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\xE2', '9', '\x00', 'V', '\x86', 'z', '\xC0', 'r', '\xC4', '\xCF', '\xCB', ']', 'J', '\xBC', '`', '\xBD', '\xC8', '\xEA', '\x8C', '\xC3', '\x94', '\x1C', 'N', '\xCB', '\x98', 't', '%', 'J', '\xF1', '\xBD', 'K', '(', '\x1D', '{', '\"', 'p', 'u', 'b', 'k', 'e', 'y', '\"', ':', '\"', 'U', 'I', 'n', 't', '2', '5', '6', '\"', ',', '\"', 'w', 'i', 't', 'h', 'd', 'r', 'a', 'w', 'a', 'l', '_', 's', 'h', 'a', 'r', 'd', '\"', ':', '\"', 'i', 'n', 't', '1', '6', '\"', ',', '\"', 'w', 'i', 't', 'h', 'd', 'r', 'a', 'w', 'a', 'l', '_', 'a', 'd', 'd', 'r', 'e', 's', 's', '\"', ':', '\"', 'E', 't', 'h', 'A', 'd', 'd', 'r','e', 's', 's', '\"', ',', '\"', 'r', 'a', 'n', 'd', 'a', 'o', '_', 'c', 'o', 'm', 'm', 'i', 't', 'm', 'e', 'n', 't', '\"', ':', '\"', 'H', 'a', 's', 'h', '2', '5', '6', '\"', ',', '\"', 'b', 'a', 'l', 'a', 'n', 'c', 'e', '\"', ':', '\"', 'i', 'n', 't', '6', '4', '\"', ',', '\"', 's', 't', 'a', 'r', 't', '_', 'd', 'y', 'n', 'a', 's', 't', 'y', '\"', ':', '\"', 'i', 'n', 't', '6', '4', '\"', ',', '\"', 'e', 'n', 'd', '_', 'd', 'y', 'n', 'a', 's', 't', 'y', '\"', ':', '\"', 'i', 'n', 't', '6', '4', '\"', '}', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\x11', 'g', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x12', '4', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\xAA', '\xBB', '\xCC', '\xDD', '\xEE', '\xFF', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01', '\x86', '\xA0', 
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x01', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x02']
#
# #####################
#
# Hex representation
#
# 7f455448455245554d010000000000000000e2390056867ac072c4cfcb5d4abc60bdc8ea8cc3941c4ecb9874254af1bd4b281d7b227075626b6579223a2255496e74323536222c227769746864726177616c5f7368617264223a22696e743136222c227769746864726177616c5f61646472657373223a2245746841646472657373222c2272616e64616f5f636f6d6d69746d656e74223a2248617368323536222c2262616c616e6365223a22696e743634222c2273746172745f64796e61737479223a22696e743634222c22656e645f64796e61737479223a22696e743634227dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff116700000000000000000000000000000000000012340000000000000000000000000000000000000000000000000000aabbccddeeff00000000000186a000000000000000010000000000000002

View File

@ -1,8 +1,8 @@
import
confutils,
../beacon_chain/[extras, ssz],
../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
../tests/testutil
../beacon_chain/spec/[beaconstate, datatypes, digest],
../tests/testblockutil
proc stateSize(deposits: int, maxContent = false) =
var state = initialize_beacon_state_from_eth1(

View File

@ -9,7 +9,7 @@ import
confutils, stats, times, std/monotimes,
strformat,
options, sequtils, random, tables,
../tests/[testutil, testblockutil],
../tests/[testblockutil],
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
../beacon_chain/[attestation_pool, extras, ssz]
@ -20,11 +20,31 @@ type Timers = enum
tShuffle = "Retrieve committee once using get_beacon_committee"
tAttest = "Combine committee attestations"
proc writeJson*(prefix, slot, v: auto) =
template withTimer(stats: var RunningStat, body: untyped) =
  ## Run `body` once and push the elapsed CPU time (seconds) into `stats`.
  let t0 = cpuTime()
  block:
    body
  stats.push cpuTime() - t0
template withTimerRet(stats: var RunningStat, body: untyped): untyped =
  ## Like `withTimer`, but `body` is an expression whose value becomes the
  ## template's result after the elapsed CPU time is recorded in `stats`.
  let t0 = cpuTime()
  let res = block:
    body
  stats.push cpuTime() - t0
  res
proc jsonName(prefix, slot: auto): string =
  ## File name for a periodic state dump: zero-padded prefix and slot with a
  ## ".json" suffix.
  ## NOTE(review): relies on `shortLog(slot)` from the spec helpers — assumed
  ## to render the slot as a number; confirm against spec/datatypes.
  fmt"{prefix:04}-{shortLog(slot):08}.json"
proc writeJson*(fn, v: auto) =
var f: File
defer: close(f)
let fileName = fmt"{prefix:04}-{shortLog(slot):08}.json"
Json.saveFile(fileName, v, pretty = true)
Json.saveFile(fn, v, pretty = true)
func verifyConsensus(state: BeaconState, attesterRatio: auto) =
if attesterRatio < 0.63:
@ -45,16 +65,24 @@ func verifyConsensus(state: BeaconState, attesterRatio: auto) =
cli do(slots = SLOTS_PER_EPOCH * 6,
validators = SLOTS_PER_EPOCH * 30, # One per shard is minimum
json_interval = SLOTS_PER_EPOCH,
write_last_json = false,
prefix = 0,
attesterRatio {.desc: "ratio of validators that attest in each round"} = 0.73,
validate = true):
echo "Preparing validators..."
let
flags = if validate: {} else: {skipValidation}
genesisState = initialize_beacon_state_from_eth1(
Eth2Digest(), 0,
makeInitialDeposits(validators, flags), flags)
deposits = makeInitialDeposits(validators, flags)
echo "Generating Genesis..."
let
genesisState =
initialize_beacon_state_from_eth1(Eth2Digest(), 0, deposits, flags)
genesisBlock = get_initial_beacon_block(genesisState)
echo "Starting simulation..."
var
attestations = initTable[Slot, seq[Attestation]]()
state = genesisState
@ -65,19 +93,28 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
blck: SignedBeaconBlock
cache = get_empty_per_epoch_cache()
proc maybeWrite() =
if state.slot mod json_interval.uint64 == 0:
writeJson(prefix, state.slot, state)
write(stdout, ":")
proc maybeWrite(last: bool) =
if write_last_json:
if state.slot mod json_interval.uint64 == 0:
write(stdout, ":")
else:
write(stdout, ".")
if last:
writeJson("state.json", state)
else:
write(stdout, ".")
if state.slot mod json_interval.uint64 == 0:
writeJson(jsonName(prefix, state.slot), state)
write(stdout, ":")
else:
write(stdout, ".")
# TODO doAssert against this up-front
# indexed attestation: validator index beyond max validators per committee
# len(indices) <= MAX_VALIDATORS_PER_COMMITTEE
for i in 0..<slots:
maybeWrite()
maybeWrite(false)
verifyConsensus(state, attesterRatio)
let
@ -150,9 +187,10 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
echo &" slot: {shortLog(state.slot)} ",
&"epoch: {shortLog(state.slot.compute_epoch_at_slot)}"
maybeWrite() # catch that last state as well..
echo "done!"
maybeWrite(true) # catch that last state as well..
echo "Done!"
echo "Validators: ", validators, ", epoch length: ", SLOTS_PER_EPOCH
echo "Validators per attestation (mean): ", attesters.mean

View File

@ -0,0 +1 @@
-u:metrics

View File

@ -3,8 +3,9 @@ import
const
rootDir = thisDir() / ".."
bootstrapFile = "bootstrap_nodes.txt"
depositContractFile = "deposit_contract.txt"
bootstrapTxtFileName = "bootstrap_nodes.txt"
bootstrapYamlFileName = "boot_enr.yaml"
depositContractFileName = "deposit_contract.txt"
genesisFile = "genesis.ssz"
configFile = "config.yaml"
testnetsRepo = "eth2-testnets"
@ -33,8 +34,13 @@ cli do (testnetName {.argument.}: string):
rmDir(allTestnetsDir)
cd buildDir
exec &"git clone --quiet --depth=1 {testnetsGitUrl}"
var
depositContractOpt = ""
bootstrapFileOpt = ""
let testnetDir = allTestnetsDir / team / testnet
if not system.dirExists(testnetDir):
echo &"No metadata files exists for the '{testnetName}' testnet"
@ -46,9 +52,18 @@ cli do (testnetName {.argument.}: string):
echo &"The required file {fileName} is not present in '{testnetDir}'."
quit 1
checkRequiredFile bootstrapFile
checkRequiredFile genesisFile
let bootstrapTxtFile = testnetDir / bootstrapTxtFileName
if system.fileExists(bootstrapTxtFile):
bootstrapFileOpt = &"--bootstrap-file=\"{bootstrapTxtFile}\""
else:
let bootstrapYamlFile = testnetDir / bootstrapYamlFileName
if system.fileExists(bootstrapYamlFile):
bootstrapFileOpt = &"--enr-bootstrap-file=\"{bootstrapYamlFile}\""
else:
echo "Warning: the network metadata doesn't include a bootstrap file"
var preset = testnetDir / configFile
if not system.fileExists(preset): preset = "minimal"
@ -60,8 +75,7 @@ cli do (testnetName {.argument.}: string):
beaconNodeBinary = buildDir / "beacon_node_" & dataDirName
nimFlags = "-d:chronicles_log_level=DEBUG " & getEnv("NIM_PARAMS")
var depositContractOpt = ""
let depositContractFile = testnetDir / depositContractFile
let depositContractFile = testnetDir / depositContractFileName
if system.fileExists(depositContractFile):
depositContractOpt = "--deposit-contract=" & readFile(depositContractFile).strip
@ -113,6 +127,6 @@ cli do (testnetName {.argument.}: string):
execIgnoringExitCode replace(&"""{beaconNodeBinary}
--data-dir="{dataDir}"
--dump=true
--bootstrap-file="{testnetDir/bootstrapFile}"
{bootstrapFileOpt}
--state-snapshot="{testnetDir/genesisFile}" """ & depositContractOpt, "\n", " ")

84
scripts/launch_local_testnet.sh Executable file
View File

@ -0,0 +1,84 @@
#!/bin/bash

# Mostly a duplication of "tests/simulation/{start.sh,run_node.sh}", but with a
# focus on replicating testnets as closely as possible, which means following
# the Docker execution labyrinth.
#
# Builds beacon_node with the chosen network's flags, creates a local testnet
# genesis, then runs NUM_NODES nodes against it under local_testnet_data/.

set -e

cd "$(dirname "${BASH_SOURCE[0]}")"/..

# First CLI argument selects the network; its settings come from scripts/<NETWORK>.env.
NETWORK=${1:-"testnet1"}
NUM_NODES=10

# Fresh working area for deposits, network metadata and per-node data dirs.
DATA_DIR="local_testnet_data"
rm -rf "${DATA_DIR}"
DEPOSITS_DIR="${DATA_DIR}/deposits_dir"
mkdir -p "${DEPOSITS_DIR}"
NETWORK_DIR="${DATA_DIR}/network_dir"
mkdir -p "${NETWORK_DIR}"

# "set -a" auto-exports every variable the env file defines
# (QUICKSTART_VALIDATORS, RANDOM_VALIDATORS, BOOTSTRAP_PORT, ...).
set -a
source "scripts/${NETWORK}.env"
set +a

NETWORK_NIM_FLAGS=$(scripts/load-testnet-nim-flags.sh ${NETWORK})
make LOG_LEVEL=DEBUG NIMFLAGS="-d:debug -d:insecure -d:testnet_servers_image ${NETWORK_NIM_FLAGS}" beacon_node

# NOTE(review): DEPOSITS_DIR was created above and is removed again here,
# presumably because makeDeposits (re)creates it itself — confirm.
rm -rf "${DEPOSITS_DIR}"
./build/beacon_node makeDeposits \
  --quickstart-deposits=${QUICKSTART_VALIDATORS} \
  --random-deposits=${RANDOM_VALIDATORS} \
  --deposits-dir="${DEPOSITS_DIR}"

TOTAL_VALIDATORS="$(( $QUICKSTART_VALIDATORS + $RANDOM_VALIDATORS ))"
BOOTSTRAP_IP="127.0.0.1"

# Node 0 doubles as the bootstrap node: this writes the genesis state and the
# bootstrap-nodes file the other nodes will use.
./build/beacon_node createTestnet \
  --data-dir="${DATA_DIR}/node0" \
  --validators-dir="${DEPOSITS_DIR}" \
  --total-validators=${TOTAL_VALIDATORS} \
  --last-user-validator=${QUICKSTART_VALIDATORS} \
  --output-genesis="${NETWORK_DIR}/genesis.ssz" \
  --output-bootstrap-file="${NETWORK_DIR}/bootstrap_nodes.txt" \
  --bootstrap-address=${BOOTSTRAP_IP} \
  --bootstrap-port=${BOOTSTRAP_PORT} \
  --genesis-offset=5 # Delay in seconds

# Kill any nodes (and their p2p daemons) left over from a previous run;
# best-effort, so every command tolerates failure.
cleanup() {
	killall beacon_node p2pd &>/dev/null || true
	sleep 2
	killall -9 beacon_node p2pd &>/dev/null || true
	rm -f /tmp/nim-p2pd-*.sock || true
}
cleanup

PIDS=""
for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do
	if [[ ${NUM_NODE} == 0 ]]; then
		# The bootstrap node needs no bootstrap file of its own.
		BOOTSTRAP_ARG=""
	else
		BOOTSTRAP_ARG="--bootstrap-file=${NETWORK_DIR}/bootstrap_nodes.txt"
		# Wait for the master node to write out its address file
		while [ ! -f "${DATA_DIR}/node0/beacon_node.address" ]; do
			sleep 0.1
		done
	fi

	# stdbuf -o0 keeps the per-node log files unbuffered; ports are offset by
	# the node index so all nodes can share one host.
	stdbuf -o0 ./env.sh build/beacon_node \
		--nat=none \
		--log-level=TRACE \
		--tcp-port=$(( ${BOOTSTRAP_PORT} + ${NUM_NODE} )) \
		--udp-port=$(( ${BOOTSTRAP_PORT} + ${NUM_NODE} )) \
		--data-dir="${DATA_DIR}/node${NUM_NODE}" \
		${BOOTSTRAP_ARG} \
		--state-snapshot="${NETWORK_DIR}/genesis.ssz" \
		> "${DATA_DIR}/log${NUM_NODE}.txt" 2>&1 &

	# Collect the background PIDs as a comma-separated list for htop -p.
	if [[ "${PIDS}" == "" ]]; then
		PIDS="$!"
	else
		PIDS="${PIDS},$!"
	fi
done

# Watch the nodes; quitting htop falls through to cleanup, tearing everything down.
htop -p "$PIDS"

cleanup

View File

@ -120,23 +120,20 @@ if [[ $PUBLISH_TESTNET_RESETS != "0" ]]; then
scp "$DATA_DIR_ABS/privkey.protobuf" $BOOTSTRAP_HOST:/tmp/
ssh $BOOTSTRAP_HOST "sudo install -o dockremap -g docker /tmp/privkey.protobuf $BOOTSTRAP_NODE_DOCKER_PATH"
echo "Publishing Docker image..."
make push-last
echo Persisting testnet data to git...
pushd "$NETWORK_DIR_ABS"
git add $COMMITTED_FILES
git commit -m "Reset of Nimbus $NETWORK"
git push
popd
../env.sh nim --verbosity:0 manage_testnet_hosts.nims restart_nodes \
--network=$NETWORK \
> /tmp/restart-nodes.sh
bash /tmp/restart-nodes.sh
rm /tmp/restart-nodes.sh
fi
echo "Publishing Docker image..."
make push-last
#echo -e "\nA Watchtower systemd service will pull the new image and start new containers based on it, on each testnet host, in the next 2 minutes."
../env.sh nim --verbosity:0 manage_testnet_hosts.nims restart_nodes \
--network=$NETWORK \
> /tmp/restart-nodes.sh
bash /tmp/restart-nodes.sh
rm /tmp/restart-nodes.sh

View File

@ -18,6 +18,8 @@ import # Unit test
./test_block_pool,
./test_helpers,
./test_interop,
./test_kvstore,
./test_kvstore_lmdb,
./test_ssz,
./test_state_transition,
./test_sync_protocol,

@ -1 +1 @@
Subproject commit 330d343cb5e5c68e16eb57963e46d64a424751e6
Subproject commit 73e91fe1f6abac428441105bd094c71dce182af4

View File

@ -36,7 +36,7 @@ proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =
const
FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
SszTestsDir* = FixturesDir/"tests-v0.9.4"
SszTestsDir* = FixturesDir/"tests-v0.10.0"
proc parseTest*(path: string, Format: typedesc[Json or SSZ], T: typedesc): T =
try:

View File

@ -20,7 +20,7 @@ const
SpecDir = currentSourcePath.rsplit(DirSep, 1)[0] /
".."/".."/"beacon_chain"/"spec"
FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
Config = FixturesDir/"tests-v0.9.4"/const_preset/"config.yaml"
Config = FixturesDir/"tests-v0.10.0"/const_preset/"config.yaml"
type
CheckedType = SomeInteger or Slot or Epoch
@ -116,8 +116,11 @@ proc checkConfig() =
let domain = parseEnum[DomainType](constant)
let value = parseU32LEHex(value.getStr())
check: uint32(domain) == value
elif constant == "GENESIS_FORK_VERSION":
let value = parseU32LEHex(value.getStr())
check: ConstsToCheck[constant] == value
else:
check: ConstsToCheck[constant] == value.getBiggestInt().uint64()
suite "Official - 0.9.4 - constants & config " & preset():
suite "Official - 0.10.0 - constants & config " & preset():
checkConfig()

View File

@ -26,7 +26,7 @@ import
const
FixturesDir = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
SSZDir = FixturesDir/"tests-v0.9.4"/const_preset/"phase0"/"ssz_static"
SSZDir = FixturesDir/"tests-v0.10.0"/const_preset/"phase0"/"ssz_static"
type
SSZHashTreeRoot = object
@ -87,6 +87,7 @@ proc runSSZtests() =
of "Deposit": checkSSZ(Deposit, path, hash)
of "DepositData": checkSSZ(DepositData, path, hash)
of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "Fork": checkSSZ(Fork, path, hash)
of "HistoricalBatch": checkSSZ(HistoricalBatch, path, hash)
@ -97,10 +98,11 @@ proc runSSZtests() =
of "SignedBeaconBlockHeader":
checkSSZ(SignedBeaconBlockHeader, path, hash)
of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash)
of "SigningRoot": checkSSZ(SigningRoot, path, hash)
of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
else:
raise newException(ValueError, "Unsupported test: " & sszType)
suite "Official - 0.9.4 - SSZ consensus objects " & preset():
suite "Official - 0.10.0 - SSZ consensus objects " & preset():
runSSZtests()

View File

@ -7,17 +7,16 @@
{.used.}
import options, unittest, sequtils, eth/trie/[db],
../beacon_chain/[beacon_chain_db, extras, interop, ssz],
import options, unittest, sequtils,
../beacon_chain/[beacon_chain_db, extras, interop, ssz, kvstore],
../beacon_chain/spec/[beaconstate, datatypes, digest, crypto],
# test utilies
./testutil, ./testblockutil
suite "Beacon chain DB" & preset():
timedTest "empty database" & preset():
var
db = init(BeaconChainDB, newMemoryDB())
db = init(BeaconChainDB, kvStore MemoryStoreRef.init())
check:
when const_preset=="minimal":
@ -28,7 +27,7 @@ suite "Beacon chain DB" & preset():
timedTest "sanity check blocks" & preset():
var
db = init(BeaconChainDB, newMemoryDB())
db = init(BeaconChainDB, kvStore MemoryStoreRef.init())
let
blck = SignedBeaconBlock()
@ -46,7 +45,7 @@ suite "Beacon chain DB" & preset():
timedTest "sanity check states" & preset():
var
db = init(BeaconChainDB, newMemoryDB())
db = init(BeaconChainDB, kvStore MemoryStoreRef.init())
let
state = BeaconState()
@ -60,7 +59,7 @@ suite "Beacon chain DB" & preset():
timedTest "find ancestors" & preset():
var
db = init(BeaconChainDB, newMemoryDB())
db = init(BeaconChainDB, kvStore MemoryStoreRef.init())
x: ValidatorSig
y = init(ValidatorSig, x.getBytes())
@ -101,7 +100,7 @@ suite "Beacon chain DB" & preset():
# serialization where an all-zero default-initialized bls signature could
# not be deserialized because the deserialization was too strict.
var
db = init(BeaconChainDB, newMemoryDB())
db = init(BeaconChainDB, kvStore MemoryStoreRef.init())
let
state = initialize_beacon_state_from_eth1(

View File

@ -10,8 +10,8 @@
import
options, sequtils, unittest, chronicles,
./testutil, ./testblockutil,
../beacon_chain/spec/[datatypes, digest],
../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, ssz]
../beacon_chain/spec/[datatypes, digest, helpers, validator],
../beacon_chain/[beacon_node_types, block_pool, beacon_chain_db, extras, ssz]
suite "BlockRef and helpers" & preset():
timedTest "isAncestorOf sanity" & preset():
@ -92,7 +92,7 @@ when const_preset == "minimal": # Too much stack space used on mainnet
b1 = addBlock(state, pool.tail.root, BeaconBlockBody())
b1Root = hash_tree_root(b1.message)
b2 = addBlock(state, b1Root, BeaconBlockBody())
b2Root = hash_tree_root(b2.message)
b2Root {.used.} = hash_tree_root(b2.message)
timedTest "getRef returns nil for missing blocks":
check:
@ -104,7 +104,6 @@ when const_preset == "minimal": # Too much stack space used on mainnet
check:
b0.isSome()
toSeq(pool.blockRootsForSlot(GENESIS_SLOT)) == @[pool.tail.root]
timedTest "Simple block add&get" & preset():
let
@ -115,6 +114,8 @@ when const_preset == "minimal": # Too much stack space used on mainnet
b1Get.isSome()
b1Get.get().refs.root == b1Root
b1Add.root == b1Get.get().refs.root
pool.heads.len == 1
pool.heads[0].blck == b1Add
let
b2Add = pool.add(b2Root, b2)
@ -124,6 +125,8 @@ when const_preset == "minimal": # Too much stack space used on mainnet
b2Get.isSome()
b2Get.get().refs.root == b2Root
b2Add.root == b2Get.get().refs.root
pool.heads.len == 1
pool.heads[0].blck == b2Add
timedTest "Reverse order block add & get" & preset():
discard pool.add(b2Root, b2)
@ -144,10 +147,8 @@ when const_preset == "minimal": # Too much stack space used on mainnet
b1Get.get().refs.children[0] == b2Get.get().refs
b2Get.get().refs.parent == b1Get.get().refs
toSeq(pool.blockRootsForSlot(b1.message.slot)) == @[b1Root]
toSeq(pool.blockRootsForSlot(b2.message.slot)) == @[b2Root]
db.putHeadBlock(b2Root)
pool.updateHead(b2Get.get().refs)
# The heads structure should have been updated to contain only the new
# b2 head
@ -159,8 +160,13 @@ when const_preset == "minimal": # Too much stack space used on mainnet
pool2 = BlockPool.init(db)
check:
# ensure we loaded the correct head state
pool2.head.blck.root == b2Root
hash_tree_root(pool2.headState.data.data) == b2.message.state_root
pool2.get(b1Root).isSome()
pool2.get(b2Root).isSome()
pool2.heads.len == 1
pool2.heads[0].blck.root == b2Root
timedTest "Can add same block twice" & preset():
let
@ -180,3 +186,38 @@ when const_preset == "minimal": # Too much stack space used on mainnet
check:
pool.head.blck == b1Add
pool.headState.data.data.slot == b1Add.slot
suite "BlockPool finalization tests" & preset():
setup:
var
db = makeTestDB(SLOTS_PER_EPOCH)
pool = BlockPool.init(db)
timedTest "prune heads on finalization" & preset():
block:
# Create a fork that will not be taken
var
blck = makeBlock(pool.headState.data.data, pool.head.blck.root,
BeaconBlockBody())
discard pool.add(hash_tree_root(blck.message), blck)
for i in 0 ..< (SLOTS_PER_EPOCH * 6):
if i == 1:
# There are 2 heads now because of the fork at slot 1
check:
pool.tail.children.len == 2
pool.heads.len == 2
var
cache = get_empty_per_epoch_cache()
blck = makeBlock(pool.headState.data.data, pool.head.blck.root,
BeaconBlockBody(
attestations: makeFullAttestations(
pool.headState.data.data, pool.head.blck.root,
pool.headState.data.data.slot, cache, {skipValidation})))
let added = pool.add(hash_tree_root(blck.message), blck)
pool.updateHead(added)
check:
pool.heads.len() == 1
pool.head.justified.slot.compute_epoch_at_slot() == 5
pool.tail.children.len == 1

45
tests/test_kvstore.nim Normal file
View File

@ -0,0 +1,45 @@
{.used.}
import
unittest,
../beacon_chain/kvstore
proc testKVStore*(db: KVStoreRef) =
  ## Generic conformance test for any `KVStoreRef` implementation: exercises
  ## the put/get/contains/del round-trip, including overwriting an existing
  ## key and deleting a missing one.
  let
    key = [0'u8, 1, 2, 3]
    value = [3'u8, 2, 1, 0]
    value2 = [5'u8, 2, 1, 0]

  check:
    db != nil

    # `get` returns false for an absent key; the callback is not expected to run
    not db.get(key, proc(data: openArray[byte]) = discard)
    not db.contains(key)

  db.del(key) # does nothing - deleting a missing key must not be an error

  db.put(key, value)

  check:
    db.contains(key)
    db.get(key, proc(data: openArray[byte]) =
      # the callback receives exactly the bytes that were stored
      check data == value
    )

  db.put(key, value2) # overwrite old value

  check:
    db.contains(key)
    db.get(key, proc(data: openArray[byte]) =
      check data == value2
    )

  db.del(key)

  check:
    # after deletion the key reads as absent again
    not db.get(key, proc(data: openArray[byte]) = discard)
    not db.contains(key)

  db.del(key) # does nothing

suite "MemoryStoreRef":
  test "KVStore interface":
    # run the generic conformance procedure against the in-memory store
    testKVStore(kvStore MemoryStoreRef.init())

View File

@ -0,0 +1,24 @@
{.used.}
import
os,
unittest,
../beacon_chain/[kvstore, kvstore_lmdb],
./test_kvstore
suite "LMDB":
  # Runs the generic KVStore conformance tests (test_kvstore.testKVStore)
  # against the LMDB-backed store, using a scratch directory in the system
  # temp dir that is recreated before and removed after each test.
  setup:
    let
      path = os.getTempDir() / "test_kvstore_lmdb"
    os.removeDir(path) # start from a clean slate in case a previous run crashed
    os.createDir(path)

  teardown:
    os.removeDir(path) # don't leave scratch data behind

  test "KVStore interface":
    let db = LmdbStoreRef.init(path)
    defer: db.close() # close the LMDB environment even if a check fails

    testKVStore(kvStore db)

View File

@ -93,7 +93,9 @@ proc addBlock*(
# TODO ugly hack; API needs rethinking
var new_body = body
new_body.randao_reveal = privKey.genRandaoReveal(state.fork, state.slot + 1)
if skipValidation notin flags:
new_body.randao_reveal = privKey.genRandaoReveal(state.fork, state.slot + 1)
new_body.eth1_data = Eth1Data()
var
@ -171,11 +173,8 @@ proc makeAttestation*(
sig =
if skipValidation notin flags:
bls_sign(
hackPrivKey(validator), @(msg.data),
get_domain(
state,
DOMAIN_BEACON_ATTESTER,
data.target.epoch))
hackPrivKey(validator), msg.data,
get_domain(state, DOMAIN_BEACON_ATTESTER, data.target.epoch))
else:
ValidatorSig()
@ -208,3 +207,33 @@ proc makeAttestation*(
find_beacon_committee(state, validator_index, cache)
makeAttestation(state, beacon_block_root, committee, slot, index,
validator_index, cache, flags)
proc makeFullAttestations*(
    state: BeaconState, beacon_block_root: Eth2Digest, slot: Slot,
    cache: var StateCache,
    flags: UpdateFlags = {}): seq[Attestation] =
  # Create attestations in which the full committee participates for each shard
  # that should be attested to during a particular slot.
  #
  # When `skipValidation` is in `flags`, signing is skipped and each
  # attestation carries only the default-initialized aggregate signature.
  let
    count = get_committee_count_at_slot(state, slot)

  for index in 0..<count:
    let
      committee = get_beacon_committee(state, slot, index, cache)
      data = makeAttestationData(state, slot, index, beacon_block_root)
      msg = hash_tree_root(data) # signing root shared by all committee members

    var
      attestation = Attestation(
        # every committee member participates, so every bit is set below
        aggregation_bits: CommitteeValidatorsBits.init(committee.len),
        data: data,
        signature: ValidatorSig(kind: Real, blsValue: Signature.init())
      )

    for j in 0..<committee.len():
      attestation.aggregation_bits.setBit j

      if skipValidation notin flags:
        # fold each member's signature into the aggregate; test-only code that
        # derives the private key via the interop `hackPrivKey` helper
        attestation.signature.combine(bls_sign(
          hackPrivKey(state.validators[committee[j]]), msg.data,
          get_domain(state, DOMAIN_BEACON_ATTESTER, data.target.epoch)))

    result.add attestation

View File

@ -7,8 +7,8 @@
import
algorithm, strformat, stats, times, std/monotimes, stew/endians2,
chronicles, eth/trie/[db],
../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, beacon_node_types],
chronicles,
../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, kvstore, beacon_node_types],
../beacon_chain/spec/[digest, beaconstate, datatypes],
testblockutil
@ -73,7 +73,7 @@ template timedTest*(name, body) =
testTimes.add (f, name)
proc makeTestDB*(tailState: BeaconState, tailBlock: SignedBeaconBlock): BeaconChainDB =
result = init(BeaconChainDB, newMemoryDB())
result = init(BeaconChainDB, kvStore MemoryStoreRef.init())
BlockPool.preInit(result, tailState, tailBlock)
proc makeTestDB*(validators: int): BeaconChainDB =

1
vendor/lmdb vendored Submodule

@ -0,0 +1 @@
Subproject commit c8ecc17b38e164e6a728d66a9b1d05bc18dd3ace

2
vendor/nim-chronos vendored

@ -1 +1 @@
Subproject commit c39c0696806a0ef09bc90e477ea6b177d2824699
Subproject commit f02e748f18e0bc43ae6c68f92aa2c78323265bce

2
vendor/nim-eth vendored

@ -1 +1 @@
Subproject commit 655fc43751f203acdc525bab688115043f504b87
Subproject commit b7ebf8ed54c14884a63f185de1f38a25bc8fbcbc

2
vendor/nim-stint vendored

@ -1 +1 @@
Subproject commit 25c2604b4b41d1b13f4a2740486507fe5f63086e
Subproject commit 9e49b00148884a01d61478ae5d2c69b543b93ceb

2
wasm/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
state_sim

14
wasm/README.md Normal file
View File

@ -0,0 +1,14 @@
# Run nimbus state sim in a browser
Simple runners for in-browser running of WASM versions of applications - based
on emscripten-generated code.
```
# Make sure you have built nim-beacon-chain with make first!
./build.sh
# Run an HTTP server here (wasm + file:/// apparently don't mix)
python -m SimpleHTTPServer
# Open http://localhost:8000/index.html
```

35
wasm/build.sh Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash

# Simple build script to produce an Emscripten-based wasm version of the state
# sim.

# Assumes you have emcc latest-upstream in your PATH, per their install
# instructions (https://emscripten.org/docs/getting_started/downloads.html)
#
# git clone https://github.com/emscripten-core/emsdk.git
# cd emsdk
# git pull
# ./emsdk install latest-upstream
# ./emsdk activate latest-upstream
# source ./emsdk_env.sh

# Abort on the first failing command and on use of unset variables, so a
# failed Nim compile doesn't silently fall through to the emcc step.
set -eu

# NOTE(review): the relative paths below assume this script is run from the
# wasm/ directory.

# Clean build every time - we use wildcards below so this keeps it simple
rm -rf state_sim/nimcache

# GC + emcc optimizer leads to crashes - for now, we disable the GC here
../env.sh nim c \
  --cpu:i386 --os:linux --gc:none --threads:off \
  -d:release -d:clang -d:emscripten -d:noSignalHandler -d:usemalloc \
  --nimcache:state_sim/nimcache \
  -c ../research/state_sim.nim

../env.sh emcc \
  -I ../vendor/nimbus-build-system/vendor/Nim/lib \
  state_sim/nimcache/*.c \
  ../vendor/nim-blscurve/blscurve/csources/32/{big_384_29.c,ecp2_BLS381.c,rom_curve_BLS381.c,ecp_BLS381.c,fp2_BLS381.c,fp_BLS381.c,rom_field_BLS381.c,pair_BLS381.c,fp12_BLS381.c,fp4_BLS381.c} \
  -s ERROR_ON_UNDEFINED_SYMBOLS=0 \
  -s TOTAL_MEMORY=1073741824 \
  -s EXTRA_EXPORTED_RUNTIME_METHODS=FS \
  -s WASM=1 \
  --shell-file state_sim_shell.html \
  -O3 \
  -o state_sim/state_sim.html

36
wasm/build_ncli.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/bash

# Simple build script to produce an Emscripten-based wasm version of the ncli
# tool.

# Assumes you have emcc latest-upstream in your PATH, per their install
# instructions (https://emscripten.org/docs/getting_started/downloads.html)
#
# git clone https://github.com/emscripten-core/emsdk.git
# cd emsdk
# git pull
# ./emsdk install latest-upstream
# ./emsdk activate latest-upstream
# source ./emsdk_env.sh

# Abort on the first failing command and on use of unset variables, so a
# failed Nim compile doesn't silently fall through to the emcc step.
set -eu

# NOTE(review): the relative paths below assume this script is run from the
# wasm/ directory.

# Clean build every time - we use wildcards below so this keeps it simple
rm -rf ncli/nimcache

# GC + emcc optimizer leads to crashes - for now, we disable the GC here
../env.sh nim c \
  --cpu:i386 --os:linux --gc:none --threads:off \
  -d:release -d:clang -d:emscripten -d:noSignalHandler -d:usemalloc \
  --nimcache:ncli/nimcache -d:"network_type=none" \
  -u:metrics \
  -c ncli

../env.sh emcc \
  -I ../vendor/nimbus-build-system/vendor/Nim/lib \
  ncli/nimcache/*.c \
  ../vendor/nim-blscurve/blscurve/csources/32/{big_384_29.c,ecp2_BLS381.c,rom_curve_BLS381.c,ecp_BLS381.c,fp2_BLS381.c,fp_BLS381.c,rom_field_BLS381.c,pair_BLS381.c,fp12_BLS381.c,fp4_BLS381.c} \
  -s ERROR_ON_UNDEFINED_SYMBOLS=0 \
  -s TOTAL_MEMORY=1073741824 \
  -s EXTRA_EXPORTED_RUNTIME_METHODS=FS \
  -s WASM=1 \
  --shell-file ncli_shell.html \
  -O3 \
  -o ncli/ncli.html

71
wasm/index.html Normal file
View File

@ -0,0 +1,71 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Nimbus tooling</title>
</head>
<style>
body,
html {
font-family: monospace;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
.row-container {
display: flex;
width: 100%;
height: 100%;
flex-direction: column;
overflow: hidden;
}
.first-row {
background-image: url("https://our.status.im/content/images/2018/12/Artboard-1-copy-15@2x.png");
background-position: center; /* Center the image */
background-repeat: no-repeat; /* Do not repeat the image */
background-size: 100%;
border: none;
margin: 0;
padding: 0;
}
.second-row {
flex-grow: 1;
border: none;
margin: 0;
padding: 0;
}
</style>
<div class="row-container">
<div class="first-row">
<p><a href="https://github.com/status-im/nim-beacon-chain#state-transition-simulation">Ethereum Beacon Chain state transition simulation</a> (unoptimized work in progress, you might run out of memory)</p>
<form action="state_sim/state_sim.html" method="get" target="frame">
<table>
<tr>
<td>Create / Validate BLS signatures</td>
<td>Validators</td>
<td>Slots</td>
<td>Attestation ratio</td>
</tr>
<tr>
<td><input type="radio" name="--validate" value="true">true<input type="radio" name="--validate" value="false"
checked="true"> false</td>
<td><input type="text" name="--validators" value="100"></input></td>
<td><input type="text" name="--slots" value="10"></input></td>
<td><input type="text" name="--attesterRatio" value="0.9"></input></td>
</tr>
</table>
<input type="submit" value="Run that state transition!">
</form>
</div>
<iframe name="frame" src="" class="second-row"></iframe>
</div>
</html>

71
wasm/index_ncli.html Normal file
View File

@ -0,0 +1,71 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Nimbus ncli</title>
</head>
<style>
body,
html {
font-family: monospace;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
.row-container {
display: flex;
width: 100%;
height: 100%;
flex-direction: column;
overflow: hidden;
}
.first-row {
background-image: url("https://our.status.im/content/images/2018/12/Artboard-1-copy-15@2x.png");
background-position: center; /* Center the image */
background-repeat: no-repeat; /* Do not repeat the image */
background-size: 100%;
border: none;
margin: 0;
padding: 0;
}
.second-row {
flex-grow: 1;
border: none;
margin: 0;
padding: 0;
}
</style>
<div class="row-container">
<div class="first-row">
<p><a href="https://github.com/status-im/nim-beacon-chain#state-transition-simulation">Ethereum Beacon Chain state transition simulation</a> (unoptimized work in progress, you might run out of memory)</p>
<form action="ncli/ncli.html" method="get" target="frame">
<table>
<tr>
<td>Create / Validate BLS signatures</td>
<td>Validators</td>
<td>Slots</td>
<td>Attestation ratio</td>
</tr>
<tr>
<td><input type="radio" name="--validate" value="true">true<input type="radio" name="--validate" value="false"
checked="true"> false</td>
<td><input type="text" name="--validators" value="100"></input></td>
<td><input type="text" name="--slots" value="10"></input></td>
<td><input type="text" name="--attesterRatio" value="0.9"></input></td>
</tr>
</table>
<input type="submit" value="Run that state transition!">
</form>
</div>
<iframe name="frame" src="" class="second-row"></iframe>
</div>
</html>

71
wasm/index_state_sim.html Normal file
View File

@ -0,0 +1,71 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Nimbus State Sim</title>
</head>
<style>
body,
html {
font-family: monospace;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
.row-container {
display: flex;
width: 100%;
height: 100%;
flex-direction: column;
overflow: hidden;
}
.first-row {
background-image: url("https://our.status.im/content/images/2018/12/Artboard-1-copy-15@2x.png");
background-position: center; /* Center the image */
background-repeat: no-repeat; /* Do not repeat the image */
background-size: 100%;
border: none;
margin: 0;
padding: 0;
}
.second-row {
flex-grow: 1;
border: none;
margin: 0;
padding: 0;
}
</style>
<div class="row-container">
<div class="first-row">
<p><a href="https://github.com/status-im/nim-beacon-chain#state-transition-simulation">Ethereum Beacon Chain state transition simulation</a> (unoptimized work in progress, you might run out of memory)</p>
<form action="state_sim/state_sim.html" method="get" target="frame">
<table>
<tr>
<td>Create / Validate BLS signatures</td>
<td>Validators</td>
<td>Slots</td>
<td>Attestation ratio</td>
</tr>
<tr>
<td><input type="radio" name="--validate" value="true">true<input type="radio" name="--validate" value="false"
checked="true"> false</td>
<td><input type="text" name="--validators" value="100"></input></td>
<td><input type="text" name="--slots" value="10"></input></td>
<td><input type="text" name="--attesterRatio" value="0.9"></input></td>
</tr>
</table>
<input type="submit" value="Run that state transition!">
</form>
</div>
<iframe name="frame" src="" class="second-row"></iframe>
</div>
</html>

124
wasm/ncli_shell.html Normal file
View File

@ -0,0 +1,124 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Nimbus state transition function</title>
<style>
body,
html {
font-family: monospace;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
.row-container {
display: flex;
width: 100%;
height: 100%;
align-items: center;
flex-direction: column;
overflow: hidden;
}
.first-row {
}
.second-row {
flex-grow: 1;
border: none;
margin: 0;
padding: 0;
}
textarea.emscripten {
font-family: monospace;
background-color: beige;
width: 95%;
}
div.emscripten_border {
border: 1px solid black;
}
</style>
</head>
<body height="100%" class="row-container">
<div class="first-row">
<div class="emscripten" id="status">Running...</div>
<hr />
</div>
<textarea class="emscripten second-row" id="output" rows=50></textarea>
<script type='text/javascript'>
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var Module = {
arguments: window.location.search.substr(1).trim().split('&').concat(["--write_last_json:true"]),
preRun: [],
postRun: [() => offerFileAsDownload("state.json", "mime/type")],
print: (function () {
var element = document.getElementById('output');
if (element) element.value = ''; // clear browser cache
return function (text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&amp;");
//text = text.replace(/</g, "&lt;");
//text = text.replace(/>/g, "&gt;");
//text = text.replace('\n', '<br>', 'g');
console.log(text);
if (element) {
element.value += text + "\n";
element.scrollTop = element.scrollHeight; // focus on bottom
}
};
})(),
printErr: function (text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
console.error(text);
},
canvas: (function () { return null; })(),
setStatus: function (text) {
if (!Module.setStatus.last) Module.setStatus.last = { time: Date.now(), text: '' };
if (text === Module.setStatus.last.text) return;
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function (left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies - left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
window.onerror = function () {
Module.setStatus('Exception thrown, see JavaScript console');
Module.setStatus = function (text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
// Read `filename` from the Emscripten in-memory filesystem (Module.FS) and
// replace the status line with a browser download link for it.
// `mime` defaults to a generic binary content type when not given.
function offerFileAsDownload(filename, mime) {
  mime = mime || "application/octet-stream";

  let content = Module.FS.readFile(filename);
  // Fixed: the template literal contained a broken placeholder instead of
  // interpolating the file name.
  console.log(`Offering download of "${filename}", with ${content.length} bytes...`);

  var a = document.createElement('a');
  a.download = filename;
  // NOTE(review): link label is hard-coded; both call sites pass "state.json".
  a.innerText = "Download state.json";
  a.href = URL.createObjectURL(new Blob([content], { type: mime }));
  statusElement.innerHTML = "";
  statusElement.appendChild(a);
}
</script>
{{{ SCRIPT }}}
</body>
</html>

2
wasm/nim.cfg Normal file
View File

@ -0,0 +1,2 @@
-d:"network_type=none"
-u:metrics

124
wasm/state_sim_shell.html Normal file
View File

@ -0,0 +1,124 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Nimbus state transition function</title>
<style>
body,
html {
font-family: monospace;
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
.row-container {
display: flex;
width: 100%;
height: 100%;
align-items: center;
flex-direction: column;
overflow: hidden;
}
.first-row {
}
.second-row {
flex-grow: 1;
border: none;
margin: 0;
padding: 0;
}
textarea.emscripten {
font-family: monospace;
background-color: beige;
width: 95%;
}
div.emscripten_border {
border: 1px solid black;
}
</style>
</head>
<body height="100%" class="row-container">
<div class="first-row">
<div class="emscripten" id="status">Running...</div>
<hr />
</div>
<textarea class="emscripten second-row" id="output" rows=50></textarea>
<script type='text/javascript'>
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
var Module = {
arguments: window.location.search.substr(1).trim().split('&').concat(["--write_last_json:true"]),
preRun: [],
postRun: [() => offerFileAsDownload("state.json", "mime/type")],
print: (function () {
var element = document.getElementById('output');
if (element) element.value = ''; // clear browser cache
return function (text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&amp;");
//text = text.replace(/</g, "&lt;");
//text = text.replace(/>/g, "&gt;");
//text = text.replace('\n', '<br>', 'g');
console.log(text);
if (element) {
element.value += text + "\n";
element.scrollTop = element.scrollHeight; // focus on bottom
}
};
})(),
printErr: function (text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
console.error(text);
},
canvas: (function () { return null; })(),
setStatus: function (text) {
if (!Module.setStatus.last) Module.setStatus.last = { time: Date.now(), text: '' };
if (text === Module.setStatus.last.text) return;
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function (left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies - left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
window.onerror = function () {
Module.setStatus('Exception thrown, see JavaScript console');
Module.setStatus = function (text) {
if (text) Module.printErr('[post-exception status] ' + text);
};
};
// Read `filename` from the Emscripten in-memory filesystem (Module.FS) and
// replace the status line with a browser download link for it.
// `mime` defaults to a generic binary content type when not given.
function offerFileAsDownload(filename, mime) {
  mime = mime || "application/octet-stream";

  let content = Module.FS.readFile(filename);
  // Fixed: the template literal contained a broken placeholder instead of
  // interpolating the file name.
  console.log(`Offering download of "${filename}", with ${content.length} bytes...`);

  var a = document.createElement('a');
  a.download = filename;
  // NOTE(review): link label is hard-coded; both call sites pass "state.json".
  a.innerText = "Download state.json";
  a.href = URL.createObjectURL(new Blob([content], { type: mime }));
  statusElement.innerHTML = "";
  statusElement.appendChild(a);
}
</script>
{{{ SCRIPT }}}
</body>
</html>