Merge branch 'version-1.1.0' into unstable

commit 9776fbfe17

@ -280,8 +280,9 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 ## hash
 ```diff
 + HashArray OK
++ HashList OK
 ```
-OK: 1/1 Fail: 0/1 Skip: 0/1
+OK: 2/2 Fail: 0/2 Skip: 0/2
 ## state diff tests [Preset: mainnet]
 ```diff
 + random slot differences [Preset: mainnet] OK
@ -289,4 +290,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
-OK: 154/163 Fail: 0/163 Skip: 9/163
+OK: 155/164 Fail: 0/164 Skip: 9/164

CHANGELOG.md
@ -1,15 +1,60 @@
-TBD
-==================
+2021-04-05 v1.1.0
+=================
+
+This release brings planned reforms to our database schema that provide substantial
+performance improvements and pave the way for an improved doppelganger detection
+ready immediately to propose and attest to blocks (in a future release).
+
+Please be aware that we will remain committed to maintaining backwards compatibility between
+releases, but **this release does not support downgrading back to any previous 1.0.x release**.
+
+As a safety precaution, we advise you to **please backup your Nimbus database before upgrading**
+if possible.
 
 **New features:**
 
-* Added the `setGraffiti` RPC (POST /api/nimbus/v1/graffiti in the REST API)
+* More efficient state storage format ==> reduced I/O load and lower storage requirements.
+
+* More efficient in-memory cache for non-finalized states ==> significant reduction in memory
+  usage.
+
+* More efficient slashing database schema ==> scales better to a larger number of validators.
+
+* The metrics support is now compiled by default thanks to a new and more secure HTTP back-end.
+
+* Command-line tools for generating testnet keystores and JSON deposit files suitable for use
+  with the official network launchpads.
+
+* `setGraffiti` JSON-RPC call for modifying the graffiti bytes of the client at run-time.
+
+* `next_action_wait` metric indicating the time until the next scheduled
+  attestation or block proposal.
+
+* More convenient command-line help messages providing information regarding the default
+  values of all parameters.
+
+* `--direct-peer` gives you the ability to specify gossip nodes to automatically connect to.
+
+* Official docker images for ARM and ARM64.
+
+* Support for fallback `--web3-url` providers.
+
+**We've fixed:**
+
+* Long processing delays induced by database pruning.
+
+* File descriptor leaks (which manifested after failures of the selected web3 provider).
+
+* The validator APIs now return precise actual balances instead of rounded effective balances.
+
+* A connection tracking problem which produced failed outgoing connection attempts.
 
 **Breaking changes:**
 
-* Renamed some semi-internal debug rpc to be more explicit about their nature:
-  * `getGossipSubPeers` is now `debug_getGossipSubPeers`
-  * `getChronosFutures` is now `debug_getChronosFutures`
+* Nimbus-specific JSON-RPCs intended for debug purposes now have the `debug_` prefix:
+
+  - `getGossipSubPeers` is now `debug_getGossipSubPeers`
+  - `getChronosFutures` is now `debug_getChronosFutures`
 
 
 2021-03-10 v1.0.12

Makefile
@ -394,7 +394,7 @@ define CONNECT_TO_NETWORK_WITH_VALIDATOR_CLIENT
 endef
 
 define MAKE_DEPOSIT_DATA
-	build/nimbus_beacon_node deposits create \
+	build/nimbus_beacon_node deposits createTestnetDeposits \
 		--network=$(1) \
 		--new-wallet-file=build/data/shared_$(1)_$(NODE_ID)/wallet.json \
 		--out-validators-dir=build/data/shared_$(1)_$(NODE_ID)/validators \
@ -404,7 +404,7 @@ define MAKE_DEPOSIT_DATA
 endef
 
 define MAKE_DEPOSIT
-	build/nimbus_beacon_node deposits create \
+	build/nimbus_beacon_node deposits createTestnetDeposits \
 		--network=$(1) \
 		--out-deposits-file=nbc-$(1)-deposits.json \
 		--new-wallet-file=build/data/shared_$(1)_$(NODE_ID)/wallet.json \

@ -588,10 +588,6 @@ proc getStateOnlyMutableValidators(
     let numValidators = output.validators.len
     doAssert db.immutableValidatorsMem.len >= numValidators
 
-    output.validators.hashes.setLen(0)
-    for item in output.validators.indices.mitems():
-      item = 0
-
     for i in 0 ..< numValidators:
       let
         # Bypass hash cache invalidation
@ -602,7 +598,7 @@ proc getStateOnlyMutableValidators(
         assign(dstValidator.withdrawal_credentials,
           srcValidator.withdrawal_credentials)
 
-    output.validators.growHashes()
+    output.validators.resetCache()
 
     true
   of GetResult.notFound:

@ -45,7 +45,7 @@ type
     list = "Lists details about all wallets"
 
   DepositsCmd* {.pure.} = enum
-    # create = "Creates validator keystores and deposits"
+    createTestnetDeposits = "Creates validator keystores and deposits for testnet usage"
     `import` = "Imports password-protected keystores interactively"
     # status = "Displays status information about all deposits"
     exit = "Submits a validator voluntary exit"
@ -106,10 +106,9 @@ type
       desc: "A directory containing wallet files"
      name: "wallets-dir" }: Option[InputDir]
 
-    web3Url* {.
-      defaultValue: ""
-      desc: "URL of the Web3 server to observe Eth1"
-      name: "web3-url" }: string
+    web3Urls* {.
+      desc: "One or more Web3 provider URLs used for obtaining deposit contract data"
+      name: "web3-url" }: seq[string]
 
     web3Mode* {.
       hidden
@ -145,7 +144,7 @@ type
 
     slashingDbKind* {.
       hidden
-      defaultValue: SlashingDbKind.both
+      defaultValue: SlashingDbKind.v2
      desc: "The slashing DB flavour to use (v1, v2 or both) [=both]"
      name: "slashing-db-kind" }: SlashingDbKind
 
@ -389,8 +388,7 @@ type
 
     of deposits:
       case depositsCmd* {.command.}: DepositsCmd
-      #[
-      of DepositsCmd.create:
+      of DepositsCmd.createTestnetDeposits:
         totalDeposits* {.
           defaultValue: 1
           desc: "Number of deposits to generate"
@ -422,9 +420,10 @@ type
           desc: "Output wallet file"
           name: "new-wallet-file" }: Option[OutFile]
 
+      #[
       of DepositsCmd.status:
         discard
-      #]#
+      ]#
 
       of DepositsCmd.`import`:
         importedDepositsDir* {.
@ -664,11 +663,9 @@ func outWalletName*(config: BeaconNodeConf): Option[WalletName] =
     of WalletsCmd.restore: config.restoredWalletNameFlag
     of WalletsCmd.list: fail()
   of deposits:
-    # TODO: Uncomment when the deposits create command is restored
-    #case config.depositsCmd
-    #of DepositsCmd.create: config.newWalletNameFlag
-    #else: fail()
-    fail()
+    case config.depositsCmd
+    of DepositsCmd.createTestnetDeposits: config.newWalletNameFlag
+    else: fail()
   else:
     fail()
 
@ -683,11 +680,9 @@ func outWalletFile*(config: BeaconNodeConf): Option[OutFile] =
     of WalletsCmd.restore: config.restoredWalletFileFlag
     of WalletsCmd.list: fail()
   of deposits:
-    # TODO: Uncomment when the deposits create command is restored
-    #case config.depositsCmd
-    #of DepositsCmd.create: config.newWalletFileFlag
-    #else: fail()
-    fail()
+    case config.depositsCmd
+    of DepositsCmd.createTestnetDeposits: config.newWalletFileFlag
+    else: fail()
   else:
     fail()
 

@ -81,7 +81,8 @@ type
 
   Eth1Monitor* = ref object
     state: Eth1MonitorState
-    web3Url: string
+    startIdx: int
+    web3Urls: seq[string]
     eth1Network: Option[Eth1Network]
     depositContractAddress*: Eth1Address
 
@ -739,7 +740,7 @@ proc new(T: type Web3DataProvider,
          depositContractAddress: Eth1Address,
          web3Url: string): Future[Result[Web3DataProviderRef, string]] {.async.} =
   let web3Fut = newWeb3(web3Url)
-  yield web3Fut or sleepAsync(chronos.seconds(5))
+  yield web3Fut or sleepAsync(chronos.seconds(10))
   if (not web3Fut.finished) or web3Fut.failed:
     await cancelAndWait(web3Fut)
     return err "Failed to setup web3 connection"
@ -772,19 +773,22 @@ proc init*(T: type Eth1Chain, preset: RuntimePreset, db: BeaconChainDB): T =
 proc init*(T: type Eth1Monitor,
            preset: RuntimePreset,
            db: BeaconChainDB,
-           web3Url: string,
+           web3Urls: seq[string],
            depositContractAddress: Eth1Address,
            depositContractSnapshot: DepositContractSnapshot,
            eth1Network: Option[Eth1Network]): T =
-  var web3Url = web3Url
-  fixupWeb3Urls web3Url
+  doAssert web3Urls.len > 0
+
+  var web3Urls = web3Urls
+  for url in mitems(web3Urls):
+    fixupWeb3Urls url
 
   putInitialDepositContractSnapshot(db, depositContractSnapshot)
 
   T(state: Initialized,
     eth1Chain: Eth1Chain.init(preset, db),
     depositContractAddress: depositContractAddress,
-    web3Url: web3Url,
+    web3Urls: web3Urls,
     eth1Network: eth1Network,
     eth1Progress: newAsyncEvent())
 
@ -1000,12 +1004,15 @@ proc startEth1Syncing(m: Eth1Monitor, delayBeforeStart: Duration) {.async.} =
   if delayBeforeStart != ZeroDuration:
     await sleepAsync(delayBeforeStart)
 
+  let web3Url = m.web3Urls[m.startIdx mod m.web3Urls.len]
+  inc m.startIdx
+
   info "Starting Eth1 deposit contract monitoring",
-    contract = $m.depositContractAddress, url = m.web3Url
+    contract = $m.depositContractAddress, url = web3Url
 
   let dataProviderRes = await Web3DataProvider.new(
     m.depositContractAddress,
-    m.web3Url)
+    web3Url)
 
   m.dataProvider = dataProviderRes.tryGet()
   let web3 = m.dataProvider.web3
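
The hunk above is the core of the new `--web3-url` fallback support: every (re)start of the monitor selects the next provider in the configured list. In isolation, the selection logic looks roughly like this (a standalone sketch with a hypothetical trimmed-down `Monitor` type; the field names are taken from the diff):

```nim
type Monitor = object
  web3Urls: seq[string] # ordered providers, primary first
  startIdx: int         # bumped on every (re)start of the monitor

proc pickUrl(m: var Monitor): string =
  # Same arithmetic as in `startEth1Syncing` above: wrap around the list,
  # so a restart after a provider failure moves on to the next URL.
  result = m.web3Urls[m.startIdx mod m.web3Urls.len]
  inc m.startIdx

var m = Monitor(web3Urls: @["wss://primary.example", "wss://fallback.example"])
doAssert m.pickUrl() == "wss://primary.example"
doAssert m.pickUrl() == "wss://fallback.example"
doAssert m.pickUrl() == "wss://primary.example" # wrapped around
```
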
@ -1150,12 +1157,14 @@ when hasGenesisDetection:
   proc init*(T: type Eth1Monitor,
              db: BeaconChainDB,
              preset: RuntimePreset,
-             web3Url: string,
+             web3Urls: seq[string],
              depositContractAddress: Eth1Address,
              depositContractDeployedAt: BlockHashOrNumber,
              eth1Network: Option[Eth1Network]): Future[Result[T, string]] {.async.} =
+    doAssert web3Urls.len > 0
     try:
-      let dataProviderRes = await Web3DataProvider.new(depositContractAddress, web3Url)
+      var urlIdx = 0
+      let dataProviderRes = await Web3DataProvider.new(depositContractAddress, web3Urls[urlIdx])
       if dataProviderRes.isErr:
         return err(dataProviderRes.error)
       var dataProvider = dataProviderRes.get
@ -1180,8 +1189,10 @@ when hasGenesisDetection:
|
||||||
# Until this is fixed upstream, we'll just try to recreate
|
# Until this is fixed upstream, we'll just try to recreate
|
||||||
# the web3 provider before retrying. In case this fails,
|
# the web3 provider before retrying. In case this fails,
|
||||||
# the Eth1Monitor will be restarted.
|
# the Eth1Monitor will be restarted.
|
||||||
|
inc urlIdx
|
||||||
dataProvider = tryGet(
|
dataProvider = tryGet(
|
||||||
await Web3DataProvider.new(depositContractAddress, web3Url))
|
await Web3DataProvider.new(depositContractAddress,
|
||||||
|
web3Urls[urlIdx mod web3Urls.len]))
|
||||||
blk.hash.asEth2Digest
|
blk.hash.asEth2Digest
|
||||||
|
|
||||||
let depositContractSnapshot = DepositContractSnapshot(
|
let depositContractSnapshot = DepositContractSnapshot(
|
||||||
|
@ -1190,7 +1201,7 @@ when hasGenesisDetection:
|
||||||
var monitor = Eth1Monitor.init(
|
var monitor = Eth1Monitor.init(
|
||||||
db,
|
db,
|
||||||
preset,
|
preset,
|
||||||
web3Url,
|
web3Urls,
|
||||||
depositContractAddress,
|
depositContractAddress,
|
||||||
depositContractSnapshot,
|
depositContractSnapshot,
|
||||||
eth1Network)
|
eth1Network)
|
||||||
|
|
|

@ -155,7 +155,7 @@ proc init*(T: type BeaconNode,
       # This is a fresh start without a known genesis state
       # (most likely, it hasn't arrived yet). We'll try to
       # obtain a genesis through the Eth1 deposits monitor:
-      if config.web3Url.len == 0:
+      if config.web3Urls.len == 0:
         fatal "Web3 URL not specified"
         quit 1
 
@ -164,7 +164,7 @@ proc init*(T: type BeaconNode,
       let eth1MonitorRes = waitFor Eth1Monitor.init(
         runtimePreset,
         db,
-        config.web3Url,
+        config.web3Urls,
         depositContractAddress,
         depositContractDeployedAt,
         eth1Network)
@ -172,7 +172,7 @@ proc init*(T: type BeaconNode,
       if eth1MonitorRes.isErr:
         fatal "Failed to start Eth1 monitor",
           reason = eth1MonitorRes.error,
-          web3Url = config.web3Url,
+          web3Urls = config.web3Urls,
           depositContractAddress,
           depositContractDeployedAt
         quit 1
@ -275,14 +275,14 @@ proc init*(T: type BeaconNode,
       chainDag.setTailState(checkpointState[], checkpointBlock)
 
     if eth1Monitor.isNil and
-       config.web3Url.len > 0 and
+       config.web3Urls.len > 0 and
        genesisDepositsSnapshotContents.len > 0:
       let genesisDepositsSnapshot = SSZ.decode(genesisDepositsSnapshotContents,
                                                DepositContractSnapshot)
       eth1Monitor = Eth1Monitor.init(
         runtimePreset,
         db,
-        config.web3Url,
+        config.web3Urls,
         depositContractAddress,
         genesisDepositsSnapshot,
         eth1Network)
@ -1712,8 +1712,8 @@ proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raise
   let
     startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset)
     outGenesis = config.outputGenesis.string
-    eth1Hash = if config.web3Url.len == 0: eth1BlockHash
-               else: (waitFor getEth1BlockHash(config.web3Url, blockId("latest"))).asEth2Digest
+    eth1Hash = if config.web3Urls.len == 0: eth1BlockHash
+               else: (waitFor getEth1BlockHash(config.web3Urls[0], blockId("latest"))).asEth2Digest
     runtimePreset = getRuntimePresetForNetwork(config.eth2Network)
   var
     initialState = initialize_beacon_state_from_eth1(
@ -1751,11 +1751,22 @@ proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raise
     writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
     echo "Wrote ", bootstrapFile
 
+proc findWalletWithoutErrors(config: BeaconNodeConf,
+                             name: WalletName): Option[WalletPathPair] =
+  let res = findWallet(config, name)
+  if res.isErr:
+    fatal "Failed to locate wallet", error = res.error
+    quit 1
+  res.get
+
 proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
     raises: [Defect, CatchableError].} =
   case config.depositsCmd
-  #[
-  of DepositsCmd.create:
+  of DepositsCmd.createTestnetDeposits:
+    if config.eth2Network.isNone:
+      fatal "Please specify the intended testnet for the deposits"
+      quit 1
+    let metadata = config.loadEth2Network()
     var seed: KeySeed
     defer: burnMem(seed)
     var walletPath: WalletPathPair
@ -1763,7 +1774,7 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
     if config.existingWalletId.isSome:
       let
         id = config.existingWalletId.get
-        found = findWalletWithoutErrors(id)
+        found = findWalletWithoutErrors(config, id)
 
       if found.isSome:
         walletPath = found.get
@ -1778,7 +1789,7 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
         # The failure will be reported in `unlockWalletInteractively`.
         quit 1
     else:
-      var walletRes = createWalletInteractively(rng[], config)
+      var walletRes = createWalletInteractively(rng, config)
       if walletRes.isErr:
         fatal "Unable to create wallet", err = walletRes.error
         quit 1
@ -1797,8 +1808,8 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
       quit QuitFailure
 
     let deposits = generateDeposits(
-      runtimePreset,
-      rng[],
+      metadata.runtimePreset,
+      rng,
       seed,
       walletPath.wallet.nextAccount,
      config.totalDeposits,
@ -1816,7 +1827,7 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
         config.outValidatorsDir / "deposit_data-" & $epochTime() & ".json"
 
     let launchPadDeposits =
-      mapIt(deposits.value, LaunchPadDeposit.init(runtimePreset, it))
+      mapIt(deposits.value, LaunchPadDeposit.init(metadata.runtimePreset, it))
 
       Json.saveFile(depositDataPath, launchPadDeposits)
       echo "Deposit data written to \"", depositDataPath, "\""
@ -1831,12 +1842,12 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
     except CatchableError as err:
       fatal "Failed to create launchpad deposit data file", err = err.msg
       quit 1
 
+  #[
   of DepositsCmd.status:
     echo "The status command is not implemented yet"
     quit 1
-  #]#
+  ]#
 
   of DepositsCmd.`import`:
     let validatorKeysDir = if config.importedDepositsDir.isSome:
       config.importedDepositsDir.get
@ -1861,19 +1872,12 @@ proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
 
 proc doWallets(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
     raises: [Defect, CatchableError].} =
-  template findWalletWithoutErrors(name: WalletName): auto =
-    let res = keystore_management.findWallet(config, name)
-    if res.isErr:
-      fatal "Failed to locate wallet", error = res.error
-      quit 1
-    res.get
-
   case config.walletsCmd:
   of WalletsCmd.create:
     if config.createdWalletNameFlag.isSome:
       let
         name = config.createdWalletNameFlag.get
-        existingWallet = findWalletWithoutErrors(name)
+        existingWallet = findWalletWithoutErrors(config, name)
       if existingWallet.isSome:
         echo "The Wallet '" & name.string & "' already exists."
         quit 1

@ -122,8 +122,11 @@ proc process_deposit*(preset: RuntimePreset,
   # by the deposit contract
   if skipBLSValidation in flags or verify_deposit_signature(preset, deposit.data):
     # New validator! Add validator and balance entries
-    state.validators.add(get_validator_from_deposit(deposit.data))
-    state.balances.add(amount)
+    if not state.validators.add(get_validator_from_deposit(deposit.data)):
+      return err("process_deposit: too many validators")
+    if not state.balances.add(amount):
+      static: doAssert state.balances.maxLen == state.validators.maxLen
+      raiseAssert "adding validator succeeded, so should balances"
 
     doAssert state.validators.len == state.balances.len
   else:
@ -298,8 +301,11 @@ proc initialize_beacon_state_from_eth1*(
     if skipBlsValidation in flags or
        verify_deposit_signature(preset, deposit):
       pubkeyToIndex[pubkey] = state.validators.len
-      state.validators.add(get_validator_from_deposit(deposit))
-      state.balances.add(amount)
+      if not state.validators.add(get_validator_from_deposit(deposit)):
+        raiseAssert "too many validators"
+      if not state.balances.add(amount):
+        raiseAssert "same as validators"
+
     else:
       # Invalid deposits are perfectly possible
       trace "Skipping deposit with invalid signature",
@ -507,7 +513,8 @@ func get_sorted_attesting_indices_list*(
     state: BeaconState, data: AttestationData, bits: CommitteeValidatorsBits,
     cache: var StateCache): List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE] =
   for index in get_sorted_attesting_indices(state, data, bits, cache):
-    result.add index.uint64
+    if not result.add index.uint64:
+      raiseAssert "The `result` list has the same max size as the sorted `bits` input"
 
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_indexed_attestation
 func get_indexed_attestation(state: BeaconState, attestation: Attestation,
@ -626,6 +633,8 @@ proc process_attestation*(
   # data sadly is a processing hotspot - the business with the addDefault
   # pointer is here simply to work around the poor codegen
   var pa = attestations.addDefault()
+  if pa.isNil:
+    return err("process_attestation: too many pending attestations")
   assign(pa[].aggregation_bits, attestation.aggregation_bits)
   pa[].data = attestation.data
   pa[].inclusion_delay = state.slot - attestation.data.slot
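
`addDefault` hands back a pointer to the freshly appended element; judging by the `isNil` check introduced above, it returns `nil` once the list is already at `maxLen`. A toy sketch under that assumption (hypothetical element type and limit, using the `HashList` from this codebase):

```nim
var pending: HashList[uint64, 4] # toy maxLen for illustration
for i in 0 ..< 4:
  doAssert pending.addDefault() != nil # room left: pointer to a fresh slot
doAssert pending.addDefault() == nil   # full: the caller must handle nil
```
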
@ -728,6 +728,7 @@ proc writeValue*(writer: var JsonWriter, value: HashList)
 
 proc readValue*(reader: var JsonReader, value: var HashList)
                {.raises: [IOError, SerializationError, Defect].} =
+  value.resetCache()
   readValue(reader, value.data)
 
 template writeValue*(writer: var JsonWriter, value: Version | ForkDigest) =

@ -73,13 +73,12 @@ func `xor`[T: array](a, b: T): T =
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#randao
 proc process_randao(
     state: var BeaconState, body: SomeBeaconBlockBody, flags: UpdateFlags,
-    stateCache: var StateCache): bool {.nbench.} =
+    stateCache: var StateCache): Result[void, cstring] {.nbench.} =
   let
     proposer_index = get_beacon_proposer_index(state, stateCache)
 
   if proposer_index.isNone:
-    debug "Proposer index missing, probably along with any active validators"
-    return false
+    return err("process_randao: proposer index missing, probably along with any active validators")
 
   # Verify RANDAO reveal
   let
@ -91,11 +90,8 @@ proc process_randao(
   if not verify_epoch_signature(
       state.fork, state.genesis_validators_root, epoch, proposer_pubkey,
       body.randao_reveal):
-    debug "Randao mismatch", proposer_pubkey = shortLog(proposer_pubkey),
-      epoch,
-      signature = shortLog(body.randao_reveal),
-      slot = state.slot
-    return false
+    return err("process_randao: invalid epoch signature")
 
   # Mix it in
   let
@ -105,15 +101,18 @@ proc process_randao(
   state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR].data =
     mix.data xor rr
 
-  true
+  ok()
 
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#eth1-data
-func process_eth1_data(state: var BeaconState, body: SomeBeaconBlockBody) {.nbench.}=
-  state.eth1_data_votes.add body.eth1_data
+func process_eth1_data(state: var BeaconState, body: SomeBeaconBlockBody): Result[void, cstring] {.nbench.}=
+  if not state.eth1_data_votes.add body.eth1_data:
+    # Count is reset in process_final_updates, so this should never happen
+    return err("process_eth1_data: no more room for eth1 data")
 
   if state.eth1_data_votes.asSeq.count(body.eth1_data).uint64 * 2 >
       SLOTS_PER_ETH1_VOTING_PERIOD:
     state.eth1_data = body.eth1_data
+  ok()
 
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#is_slashable_validator
 func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
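
The adoption rule in `process_eth1_data` is unchanged by this refactoring: a candidate `eth1_data` is adopted once it holds a strict majority of the votes cast during the voting period. A worked check of the arithmetic (the mainnet constant value is assumed here, not taken from the diff):

```nim
const SLOTS_PER_ETH1_VOTING_PERIOD = 2048'u64 # assumed mainnet value: 64 epochs * 32 slots

func adopted(votes: uint64): bool =
  # Mirrors the `count * 2 > SLOTS_PER_ETH1_VOTING_PERIOD` test above
  votes * 2 > SLOTS_PER_ETH1_VOTING_PERIOD

doAssert not adopted(1024) # exactly half is not a strict majority
doAssert adopted(1025)
```
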
@ -340,20 +339,14 @@ proc process_operations(preset: RuntimePreset,
 proc process_block*(
     preset: RuntimePreset,
     state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
-    stateCache: var StateCache): Result[void, cstring] {.nbench.}=
+    cache: var StateCache): Result[void, cstring] {.nbench.}=
   ## When there's a new block, we need to verify that the block is sane and
   ## update the state accordingly - the state is left in an unknown state when
   ## block application fails (!)
 
-  ? process_block_header(state, blck, flags, stateCache)
-
-  if not process_randao(state, blck.body, flags, stateCache):
-    return err("Randao failure".cstring)
-
-  process_eth1_data(state, blck.body)
-
-  let res_ops = process_operations(preset, state, blck.body, flags, stateCache)
-  if res_ops.isErr:
-    return res_ops
+  ? process_block_header(state, blck, flags, cache)
+  ? process_randao(state, blck.body, flags, cache)
+  ? process_eth1_data(state, blck.body)
+  ? process_operations(preset, state, blck.body, flags, cache)
 
   ok()
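
With every stage now returning `Result[void, cstring]`, `process_block` collapses into a chain of `?` applications. The `?` operator comes from the results module used throughout this codebase (nim-stew) and returns the error to the caller as soon as one step fails. A minimal standalone illustration (assumes `stew/results` is available):

```nim
import stew/results

func step(cond: bool): Result[void, cstring] =
  if cond:
    ok()
  else:
    err("step failed")

func pipeline(): Result[void, cstring] =
  ? step(true)  # success: execution continues to the next stage
  ? step(false) # failure: `pipeline` returns this error immediately
  ok()

doAssert pipeline().isErr
```
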
@ -598,8 +598,9 @@ func process_historical_roots_update(state: var BeaconState) {.nbench.} =
   # significant additional stack or heap.
   # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#historicalbatch
   # In response to https://github.com/status-im/nimbus-eth2/issues/921
-  state.historical_roots.add hash_tree_root(
-    [hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)])
+  if not state.historical_roots.add hash_tree_root(
+      [hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)]):
+    raiseAssert "no more room for historical roots, so long and thanks for the fish!"
 
 # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md#participation-records-rotation
 func process_participation_record_updates(state: var BeaconState) {.nbench.} =

@ -24,9 +24,8 @@ template setOutputSize[R, T](a: var array[R, T], length: int) =
     raiseIncorrectSize a.type
 
 proc setOutputSize(list: var List, length: int) {.raisesssz.} =
-  if int64(length) > list.maxLen:
+  if not list.setLen length:
     raise newException(MalformedSszError, "SSZ list maximum size exceeded")
-  list.setLen length
 
 # fromSszBytes copies the wire representation to a Nim variable,
 # assuming there's enough data in the buffer
@ -140,15 +139,9 @@ func readSszValue*[T](input: openArray[byte],
     if resultBytesCount == maxExpectedSize:
       checkForForbiddenBits(T, input, val.maxLen + 1)
 
-  elif val is HashList:
+  elif val is HashList | HashArray:
     readSszValue(input, val.data)
-    val.hashes.setLen(0)
-    val.growHashes()
-
-  elif val is HashArray:
-    readSszValue(input, val.data)
-    for h in val.hashes.mitems():
-      clearCache(h)
+    val.resetCache()
 
   elif val is List|array:
     type E = type val[0]

@ -85,13 +85,38 @@ type
   BitList*[maxLen: static Limit] = distinct BitSeq
 
   HashArray*[maxLen: static Limit; T] = object
+    ## Array implementation that caches the hash of each chunk of data - see
+    ## also HashList for more details.
     data*: array[maxLen, T]
     hashes* {.dontSerialize.}: array[maxChunkIdx(T, maxLen), Eth2Digest]
 
   HashList*[T; maxLen: static Limit] = object
+    ## List implementation that caches the hash of each chunk of data as well
+    ## as the combined hash of each level of the merkle tree using a flattened
+    ## list of hashes.
+    ##
+    ## The merkle tree of a list is formed by imagining a virtual buffer of
+    ## `maxLen` length which is zero-filled where there is no data. Then,
+    ## a merkle tree of hashes is formed as usual - at each level of the tree,
+    ## iff the hash is combined from two zero-filled chunks, the hash is not
+    ## stored in the `hashes` list - instead, `indices` keeps track of where in
+    ## the list each level starts. When the length of `data` changes, the
+    ## `hashes` and `indices` structures must be updated accordingly using
+    ## `growHashes`.
+    ##
+    ## All mutating operators (those that take `var HashList`) will
+    ## automatically invalidate the cache for the relevant chunks - the leaf and
+    ## all intermediate chunk hashes up to the root. When large changes are made
+    ## to `data`, it might be more efficient to batch the updates then reset
+    ## the cache using `resetCache` instead.
+
     data*: List[T, maxLen]
-    hashes* {.dontSerialize.}: seq[Eth2Digest]
-    indices* {.dontSerialize.}: array[hashListIndicesLen(maxChunkIdx(T, maxLen)), int64]
+    hashes* {.dontSerialize.}: seq[Eth2Digest] ## \
+      ## Flattened tree store that skips "empty" branches of the tree - the
+      ## starting index in this sequence of each "level" in the tree is found
+      ## in `indices`.
+    indices* {.dontSerialize.}: array[hashListIndicesLen(maxChunkIdx(T, maxLen)), int64] ##\
+      ## Holds the starting index in the hashes list for each level of the tree
 
   # Note for readers:
   # We use `array` for `Vector` and
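
The new doc comments pin down the caching contract: exported mutators keep the chunk hashes consistent automatically, while any direct rewrite of `data` must be followed by `resetCache` before hashing again. A short sketch of both paths (assumes this module's `HashList` and a compatible `hash_tree_root` are in scope; the values are arbitrary):

```nim
var xs: HashList[uint64, 1024]
doAssert xs.add(1'u64)        # exported mutator: affected chunk hashes are cleared
discard hash_tree_root(xs)    # recomputes only the invalidated chunks

for v in xs.data.mitems:      # bypasses the HashList operators: cache is now stale
  v = 2'u64
xs.resetCache()               # required before the next hash_tree_root
doAssert hash_tree_root(xs) == hash_tree_root(xs.data)
```
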
@ -115,9 +140,7 @@ template init*[T, N](L: type List[T, N], x: seq[T]): auto =
   List[T, N](x)
 
 template `$`*(x: List): auto = $(distinctBase x)
-template add*(x: var List, val: auto) = add(distinctBase x, val)
 template len*(x: List): auto = len(distinctBase x)
-template setLen*(x: var List, val: auto) = setLen(distinctBase x, val)
 template low*(x: List): auto = low(distinctBase x)
 template high*(x: List): auto = high(distinctBase x)
 template `[]`*(x: List, idx: auto): untyped = distinctBase(x)[idx]
@ -131,6 +154,20 @@ template pairs* (x: List): untyped = pairs(distinctBase x)
 template mitems*(x: var List): untyped = mitems(distinctBase x)
 template mpairs*(x: var List): untyped = mpairs(distinctBase x)
 
+proc add*(x: var List, val: auto): bool =
+  if x.len < x.maxLen:
+    add(distinctBase x, val)
+    true
+  else:
+    false
+
+proc setLen*(x: var List, newLen: int): bool =
+  if newLen <= x.maxLen:
+    setLen(distinctBase x, newLen)
+    true
+  else:
+    false
+
 template init*(L: type BitList, x: seq[byte], N: static Limit): auto =
   BitList[N](data: x)
 
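
These procs replace the unchecked `add`/`setLen` templates deleted a few lines earlier: every mutation now reports whether it stayed within `maxLen`, so callers either propagate the failure (as the SSZ reader does) or assert that it cannot happen (as the state-transition code does). Usage sketch with a toy limit:

```nim
var xs: List[uint64, 8]
for i in 0'u64 ..< 8'u64:
  if not xs.add(i):
    raiseAssert "within maxLen by construction"
doAssert not xs.add(8'u64) # a ninth element would exceed maxLen
doAssert xs.setLen(0)      # any length <= maxLen succeeds
```
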
@ -194,13 +231,16 @@ func nodesAtLayer*(layer, depth, leaves: int): int =
 
 func cacheNodes*(depth, leaves: int): int =
   ## Total number of nodes needed to cache a tree of a given depth with
-  ## `leaves` items in it (the rest zero-filled)
+  ## `leaves` items in it - chunks that are zero-filled have well-known hash
+  ## trees and don't need to be stored in the tree.
   var res = 0
   for i in 0..<depth:
     res += nodesAtLayer(i, depth, leaves)
   res
 
 proc clearCaches*(a: var HashList, dataIdx: int64) =
+  ## Clear each level of the merkle tree up to the root affected by a data
+  ## change at `dataIdx`.
   if a.hashes.len == 0:
     return
 
@ -212,6 +252,8 @@ proc clearCaches*(a: var HashList, dataIdx: int64) =
       idxInLayer = idx - (1'i64 shl layer)
       layerIdx = idxInlayer + a.indices[layer]
     if layerIdx < a.indices[layer + 1]:
+      # Only clear cache when we're actually storing it - ie it hasn't been
+      # skipped by the "combined zero hash" optimization
       clearCache(a.hashes[layerIdx])
 
     idx = idx shr 1
@ -225,7 +267,8 @@ proc clearCache*(a: var HashList) =
   for c in a.hashes.mitems(): clearCache(c)
 
 proc growHashes*(a: var HashList) =
-  # Ensure that the hash cache is big enough for the data in the list
+  ## Ensure that the hash cache is big enough for the data in the list - must
+  ## be called whenever `data` grows.
   let
     leaves = int(
       chunkIdx(a, a.data.len() + dataPerChunk(a.T) - 1))
@ -250,12 +293,26 @@ proc growHashes*(a: var HashList) =
   swap(a.hashes, newHashes)
   a.indices = newIndices
 
+proc resetCache*(a: var HashList) =
+  ## Perform a full reset of the hash cache, for example after data has been
+  ## rewritten "manually" without going through the exported operators
+  a.hashes.setLen(0)
+  a.indices = default(type a.indices)
+  a.growHashes()
+
+proc resetCache*(a: var HashArray) =
+  for h in a.hashes.mitems():
+    clearCache(h)
+
 template len*(a: type HashArray): auto = int(a.maxLen)
 
-template add*(x: var HashList, val: auto) =
-  add(x.data, val)
-  x.growHashes()
-  clearCaches(x, x.data.len() - 1)
+proc add*(x: var HashList, val: auto): bool =
+  if add(x.data, val):
+    x.growHashes()
+    clearCaches(x, x.data.len() - 1)
+    true
+  else:
+    false
 
 proc addDefault*(x: var HashList): ptr x.T =
   distinctBase(x.data).setLen(x.data.len + 1)
@ -299,7 +356,8 @@ template swap*(a, b: var HashList) =
   swap(a.indices, b.indices)
 
 template clear*(a: var HashList) =
-  a.data.setLen(0)
+  if not a.data.setLen(0):
+    raiseAssert "length 0 should always succeed"
   a.hashes.setLen(0)
   a.indices = default(type a.indices)
 

@ -31,9 +31,10 @@ func applyValidatorIdentities(
     validators: var HashList[Validator, Limit VALIDATOR_REGISTRY_LIMIT],
     hl: auto) =
   for item in hl:
-    validators.add Validator(
+    if not validators.add Validator(
       pubkey: item.pubkey,
-      withdrawal_credentials: item.withdrawal_credentials)
+      withdrawal_credentials: item.withdrawal_credentials):
+      raiseAssert "cannot readd"
 
 func setValidatorStatuses(
     validators: var HashList[Validator, Limit VALIDATOR_REGISTRY_LIMIT],
@ -67,7 +68,8 @@ func replaceOrAddEncodeEth1Votes[T, U](votes0, votes1: HashList[T, U]):
 
   result[0] = lower_bound == 0
   for i in lower_bound ..< votes1.len:
-    result[1].add votes1[i]
+    if not result[1].add votes1[i]:
+      raiseAssert "same limit"
 
 func replaceOrAddDecodeEth1Votes[T, U](
     votes0: var HashList[T, U], eth1_data_votes_replaced: bool,
@ -76,11 +78,13 @@ func replaceOrAddDecodeEth1Votes[T, U](
     votes0 = HashList[T, U]()
 
   for item in votes1:
-    votes0.add item
+    if not votes0.add item:
+      raiseAssert "same limit"
 
 func getMutableValidatorStatuses(state: BeaconState):
     List[ValidatorStatus, Limit VALIDATOR_REGISTRY_LIMIT] =
-  result.setLen(state.validators.len)
+  if not result.setLen(state.validators.len):
+    raiseAssert "same limit as validators"
   for i in 0 ..< state.validators.len:
     let validator = unsafeAddr state.validators.data[i]
     assign(result[i].effective_balance, validator.effective_balance)
@ -149,9 +153,8 @@ func applyDiff*(
     immutableValidators: openArray[ImmutableValidatorData],
     stateDiff: BeaconStateDiff) =
   template assign[T, U](tgt: var HashList[T, U], src: List[T, U]) =
-    tgt.clear()
     assign(tgt.data, src)
-    tgt.growHashes()
+    tgt.resetCache()
 
   # Carry over unchanged genesis_time, genesis_validators_root, and fork.
   assign(state.latest_block_header, stateDiff.latest_block_header)
@ -159,7 +162,8 @@ func applyDiff*(
   applyModIncrement(state.block_roots, stateDiff.block_roots, state.slot.uint64)
   applyModIncrement(state.state_roots, stateDiff.state_roots, state.slot.uint64)
   if stateDiff.historical_root_added:
-    state.historical_roots.add stateDiff.historical_root
+    if not state.historical_roots.add stateDiff.historical_root:
+      raiseAssert "cannot readd historical state root"
 
   assign(state.eth1_data, stateDiff.eth1_data)
   replaceOrAddDecodeEth1Votes(

@ -15,8 +15,8 @@ when not defined(nimscript):
 
 const
   versionMajor* = 1
-  versionMinor* = 0
-  versionBuild* = 12
+  versionMinor* = 1
+  versionBuild* = 0
 
   versionBlob* = "stateofus" # Single word - ends up in the default graffiti
 

@ -65,7 +65,7 @@ if [ "$ETH1_PRIVATE_KEY" != "" ]; then
   echo "Done: $DEPOSIT_CONTRACT_ADDRESS"
 fi
 
-echo "Building a local nimbus_beacon_node instance for 'deposits create' and 'createTestnet'"
+echo "Building a local nimbus_beacon_node instance for 'deposits createTestnetDeposits' and 'createTestnet'"
 make -j2 NIMFLAGS="-d:testnet_servers_image ${NETWORK_NIM_FLAGS}" nimbus_beacon_node nimbus_signing_process process_dashboard
 
 echo "Generating Grafana dashboards for remote testnet servers"
@ -83,7 +83,7 @@ echo "Building Docker image..."
 # in docker/Makefile, and are enabled by default.
 make build
 
-../build/nimbus_beacon_node deposits create \
+../build/nimbus_beacon_node deposits createTestnetDeposits \
   --count=$TOTAL_VALIDATORS \
   --out-validators-dir="$VALIDATORS_DIR_ABS" \
   --out-secrets-dir="$SECRETS_DIR_ABS" \

@ -1 +1 @@
-deposits create --network=spadina --new-wallet-file=build/data/shared_spadina_0/wallet.json --out-validators-dir=build/data/shared_spadina_0/validators --out-secrets-dir=build/data/shared_spadina_0/secrets --out-deposits-file=spadina-deposits_data-20201001212925.json --count=1
+deposits createTestnetDeposits --network=spadina --new-wallet-file=build/data/shared_spadina_0/wallet.json --out-validators-dir=build/data/shared_spadina_0/validators --out-secrets-dir=build/data/shared_spadina_0/secrets --out-deposits-file=spadina-deposits_data-20201001212925.json --count=1

@ -35,8 +35,10 @@ func add(v: var Deltas, idx: int, delta: Delta) =
   v.penalties[idx] += delta.penalties
 
 func init(T: type Deltas, len: int): T =
-  result.rewards.setLen(len)
-  result.penalties.setLen(len)
+  if not result.rewards.setLen(len):
+    raiseAssert "setLen"
+  if not result.penalties.setLen(len):
+    raiseAssert "setLen"
 
 proc runTest(rewardsDir, identifier: string) =
   # We wrap the tests in a proc to avoid running out of globals

@ -91,31 +91,40 @@ suiteReport "SSZ navigator":
     let b = [byte 0x04, 0x05, 0x06].toDigest
     let c = [byte 0x07, 0x08, 0x09].toDigest
 
+    var xx: List[uint64, 16]
+    check:
+      not xx.setLen(17)
+      xx.setLen(16)
+
     var leaves = HashList[Eth2Digest, 1'i64 shl 3]()
-    leaves.add a
-    leaves.add b
-    leaves.add c
+    check:
+      leaves.add a
+      leaves.add b
+      leaves.add c
     let root = hash_tree_root(leaves)
     check $root == "5248085b588fab1dd1e03f3cd62201602b12e6560665935964f46e805977e8c5"
 
     while leaves.len < 1 shl 3:
-      leaves.add c
-      check hash_tree_root(leaves) == hash_tree_root(leaves.data)
+      check:
+        leaves.add c
+        hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
     leaves = default(type leaves)
 
     while leaves.len < (1 shl 3) - 1:
-      leaves.add c
-      leaves.add c
-      check hash_tree_root(leaves) == hash_tree_root(leaves.data)
+      check:
+        leaves.add c
+        leaves.add c
+        hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
     leaves = default(type leaves)
 
     while leaves.len < (1 shl 3) - 2:
-      leaves.add c
-      leaves.add c
-      leaves.add c
-      check hash_tree_root(leaves) == hash_tree_root(leaves.data)
+      check:
+        leaves.add c
+        leaves.add c
+        leaves.add c
+        hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
     for i in 0 ..< leaves.data.len - 2:
       leaves[i] = a
@ -124,23 +133,26 @@ suiteReport "SSZ navigator":
     check hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
     var leaves2 = HashList[Eth2Digest, 1'i64 shl 48]() # Large number!
-    leaves2.add a
-    leaves2.add b
-    leaves2.add c
-    check hash_tree_root(leaves2) == hash_tree_root(leaves2.data)
+    check:
+      leaves2.add a
+      leaves2.add b
+      leaves2.add c
+      hash_tree_root(leaves2) == hash_tree_root(leaves2.data)
 
     var leaves3 = HashList[Eth2Digest, 7]() # Non-power-of-2
-    check hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
-    leaves3.add a
-    leaves3.add b
-    leaves3.add c
-    check hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
+    check:
+      hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
+      leaves3.add a
+      leaves3.add b
+      leaves3.add c
+      hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
 
   timedTest "basictype":
     var leaves = HashList[uint64, 1'i64 shl 3]()
     while leaves.len < leaves.maxLen:
-      leaves.add leaves.lenu64
-      check hash_tree_root(leaves) == hash_tree_root(leaves.data)
+      check:
+        leaves.add leaves.lenu64
+        hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
 suiteReport "SSZ dynamic navigator":
   timedTest "navigating fields":
@ -197,10 +209,45 @@ suiteReport "hash":
 
     both: it.arr[0].data[0] = byte 1
 
-    both: it.li.add Eth2Digest()
+    both: check: it.li.add Eth2Digest()
 
   var y: HashArray[32, uint64]
   doAssert hash_tree_root(y) == hash_tree_root(y.data)
   for i in 0..<y.len:
     y[i] = 42'u64
     doAssert hash_tree_root(y) == hash_tree_root(y.data)
+
+  timedTest "HashList":
+    type MyList = HashList[uint64, 1024]
+    var
+      small, large: MyList
+
+    check: small.add(10'u64)
+
+    for i in 0..<100:
+      check: large.add(uint64(i))
+
+    let
+      sroot = hash_tree_root(small)
+      lroot = hash_tree_root(large)
+
+    doAssert sroot == hash_tree_root(small.data)
+    doAssert lroot == hash_tree_root(large.data)
+
+    var
+      sbytes = SSZ.encode(small)
+      lbytes = SSZ.encode(large)
+      sloaded = SSZ.decode(sbytes, MyList)
+      lloaded = SSZ.decode(lbytes, MyList)
+
+    doAssert sroot == hash_tree_root(sloaded)
+    doAssert lroot == hash_tree_root(lloaded)
+
+    # Here we smoke test that the cache is reset correctly even when reading
+    # into an existing instance - the instances are size-swapped so the reader
+    # will have some more work to do
+    readSszValue(sbytes, lloaded)
+    readSszValue(lbytes, sloaded)
+
+    doAssert lroot == hash_tree_root(sloaded)
+    doAssert sroot == hash_tree_root(lloaded)

@ -1 +1 @@
-Subproject commit 363dd4ca77582310f55d98569b9fd2a35678f17d
+Subproject commit 002b21b49226473cbe4d6cc67e9d836c340babc3

@ -1 +1 @@
-Subproject commit 64d40d6c1a095761a03d1ba55eb45877596e8e7b
+Subproject commit 7a9d118929483c38f67df81514011414e229cd66