taskpools: bump (#6757)
* taskpools: bump
* better raises effects
* get rid of custom naming
* bump
* oops
* bump stable
This commit is contained in:
parent 7d81ee17db
commit 031d24ff41
@@ -144,7 +144,7 @@ type
 
 proc new*(
     T: type BatchCrypto, rng: ref HmacDrbgContext,
-    eager: Eager, genesis_validators_root: Eth2Digest, taskpool: TaskPoolPtr):
+    eager: Eager, genesis_validators_root: Eth2Digest, taskpool: Taskpool):
     Result[ref BatchCrypto, string] =
   let res = (ref BatchCrypto)(
     rng: rng, taskpool: taskpool,
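Call sites now pass the pool as a plain Taskpool. A minimal sketch of such a call site, assuming placeholder values for the eager callback and the genesis root (ZERO_HASH stands in for a real network's value):

# Hypothetical call site -- all argument values below are placeholders.
let
  rng = HmacDrbgContext.new()
  taskpool = Taskpool.new(numThreads = 4)
  batchCrypto = BatchCrypto.new(
    rng, eager = proc(): bool = false,
    genesis_validators_root = ZERO_HASH,
    taskpool).expect("working batcher")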
@@ -169,7 +169,7 @@ proc new*(T: type Eth2Processor,
     blobQuarantine: ref BlobQuarantine,
     rng: ref HmacDrbgContext,
     getBeaconTime: GetBeaconTimeFn,
-    taskpool: TaskPoolPtr
+    taskpool: Taskpool
 ): ref Eth2Processor =
   (ref Eth2Processor)(
     doppelgangerDetectionEnabled: doppelgangerDetectionEnabled,
@@ -291,7 +291,7 @@ proc initFullNode(
     rng: ref HmacDrbgContext,
     dag: ChainDAGRef,
     clist: ChainListRef,
-    taskpool: TaskPoolPtr,
+    taskpool: Taskpool,
     getBeaconTime: GetBeaconTimeFn) {.async.} =
   template config(): auto = node.config
@@ -417,7 +417,7 @@ proc initFullNode(
     blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded))
     dataColumnQuarantine = newClone(DataColumnQuarantine.init())
     supernode = node.config.subscribeAllSubnets
     localCustodySubnets =
       if supernode:
         DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
       else:
@@ -529,28 +529,28 @@ proc initFullNode(
       (proc(): bool = syncManager.inProgress),
       quarantine, blobQuarantine, rmanBlockVerifier,
       rmanBlockLoader, rmanBlobLoader)
 
   # As per EIP 7594, the BN is now categorised into a
   # `Fullnode` and a `Supernode`: a fullnode custodies a
   # given set of data columns, and hence ONLY subscribes to those
   # data column subnet topics, whereas a supernode subscribes
   # to all of the topics. This in turn keeps our `data column
   # quarantine` variable in size. Whenever the BN is a supernode,
   # the column quarantine essentially spans all NUMBER_OF_COLUMNS,
   # as mentioned in the spec. For a fullnode, however, the quarantine
   # depends on the columns randomly assigned by `get_custody_columns`.
 
   # Hence, in order to keep the column quarantine accurate and
   # error-proof, the custody columns are computed once as the BN boots.
   # Then the values are used globally around the codebase.
 
   # `get_custody_columns` is not a very expensive function, but there
   # are multiple instances of computing custody columns, especially
   # during peer selection, sync with columns, and so on. That is the
   # rationale for populating it at boot and using it globally.
 
   dataColumnQuarantine[].supernode = supernode
   dataColumnQuarantine[].custody_columns =
     node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
       localCustodySubnets))
   if node.config.lightClientDataServe:
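A condensed sketch of the boot-time computation described in the comments above. The CUSTODY_REQUIREMENT fallback is an assumption on my part, since the hunk truncates the else branch; all other names come from the diff itself:

# Sketch: derive the custody column set once from the node id at boot,
# instead of recomputing it during peer selection or column sync.
# CUSTODY_REQUIREMENT below is assumed, not shown in the hunk above.
let localCustodySubnets =
  if supernode:
    DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
  else:
    CUSTODY_REQUIREMENT.uint64

dataColumnQuarantine[].supernode = supernode
dataColumnQuarantine[].custody_columns =
  node.network.nodeId.get_custody_columns(
    max(SAMPLES_PER_SLOT.uint64, localCustodySubnets))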
@@ -654,7 +654,6 @@ proc init*(T: type BeaconNode,
            metadata: Eth2NetworkMetadata): Future[BeaconNode]
           {.async.} =
   var
-    taskpool: TaskPoolPtr
     genesisState: ref ForkedHashedBeaconState = nil
 
   template cfg: auto = metadata.cfg
@@ -690,18 +689,20 @@ proc init*(T: type BeaconNode,
       altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH
     quit 1
 
-  try:
-    if config.numThreads < 0:
-      fatal "The number of threads --numThreads cannot be negative."
-      quit 1
-    elif config.numThreads == 0:
-      taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16))
-    else:
-      taskpool = TaskPoolPtr.new(numThreads = config.numThreads)
-
-    info "Threadpool started", numThreads = taskpool.numThreads
-  except Exception:
-    raise newException(Defect, "Failure in taskpool initialization.")
+  let taskpool =
+    try:
+      if config.numThreads < 0:
+        fatal "The number of threads --num-threads cannot be negative."
+        quit 1
+      elif config.numThreads == 0:
+        Taskpool.new(numThreads = min(countProcessors(), 16))
+      else:
+        Taskpool.new(numThreads = config.numThreads)
+    except CatchableError as e:
+      fatal "Cannot start taskpool", err = e.msg
+      quit 1
+
+  info "Threadpool started", numThreads = taskpool.numThreads
 
   if metadata.genesis.kind == BakedIn:
     if config.genesisState.isSome:
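The rework leans on Nim's expression-oriented control flow: if/elif/else and try/except yield values, so the pool can be bound immutably with let instead of mutating a pre-declared var. A self-contained sketch of the pattern, with logging reduced to echo for brevity (makeTaskpool is a hypothetical helper, not from the commit):

import std/cpuinfo
import taskpools

proc makeTaskpool(requested: int): Taskpool =
  # Every branch either yields a Taskpool or exits the process,
  # so the whole try/if chain is a single expression.
  try:
    if requested < 0:
      echo "thread count cannot be negative"
      quit 1
    elif requested == 0:
      # Auto-size, capped at 16 threads as in the hunk above.
      Taskpool.new(numThreads = min(countProcessors(), 16))
    else:
      Taskpool.new(numThreads = requested)
  except CatchableError as e:
    echo "cannot start taskpool: ", e.msg
    quit 1

let taskpool = makeTaskpool(0)
echo "threadpool started with ", taskpool.numThreads, " threads"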
@@ -26,18 +26,16 @@ import
 export results, rand, altair, phase0, taskpools, signatures
 
 type
-  TaskPoolPtr* = Taskpool
-
   BatchVerifier* = object
     sigVerifCache*: BatchedBLSVerifierCache
       ## A cache for batch BLS signature verification contexts
     rng*: ref HmacDrbgContext
       ## A reference to the Nimbus application-wide RNG
-    taskpool*: TaskPoolPtr
+    taskpool*: Taskpool
 
 proc init*(
     T: type BatchVerifier, rng: ref HmacDrbgContext,
-    taskpool: TaskPoolPtr): BatchVerifier =
+    taskpool: Taskpool): BatchVerifier =
   BatchVerifier(
     sigVerifCache: BatchedBLSVerifierCache.init(taskpool),
     rng: rng,
@@ -46,7 +44,7 @@ proc init*(
 
 proc new*(
     T: type BatchVerifier, rng: ref HmacDrbgContext,
-    taskpool: TaskPoolPtr): ref BatchVerifier =
+    taskpool: Taskpool): ref BatchVerifier =
   (ref BatchVerifier)(
     sigVerifCache: BatchedBLSVerifierCache.init(taskpool),
     rng: rng,
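With the alias gone, downstream code builds verifiers against Taskpool directly. A minimal usage sketch mirroring the test setup in the next hunk; the thread count is a placeholder:

# Both constructors now take a Taskpool.
let
  rng = HmacDrbgContext.new()
  taskpool = Taskpool.new(numThreads = 2)
var verifier = BatchVerifier.init(rng, taskpool)   # value variant
let verifierRef = BatchVerifier.new(rng, taskpool) # heap-allocated variant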
@@ -144,7 +144,7 @@ proc runTest(suiteName, path: string, consensusFork: static ConsensusFork) =
     lcDataConfig = LightClientDataConfig(
       serve: true, importMode: LightClientDataImportMode.Full))
     rng = HmacDrbgContext.new()
-    taskpool = TaskPool.new()
+    taskpool = Taskpool.new()
   var
     verifier = BatchVerifier.init(rng, taskpool)
     quarantine = newClone(Quarantine.init())
@@ -1 +1 @@
-Subproject commit 66585e2e960b7695e48ea60377fb3aeac96406e8
+Subproject commit 25f24dddaf1b6d0c791751e209109ba79fea6802