# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os, strutils, json,

  # Nimble packages
  stew/shims/[tables, macros],
  chronos, confutils, metrics, json_rpc/[rpcclient, jsonmarshal],
  chronicles,
  json_serialization/std/[options, sets, net],

  # Local modules
  spec/[datatypes, digest, crypto, helpers, network, signatures],
  conf, time, version,
  eth2_network, eth2_discovery, validator_pool, beacon_node_types,
  attestation_aggregation,
  nimbus_binary_common,
  ssz/merkleization,
  sync_manager, keystore_management,
  spec/eth2_apis/callsigs_types,
  eth2_json_rpc_serialization,
  validator_slashing_protection,
  eth/db/[kvstore, kvstore_sqlite3]

logScope: topics = "vc"

template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]

## Generate client convenience marshalling wrappers from forward declarations
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "validator_callsigs.nim")
createRpcSigs(RpcClient, sourceDir / "spec" / "eth2_apis" / "beacon_callsigs.nim")
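
# Runtime state of the validator client: configuration, the JSON-RPC client
# used to talk to the beacon node, the locally attached validators and the
# duties cached for the current (and next) epoch.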
type
  ValidatorClient = ref object
    config: ValidatorClientConf
    graffitiBytes: GraffitiBytes
    client: RpcHttpClient
    beaconClock: BeaconClock
    attachedValidators: ValidatorPool
    fork: Fork
    proposalsForCurrentEpoch: Table[Slot, ValidatorPubKey]
    attestationsForEpoch: Table[Epoch, Table[Slot, seq[AttesterDuties]]]
    beaconGenesis: BeaconGenesisTuple
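
# Retry `body` until it runs to completion without raising, sleeping for
# `retryDelay` seconds between attempts - used for calls to the beacon node
# that have to succeed before the client can make progress.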
template attemptUntilSuccess(vc: ValidatorClient, body: untyped) =
  while true:
    try:
      body
      break
    except CatchableError as err:
      warn "Caught an unexpected error", err = err.msg
    waitFor sleepAsync(chronos.seconds(vc.config.retryDelay))
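
# Fetch the block proposal and attestation duties for `epoch` (and the
# attestation duties for the following epoch) from the beacon node and cache
# them on the ValidatorClient, together with the current fork.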
proc getValidatorDutiesForEpoch(vc: ValidatorClient, epoch: Epoch) {.gcsafe, async.} =
  info "Getting validator duties for epoch", epoch = epoch

  let proposals = await vc.client.get_v1_validator_duties_proposer(epoch)
  # update the block proposal duties this VC should do during this epoch
  vc.proposalsForCurrentEpoch.clear()
  for curr in proposals:
    if vc.attachedValidators.validators.contains curr.public_key:
      vc.proposalsForCurrentEpoch.add(curr.slot, curr.public_key)

  # couldn't use mapIt in ANY shape or form so reverting to raw loops - sorry Sean Parent :|
  var validatorPubkeys: seq[ValidatorPubKey]
  for key in vc.attachedValidators.validators.keys:
    validatorPubkeys.add key
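
  # Nested helper: fetch the attester duties for a single epoch and group
  # them by slot in vc.attestationsForEpoch.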
  proc getAttesterDutiesForEpoch(epoch: Epoch) {.gcsafe, async.} =
    # make sure there's an entry
    if not vc.attestationsForEpoch.contains epoch:
      vc.attestationsForEpoch.add(epoch, Table[Slot, seq[AttesterDuties]]())
    let attestations = await vc.client.get_v1_validator_duties_attester(
      epoch, validatorPubkeys)
    for a in attestations:
      # hasKeyOrPut returns true when duties for this slot are already
      # present - in that case, append to the existing seq
      if vc.attestationsForEpoch[epoch].hasKeyOrPut(a.slot, @[a]):
        vc.attestationsForEpoch[epoch][a.slot].add(a)

  # clear both for the current epoch and the next because a change of
  # fork could invalidate the attester duties even for the current epoch
  vc.attestationsForEpoch.clear()
  await getAttesterDutiesForEpoch(epoch)
  # obtain the attestation duties this VC should do during the next epoch
  # TODO currently we aren't making use of this but perhaps we should
  await getAttesterDutiesForEpoch(epoch + 1)

  # for now we fetch the fork each time we update the validator duties
  # TODO should poll occasionally `/v1/config/fork_schedule`
  vc.fork = await vc.client.get_v1_beacon_states_fork("head")

  var numAttestationsForEpoch = 0
  for _, dutiesForSlot in vc.attestationsForEpoch[epoch]:
    numAttestationsForEpoch += dutiesForSlot.len

  info "Got validator duties for epoch",
    num_proposals = vc.proposalsForCurrentEpoch.len,
    num_attestations = numAttestationsForEpoch
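
# Per-slot driver: refreshes duties at epoch boundaries, proposes a block if
# one of the attached validators is the scheduled proposer, attests a third
# of the way into the slot, aggregates two thirds of the way in, and finally
# re-schedules itself for the next slot.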
proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =

  let
    # The slot we should be at, according to the clock
    beaconTime = vc.beaconClock.now()
    wallSlot = beaconTime.toSlot()

  let
    slot = wallSlot.slot # afterGenesis == true!
    nextSlot = slot + 1
    epoch = slot.compute_epoch_at_slot

  info "Slot start",
    lastSlot = shortLog(lastSlot),
    scheduledSlot = shortLog(scheduledSlot),
    beaconTime = shortLog(beaconTime),
    portBN = vc.config.rpcPort

  # Check before any re-scheduling of onSlotStart()
  checkIfShouldStopAtEpoch(scheduledSlot, vc.config.stopAtEpoch)

  try:
    # at the start of each epoch - request all validator duties
    # TODO perhaps call this not on the first slot of each epoch but 1 slot
    # earlier, because there are a few back-and-forth requests which could
    # take up time for attesting... Perhaps this should be called more than
    # once per epoch because of forks & other events...
    if slot.isEpoch:
      await getValidatorDutiesForEpoch(vc, epoch)

    # check if we have a validator which needs to propose on this slot
    if vc.proposalsForCurrentEpoch.contains slot:
      let public_key = vc.proposalsForCurrentEpoch[slot]

      let notSlashable = vc.attachedValidators
                           .slashingProtection
                           .checkSlashableBlockProposal(public_key, slot)
      if notSlashable.isOk:
        let validator = vc.attachedValidators.validators[public_key]
        notice "Proposing block", slot = slot, public_key = public_key
        let randao_reveal = await validator.genRandaoReveal(
          vc.fork, vc.beaconGenesis.genesis_validators_root, slot)
        var newBlock = SignedBeaconBlock(
            message: await vc.client.get_v1_validator_block(slot, vc.graffitiBytes, randao_reveal)
          )
        newBlock.root = hash_tree_root(newBlock.message)

        # TODO: signing_root is recomputed in signBlockProposal just after
        let signing_root = compute_block_root(
          vc.fork, vc.beaconGenesis.genesis_validators_root, slot, newBlock.root)
        vc.attachedValidators
          .slashingProtection
          .registerBlock(public_key, slot, signing_root)

        newBlock.signature = await validator.signBlockProposal(
          vc.fork, vc.beaconGenesis.genesis_validators_root, slot, newBlock.root)

        discard await vc.client.post_v1_validator_block(newBlock)
      else:
        warn "Slashing protection activated for block proposal",
          validator = public_key,
          slot = slot,
          existingProposal = notSlashable.error

    # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#attesting
    # A validator should create and broadcast the attestation to the associated
    # attestation subnet when either (a) the validator has received a valid
    # block from the expected block proposer for the assigned slot or
    # (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds
    # after the start of slot) -- whichever comes first.
    discard await vc.beaconClock.sleepToSlotOffset(
      seconds(int64(SECONDS_PER_SLOT)) div 3, slot, "Waiting to send attestations")

    # check if we have validators which need to attest on this slot
    if vc.attestationsForEpoch.contains(epoch) and
        vc.attestationsForEpoch[epoch].contains slot:
      # remember the data root each validator attested to - needed later when
      # deciding what to aggregate
      var validatorToAttestationDataRoot: Table[ValidatorPubKey, Eth2Digest]
      for a in vc.attestationsForEpoch[epoch][slot]:
        notice "Attesting", slot = slot, public_key = a.public_key

        let validator = vc.attachedValidators.validators[a.public_key]
        let ad = await vc.client.get_v1_validator_attestation(slot, a.committee_index)

        let notSlashable = vc.attachedValidators
                             .slashingProtection
                             .checkSlashableAttestation(
                               a.public_key,
                               ad.source.epoch,
                               ad.target.epoch)
        if notSlashable.isOk():
          # TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
          let signing_root = compute_attestation_root(
            vc.fork, vc.beaconGenesis.genesis_validators_root, ad)
          vc.attachedValidators
            .slashingProtection
            .registerAttestation(
              a.public_key, ad.source.epoch, ad.target.epoch, signing_root)

          # TODO I don't like these (u)int64-to-int conversions...
          let attestation = await validator.produceAndSignAttestation(
            ad, a.committee_length.int, a.validator_committee_index,
            vc.fork, vc.beaconGenesis.genesis_validators_root)

          discard await vc.client.post_v1_beacon_pool_attestations(attestation)

          validatorToAttestationDataRoot[a.public_key] = attestation.data.hash_tree_root
        else:
          warn "Slashing protection activated for attestation",
            validator = a.public_key,
            badVoteDetails = $notSlashable.error

      # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0/specs/phase0/validator.md#broadcast-aggregate
      # If the validator is selected to aggregate (is_aggregator), then they
      # broadcast their best aggregate as a SignedAggregateAndProof to the global
      # aggregate channel (beacon_aggregate_and_proof) two-thirds of the way
      # through the slot -- that is, SECONDS_PER_SLOT * 2 / 3 seconds after the
      # start of slot.
      if slot > 2:
        discard await vc.beaconClock.sleepToSlotOffset(
          seconds(int64(SECONDS_PER_SLOT * 2)) div 3, slot,
          "Waiting to aggregate attestations")

        # loop again over all of our validators which need to attest on
        # this slot and check if we should also aggregate attestations
        for a in vc.attestationsForEpoch[epoch][slot]:
          let validator = vc.attachedValidators.validators[a.public_key]
          let slot_signature = await getSlotSig(validator, vc.fork,
            vc.beaconGenesis.genesis_validators_root, slot)

          if is_aggregator(a.committee_length, slot_signature) and
              validatorToAttestationDataRoot.contains(a.public_key):
            notice "Aggregating", slot = slot, public_key = a.public_key

            let aa = await vc.client.get_v1_validator_aggregate_attestation(
              slot, validatorToAttestationDataRoot[a.public_key])
            let aap = AggregateAndProof(aggregator_index: a.validator_index.uint64,
              aggregate: aa, selection_proof: slot_signature)
            let sig = await signAggregateAndProof(validator,
              aap, vc.fork, vc.beaconGenesis.genesis_validators_root)
            var signedAP = SignedAggregateAndProof(message: aap, signature: sig)
            discard await vc.client.post_v1_validator_aggregate_and_proofs(signedAP)

  except CatchableError as err:
    warn "Caught an unexpected error", err = err.msg, slot = shortLog(slot)

  let
    nextSlotStart = saturate(vc.beaconClock.fromNow(nextSlot))

  info "Slot end",
    slot = shortLog(slot),
    nextSlot = shortLog(nextSlot),
    portBN = vc.config.rpcPort

  when declared(GC_fullCollect):
    # The slots in the validator client work as frames in a game: we want to make
    # sure that we're ready for the next one and don't get stuck in lengthy
    # garbage collection tasks when time is of essence in the middle of a slot -
    # while this does not guarantee that we'll never collect during a slot, it
    # makes sure that all the scratch space we used during slot tasks (logging,
    # temporary buffers etc) gets recycled for the next slot that is likely to
    # need similar amounts of memory.
    GC_fullCollect()

  addTimer(nextSlotStart) do (p: pointer):
    asyncCheck vc.onSlotStart(slot, nextSlot)
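
# Entry point: parse the configuration, load the attached validators,
# connect to the beacon node over JSON-RPC and start the per-slot loop.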
programMain:
  let config = makeBannerAndConfig("Nimbus validator client " & fullVersionStr, ValidatorClientConf)

  setupStdoutLogging(config.logLevel)

  setupLogging(config.logLevel, config.logFile)

  case config.cmd
  of VCNoCommand:
    debug "Launching validator client",
      version = fullVersionStr,
      cmdParams = commandLineParams(),
      config

    var vc = ValidatorClient(
      config: config,
      client: newRpcHttpClient(),
      graffitiBytes: if config.graffiti.isSome: config.graffiti.get
                     else: defaultGraffitiBytes())

    # load all the validators from the data dir into memory
    for curr in vc.config.validatorKeys:
      vc.attachedValidators.addLocalValidator(
        curr.toPubKey.initPubKey, curr, none(ValidatorIndex))

    waitFor vc.client.connect($vc.config.rpcAddress, vc.config.rpcPort)
    info "Connected to BN",
      port = vc.config.rpcPort,
      address = vc.config.rpcAddress

    vc.attemptUntilSuccess:
      # init the beacon clock
      vc.beaconGenesis = waitFor vc.client.get_v1_beacon_genesis()
      vc.beaconClock = BeaconClock.init(vc.beaconGenesis.genesis_time)

    # open (or create) the slashing protection database in the validators directory
    vc.attachedValidators.slashingProtection =
      SlashingProtectionDB.init(
        vc.beaconGenesis.genesis_validators_root,
        kvStore SqStoreRef.init(config.validatorsDir(), "slashing_protection").tryGet()
      )

    let
      curSlot = vc.beaconClock.now().slotOrZero()
      nextSlot = curSlot + 1 # No earlier than GENESIS_SLOT + 1
      fromNow = saturate(vc.beaconClock.fromNow(nextSlot))

    vc.attemptUntilSuccess:
      waitFor vc.getValidatorDutiesForEpoch(curSlot.compute_epoch_at_slot)

    info "Scheduling first slot action",
      beaconTime = shortLog(vc.beaconClock.now()),
      nextSlot = shortLog(nextSlot),
      fromNow = shortLog(fromNow)

    addTimer(fromNow) do (p: pointer) {.gcsafe.}:
      asyncCheck vc.onSlotStart(curSlot, nextSlot)

    runForever()