From 8300cee13148afe5bfdfed34e6227c63d2e65381 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Sun, 22 Mar 2020 22:35:49 +0200 Subject: [PATCH 01/58] Merge the contents of backends_common in libp2p_backend --- beacon_chain/libp2p_backend.nim | 403 +++++++++++++++++++++++++++++++- 1 file changed, 401 insertions(+), 2 deletions(-) diff --git a/beacon_chain/libp2p_backend.nim b/beacon_chain/libp2p_backend.nim index b9f320be8..94e811e9b 100644 --- a/beacon_chain/libp2p_backend.nim +++ b/beacon_chain/libp2p_backend.nim @@ -1,7 +1,7 @@ import algorithm, typetraits, net as stdNet, stew/[varints,base58], stew/shims/[macros, tables], chronos, chronicles, - stint, faststreams/output_stream, serialization, + stint, faststreams/output_stream, serialization, metrics, json_serialization/std/[net, options], eth/[keys, async_utils], eth/p2p/[enode, p2p_protocol_dsl], eth/p2p/discoveryv5/[enr, node], @@ -23,6 +23,7 @@ export p2pProtocol, libp2p_json_serialization, ssz type + Bytes = seq[byte] P2PStream = Connection # TODO Is this really needed? @@ -82,6 +83,11 @@ type ProtocolInfo* = ptr ProtocolInfoObj + ResponseCode* = enum + Success + InvalidRequest + ServerError + PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.} NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.} HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.} @@ -103,6 +109,28 @@ type const TCP = net.Protocol.IPPROTO_TCP + defaultIncomingReqTimeout = 5000 + HandshakeTimeout = FaultOrError + + # Spec constants + # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains + REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes + GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes + TTFB_TIMEOUT* = 5.seconds + RESP_TIMEOUT* = 10.seconds + + readTimeoutErrorMsg = "Exceeded read timeout for a request" + +logScope: + topics = "libp2p" + +declarePublicGauge libp2p_successful_dials, + "Number of successfully dialed peers" + +declarePublicGauge libp2p_peers, + "Number of active libp2p peers" + +template libp2pProtocol*(name: string, version: int) {.pragma.} template `$`*(peer: Peer): string = id(peer.info) chronicles.formatIt(Peer): $it @@ -165,7 +193,378 @@ proc handleIncomingPeer*(peer: Peer) include eth/p2p/p2p_backends_helpers include eth/p2p/p2p_tracing -include libp2p_backends_common + +proc getRequestProtoName(fn: NimNode): NimNode = + # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes + # (TODO: file as an issue) + + let pragmas = fn.pragma + if pragmas.kind == nnkPragma and pragmas.len > 0: + for pragma in pragmas: + if pragma.len > 0 and $pragma[0] == "libp2pProtocol": + let protoName = $(pragma[1]) + let protoVer = $(pragma[2].intVal) + return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz") + + return newLit("") + +template raisePeerDisconnected(msg: string, r: DisconnectionReason) = + var e = newException(PeerDisconnected, msg) + e.reason = r + raise e + +proc disconnectAndRaise(peer: Peer, + reason: DisconnectionReason, + msg: string) {.async.} = + let r = reason + await peer.disconnect(r) + raisePeerDisconnected(msg, r) + +proc readChunk(stream: P2PStream, + MsgType: type, + withResponseCode: bool, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.} + +proc readSizePrefix(stream: P2PStream, + deadline: Future[void]): Future[int] {.async.} = + trace "about to read msg size prefix" + var parser: VarintParser[uint64, ProtoBuf] + while true: + var nextByte: byte + var readNextByte = 
stream.readExactly(addr nextByte, 1) + await readNextByte or deadline + if not readNextByte.finished: + trace "size prefix byte not received in time" + return -1 + case parser.feedByte(nextByte) + of Done: + let res = parser.getResult + if res > uint64(REQ_RESP_MAX_SIZE): + trace "size prefix outside of range", res + return -1 + else: + trace "got size prefix", res + return int(res) + of Overflow: + trace "size prefix overflow" + return -1 + of Incomplete: + continue + +proc readMsgBytes(stream: P2PStream, + withResponseCode: bool, + deadline: Future[void]): Future[Bytes] {.async.} = + trace "about to read message bytes", withResponseCode + + try: + if withResponseCode: + var responseCode: byte + trace "about to read response code" + var readResponseCode = stream.readExactly(addr responseCode, 1) + await readResponseCode or deadline + + if not readResponseCode.finished: + trace "response code not received in time" + return + + if responseCode > ResponseCode.high.byte: + trace "invalid response code", responseCode + return + + logScope: responseCode = ResponseCode(responseCode) + trace "got response code" + + case ResponseCode(responseCode) + of InvalidRequest, ServerError: + let responseErrMsg = await readChunk(stream, string, false, deadline) + debug "P2P request resulted in error", responseErrMsg + return + + of Success: + # The response is OK, the execution continues below + discard + + var sizePrefix = await readSizePrefix(stream, deadline) + trace "got msg size prefix", sizePrefix + + if sizePrefix == -1: + debug "Failed to read an incoming message size prefix", peer = stream.peer + return + + if sizePrefix == 0: + debug "Received SSZ with zero size", peer = stream.peer + return + + trace "about to read msg bytes", len = sizePrefix + var msgBytes = newSeq[byte](sizePrefix) + var readBody = stream.readExactly(addr msgBytes[0], sizePrefix) + await readBody or deadline + if not readBody.finished: + trace "msg bytes not received in time" + return + + trace "got message bytes", len = sizePrefix + return msgBytes + + except TransportIncompleteError: + return @[] + +proc readChunk(stream: P2PStream, + MsgType: type, + withResponseCode: bool, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = + var msgBytes = await stream.readMsgBytes(withResponseCode, deadline) + try: + if msgBytes.len > 0: + return some SSZ.decode(msgBytes, MsgType) + except SerializationError as err: + debug "Failed to decode a network message", + msgBytes, errMsg = err.formatMsg("") + return + +proc readResponse( + stream: P2PStream, + MsgType: type, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = + + when MsgType is seq: + type E = ElemType(MsgType) + var results: MsgType + while true: + let nextRes = await readChunk(stream, E, true, deadline) + if nextRes.isNone: break + results.add nextRes.get + if results.len > 0: + return some(results) + else: + return await readChunk(stream, MsgType, true, deadline) + +proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = + var s = init OutputStream + s.append byte(responseCode) + s.appendVarint errMsg.len + s.appendValue SSZ, errMsg + s.getOutput + +proc sendErrorResponse(peer: Peer, + stream: P2PStream, + err: ref SerializationError, + msgName: string, + msgBytes: Bytes) {.async.} = + debug "Received an invalid request", + peer, msgName, msgBytes, errMsg = err.formatMsg("") + + let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg")) + await stream.writeAllBytes(responseBytes) + await stream.close() + 
+proc sendErrorResponse(peer: Peer, + stream: P2PStream, + responseCode: ResponseCode, + errMsg: string) {.async.} = + debug "Error processing request", peer, responseCode, errMsg + + let responseBytes = encodeErrorMsg(ServerError, errMsg) + await stream.writeAllBytes(responseBytes) + await stream.close() + +proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} = + var deadline = sleepAsync RESP_TIMEOUT + var streamFut = peer.network.openStream(peer, protocolId) + await streamFut or deadline + if not streamFut.finished: + # TODO: we are returning here because the deadline passed, but + # the stream can still be opened eventually a bit later. Who is + # going to close it then? + raise newException(TransmissionError, "Failed to open LibP2P stream") + + let stream = streamFut.read + defer: + await safeClose(stream) + + var s = init OutputStream + s.appendVarint requestBytes.len.uint64 + s.append requestBytes + let bytes = s.getOutput + await stream.writeAllBytes(bytes) + +# TODO There is too much duplication in the responder functions, but +# I hope to reduce this when I increse the reliance on output streams. +proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} = + var s = init OutputStream + s.append byte(Success) + s.appendVarint payload.len.uint64 + s.append payload + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} = + var s = init OutputStream + s.append byte(Success) + s.appendValue SSZ, sizePrefixed(val) + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} = + var s = init OutputStream + for chunk in chunks: + s.append byte(Success) + s.appendValue SSZ, sizePrefixed(chunk) + + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, + ResponseMsg: type, + timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} = + var deadline = sleepAsync timeout + + # Open a new LibP2P stream + var streamFut = peer.network.openStream(peer, protocolId) + await streamFut or deadline + if not streamFut.finished: + # TODO: we are returning here because the deadline passed, but + # the stream can still be opened eventually a bit later. Who is + # going to close it then? 
+ return none(ResponseMsg) + + let stream = streamFut.read + defer: + await safeClose(stream) + + # Send the request + var s = init OutputStream + s.appendVarint requestBytes.len.uint64 + s.append requestBytes + let bytes = s.getOutput + await stream.writeAllBytes(bytes) + + # Read the response + return await stream.readResponse(ResponseMsg, deadline) + +proc init*[MsgType](T: type Responder[MsgType], + peer: Peer, stream: P2PStream): T = + T(UntypedResponder(peer: peer, stream: stream)) + +template write*[M](r: var Responder[M], val: auto): auto = + mixin send + type Msg = M + type MsgRec = RecType(Msg) + when MsgRec is seq|openarray: + type E = ElemType(MsgRec) + when val is E: + sendResponseChunkObj(UntypedResponder(r), val) + elif val is MsgRec: + sendResponseChunks(UntypedResponder(r), val) + else: + {.fatal: "Unepected message type".} + else: + send(r, val) + +proc performProtocolHandshakes*(peer: Peer) {.async.} = + var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len) + for protocol in allProtocols: + if protocol.handshake != nil: + subProtocolsHandshakes.add((protocol.handshake)(peer, nil)) + + await all(subProtocolsHandshakes) + +template initializeConnection*(peer: Peer): auto = + performProtocolHandshakes(peer) + +proc initProtocol(name: string, + peerInit: PeerStateInitializer, + networkInit: NetworkStateInitializer): ProtocolInfoObj = + result.name = name + result.messages = @[] + result.peerStateInitializer = peerInit + result.networkStateInitializer = networkInit + +proc registerProtocol(protocol: ProtocolInfo) = + # TODO: This can be done at compile-time in the future + let pos = lowerBound(gProtocols, protocol) + gProtocols.insert(protocol, pos) + for i in 0 ..< gProtocols.len: + gProtocols[i].index = i + +proc setEventHandlers(p: ProtocolInfo, + handshake: HandshakeStep, + disconnectHandler: DisconnectionHandler) = + p.handshake = handshake + p.disconnectHandler = disconnectHandler + +proc implementSendProcBody(sendProc: SendProc) = + let + msg = sendProc.msg + UntypedResponder = bindSym "UntypedResponder" + await = ident "await" + + proc sendCallGenerator(peer, bytes: NimNode): NimNode = + if msg.kind != msgResponse: + let msgProto = getRequestProtoName(msg.procDef) + case msg.kind + of msgRequest: + let + timeout = msg.timeoutParam[0] + ResponseRecord = msg.response.recName + quote: + makeEth2Request(`peer`, `msgProto`, `bytes`, + `ResponseRecord`, `timeout`) + else: + quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`) + else: + quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`) + + sendProc.useStandardBody(nil, nil, sendCallGenerator) + +proc handleIncomingStream(network: Eth2Node, stream: P2PStream, + MsgType, Format: distinct type) {.async, gcsafe.} = + mixin callUserHandler, RecType + const msgName = typetraits.name(MsgType) + + ## Uncomment this to enable tracing on all incoming requests + ## You can include `msgNameLit` in the condition to select + ## more specific requests: + # when chronicles.runtimeFilteringEnabled: + # setLogLevel(LogLevel.TRACE) + # defer: setLogLevel(LogLevel.DEBUG) + # trace "incoming " & `msgNameLit` & " stream" + + let peer = peerFromStream(network, stream) + + handleIncomingPeer(peer) + + defer: + await safeClose(stream) + + let + deadline = sleepAsync RESP_TIMEOUT + msgBytes = await readMsgBytes(stream, false, deadline) + + if msgBytes.len == 0: + await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg) + return + + type MsgRec = RecType(MsgType) + var msg: MsgRec + try: + msg = 
decode(Format, msgBytes, MsgRec) + except SerializationError as err: + await sendErrorResponse(peer, stream, err, msgName, msgBytes) + return + except Exception as err: + # TODO. This is temporary code that should be removed after interop. + # It can be enabled only in certain diagnostic builds where it should + # re-raise the exception. + debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName + await sendErrorResponse(peer, stream, ServerError, err.msg) + raise err + + try: + logReceivedMsg(peer, MsgType(msg)) + await callUserHandler(peer, stream, msg) + except CatchableError as err: + await sendErrorResponse(peer, stream, ServerError, err.msg) proc handleOutgoingPeer*(peer: Peer): Future[void] {.async.} = let network = peer.network From 0c018cb68acdff111d6ab92ab596e5830df26fb8 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Sun, 22 Mar 2020 22:54:47 +0200 Subject: [PATCH 02/58] Mechanically remove all mentions of the daemon from the code --- .appveyor.yml | 2 +- .travis.yml | 2 +- Jenkinsfile | 2 +- azure-pipelines.yml | 2 +- beacon_chain/beacon_node.nim | 15 +- beacon_chain/eth2_network.nim | 337 ++++++++------------ beacon_chain/libp2p_backends_common.nim | 407 ------------------------ beacon_chain/libp2p_daemon_backend.nim | 266 ---------------- beacon_chain/request_manager.nim | 81 ++--- beacon_chain/sync_protocol.nim | 5 +- beacon_chain/version.nim | 14 +- scripts/connect_to_testnet.nims | 2 - scripts/load-testnet-nim-flags.sh | 1 - scripts/testnet0.env | 1 - scripts/testnet1.env | 1 - tests/simulation/start.sh | 11 +- tests/simulation/vars.sh | 5 - wasm/build_ncli.sh | 2 +- wasm/nim.cfg | 1 - 19 files changed, 167 insertions(+), 990 deletions(-) delete mode 100644 beacon_chain/libp2p_backends_common.nim delete mode 100644 beacon_chain/libp2p_daemon_backend.nim diff --git a/.appveyor.yml b/.appveyor.yml index 173794d3c..542e50a37 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -37,7 +37,7 @@ build_script: test_script: # the "go-checks" target fails in AppVeyor, for some reason; easier to disable than to debug - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE - - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:NETWORK_TYPE=libp2p -d:testnet_servers_image" + - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_TEST_FIXTURES_SCRIPT=1 DISABLE_GO_CHECKS=1 test deploy: off diff --git a/.travis.yml b/.travis.yml index d015327cc..b045ec565 100644 --- a/.travis.yml +++ b/.travis.yml @@ -50,6 +50,6 @@ script: # Building Nim-1.0.4 takes up to 10 minutes on Travis - the time limit after which jobs are cancelled for having no output - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" V=1 update # to allow a newer Nim version to be detected - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" LOG_LEVEL=TRACE - - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:NETWORK_TYPE=libp2p -d:testnet_servers_image" LOG_LEVEL=TRACE + - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:testnet_servers_image" LOG_LEVEL=TRACE - make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" DISABLE_TEST_FIXTURES_SCRIPT=1 test diff --git a/Jenkinsfile b/Jenkinsfile index 13469b744..025287bae 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -22,7 +22,7 @@ def runStages() { "tools": { stage("Tools") { sh "make -j${env.NPROC}" - sh "make 
-j${env.NPROC} LOG_LEVEL=TRACE NIMFLAGS='-d:NETWORK_TYPE=libp2p -d:testnet_servers_image'" + sh "make -j${env.NPROC} LOG_LEVEL=TRACE NIMFLAGS='-d:testnet_servers_image'" } }, "test suite": { diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bf0754ede..db0bff816 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -72,7 +72,7 @@ jobs: mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:NETWORK_TYPE=libp2p -d:testnet_servers_image" + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" file build/beacon_node mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test' diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 6b6ac170f..2d8e906ff 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -193,12 +193,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async for node in conf.bootstrapNodes: addBootstrapNode(node, bootNodes, bootEnrs, ourPubKey) loadBootstrapFile(string conf.bootstrapNodesFile, bootNodes, bootEnrs, ourPubKey) - when networkBackend == libp2pDaemon: - for enr in bootEnrs: - let enode = toENode(enr) - if enode.isOk: - bootNodes.add enode.value - let persistentBootstrapFile = conf.dataDir / "bootstrap_nodes.txt" if fileExists(persistentBootstrapFile): loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) @@ -924,16 +918,9 @@ proc installBeaconApiHandlers(rpcServer: RpcServer, node: BeaconNode) = return StringOfJson("null") rpcServer.rpc("getNetworkPeerId") do () -> string: - when networkBackend != libp2p: - raise newException(CatchableError, "Unsupported operation") - else: - return $publicKey(node.network) + return $publicKey(node.network) rpcServer.rpc("getNetworkPeers") do () -> seq[string]: - when networkBackend != libp2p: - if true: - raise newException(CatchableError, "Unsupported operation") - for peerId, peer in node.network.peerPool: result.add $peerId diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index bb71ef73b..cfebe1f38 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -63,244 +63,155 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, if extPorts.isSome: (result.tcpPort, result.udpPort) = extPorts.get() -when networkBackend in [libp2p, libp2pDaemon]: - import - os, random, - stew/io, eth/async_utils, - libp2p/[multiaddress, multicodec], - ssz +import + os, random, + stew/io, eth/async_utils, + libp2p/[multiaddress, multicodec], + ssz - export - multiaddress +export + multiaddress - when networkBackend == libp2p: - import - libp2p/standard_setup, libp2p_backend, libp2p/peerinfo, peer_pool +import + libp2p/standard_setup, libp2p_backend, libp2p/peerinfo, peer_pool - export - libp2p_backend, peer_pool, peerinfo +export + libp2p_backend, peer_pool, peerinfo +const + netBackendName* = "libp2p" + networkKeyFilename = "privkey.protobuf" + +func asLibp2pKey*(key: keys.PublicKey): PublicKey = + PublicKey(scheme: Secp256k1, skkey: key) + +func asEthKey*(key: PrivateKey): keys.PrivateKey = + keys.PrivateKey(data: key.skkey.data) + +proc initAddress*(T: type MultiAddress, str: string): T = + let address = MultiAddress.init(str) + if 
IPFS.match(address) and matchPartial(multiaddress.TCP, address): + result = address else: - import - libp2p/daemon/daemonapi, libp2p_daemon_backend + raise newException(MultiAddressError, + "Invalid bootstrap node multi-address") - export - libp2p_daemon_backend +template tcpEndPoint(address, port): auto = + MultiAddress.init(address, Protocol.IPPROTO_TCP, port) - var mainDaemon: DaemonAPI +proc ensureNetworkIdFile(conf: BeaconNodeConf): string = + result = conf.dataDir / networkKeyFilename + if not fileExists(result): + createDir conf.dataDir.string + let pk = PrivateKey.random(Secp256k1) + writeFile(result, pk.getBytes) - proc closeDaemon() {.noconv.} = - if mainDaemon != nil: - info "Shutting down the LibP2P daemon" - waitFor mainDaemon.close() +proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = + let privKeyPath = conf.dataDir / networkKeyFilename + var privKey: PrivateKey + if not fileExists(privKeyPath): + createDir conf.dataDir.string + privKey = PrivateKey.random(Secp256k1) + writeFile(privKeyPath, privKey.getBytes()) + else: + let keyBytes = readFile(privKeyPath) + privKey = PrivateKey.init(keyBytes.toOpenArrayByte(0, keyBytes.high)) - addQuitProc(closeDaemon) + KeyPair(seckey: privKey, pubkey: privKey.getKey()) - const - netBackendName* = "libp2p" - networkKeyFilename = "privkey.protobuf" +proc createEth2Node*(conf: BeaconNodeConf, + bootstrapNodes: seq[ENode]): Future[Eth2Node] {.async.} = + var + (extIp, extTcpPort, _) = setupNat(conf) + hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) + announcedAddresses = if extIp == globalListeningAddr: @[] + else: @[tcpEndPoint(extIp, extTcpPort)] - func asLibp2pKey*(key: keys.PublicKey): PublicKey = - PublicKey(scheme: Secp256k1, skkey: key) + info "Initializing networking", hostAddress, + announcedAddresses, + bootstrapNodes - func asEthKey*(key: PrivateKey): keys.PrivateKey = - keys.PrivateKey(data: key.skkey.data) + let keys = conf.getPersistentNetKeys + # TODO nim-libp2p still doesn't have support for announcing addresses + # that are different from the host address (this is relevant when we + # are running behind a NAT). 
+ var switch = newStandardSwitch(some keys.seckey, hostAddress, + triggerSelf = true, gossip = false) + result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) - proc initAddress*(T: type MultiAddress, str: string): T = - let address = MultiAddress.init(str) - if IPFS.match(address) and matchPartial(multiaddress.TCP, address): - result = address - else: - raise newException(MultiAddressError, - "Invalid bootstrap node multi-address") +proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, + ip: IpAddress, port: Port): ENode = + let pair = getPersistentNetKeys(conf) + initENode(pair.pubkey.skkey, Address(ip: ip, udpPort: port)) - template tcpEndPoint(address, port): auto = - MultiAddress.init(address, Protocol.IPPROTO_TCP, port) +proc shortForm*(id: KeyPair): string = + $PeerID.init(id.pubkey) - proc ensureNetworkIdFile(conf: BeaconNodeConf): string = - result = conf.dataDir / networkKeyFilename - if not fileExists(result): - createDir conf.dataDir.string - let pk = PrivateKey.random(Secp256k1) - writeFile(result, pk.getBytes) +proc toPeerInfo(enode: ENode): PeerInfo = + let + peerId = PeerID.init enode.pubkey.asLibp2pKey + addresses = @[MultiAddress.init enode.toMultiAddressStr] + return PeerInfo.init(peerId, addresses) - proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = - let privKeyPath = conf.dataDir / networkKeyFilename - var privKey: PrivateKey - if not fileExists(privKeyPath): - createDir conf.dataDir.string - privKey = PrivateKey.random(Secp256k1) - writeFile(privKeyPath, privKey.getBytes()) - else: - let keyBytes = readFile(privKeyPath) - privKey = PrivateKey.init(keyBytes.toOpenArrayByte(0, keyBytes.high)) +proc connectToNetwork*(node: Eth2Node, + bootstrapNodes: seq[ENode], + bootstrapEnrs: seq[enr.Record]) {.async.} = + for bootstrapNode in bootstrapEnrs: + debug "Adding known peer", peer = bootstrapNode + node.addKnownPeer bootstrapNode - KeyPair(seckey: privKey, pubkey: privKey.getKey()) + await node.start() - proc createEth2Node*(conf: BeaconNodeConf, - bootstrapNodes: seq[ENode]): Future[Eth2Node] {.async.} = - var - (extIp, extTcpPort, _) = setupNat(conf) - hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) - announcedAddresses = if extIp == globalListeningAddr: @[] - else: @[tcpEndPoint(extIp, extTcpPort)] + proc checkIfConnectedToBootstrapNode {.async.} = + await sleepAsync(30.seconds) + if bootstrapEnrs.len > 0 and libp2p_successful_dials.value == 0: + fatal "Failed to connect to any bootstrap node. Quitting", bootstrapEnrs + quit 1 - info "Initializing networking", hostAddress, - announcedAddresses, - bootstrapNodes + traceAsyncErrors checkIfConnectedToBootstrapNode() - when networkBackend == libp2p: - let keys = conf.getPersistentNetKeys - # TODO nim-libp2p still doesn't have support for announcing addresses - # that are different from the host address (this is relevant when we - # are running behind a NAT). 
- var switch = newStandardSwitch(some keys.seckey, hostAddress, - triggerSelf = true, gossip = false) - result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) - else: - let keyFile = conf.ensureNetworkIdFile +proc saveConnectionAddressFile*(node: Eth2Node, filename: string) = + writeFile(filename, $node.switch.peerInfo.addrs[0] & "/p2p/" & + node.switch.peerInfo.id) - var daemonFut = if bootstrapNodes.len == 0: - newDaemonApi({PSNoSign, DHTFull, PSFloodSub}, - id = keyFile, - hostAddresses = @[hostAddress], - announcedAddresses = announcedAddresses) - else: - newDaemonApi({PSNoSign, DHTFull, PSFloodSub, WaitBootstrap}, - id = keyFile, - hostAddresses = @[hostAddress], - announcedAddresses = announcedAddresses, - bootstrapNodes = mapIt(bootstrapNodes, it.toMultiAddressStr), - peersRequired = 1) +func peersCount*(node: Eth2Node): int = + len(node.peerPool) - mainDaemon = await daemonFut +proc subscribe*[MsgType](node: Eth2Node, + topic: string, + msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} = + template execMsgHandler(peerExpr, gossipBytes, gossipTopic) = + inc gossip_messages_received + trace "Incoming pubsub message received", + peer = peerExpr, len = gossipBytes.len, topic = gossipTopic, + message_id = `$`(sha256.digest(gossipBytes)) + msgHandler SSZ.decode(gossipBytes, MsgType) - var identity = await mainDaemon.identity() - info "LibP2P daemon started", peer = identity.peer.pretty(), - addresses = identity.addresses + let incomingMsgHandler = proc(topic: string, + data: seq[byte]) {.async, gcsafe.} = + execMsgHandler "unknown", data, topic - result = await Eth2Node.init(mainDaemon) + await node.switch.subscribe(topic, incomingMsgHandler) - proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, - ip: IpAddress, port: Port): ENode = - let pair = getPersistentNetKeys(conf) - initENode(pair.pubkey.skkey, Address(ip: ip, udpPort: port)) +proc traceMessage(fut: FutureBase, digest: MDigest[256]) = + fut.addCallback do (arg: pointer): + if not(fut.failed): + trace "Outgoing pubsub message has been sent", message_id = `$`(digest) - proc shortForm*(id: KeyPair): string = - $PeerID.init(id.pubkey) +proc broadcast*(node: Eth2Node, topic: string, msg: auto) = + inc gossip_messages_sent + let broadcastBytes = SSZ.encode(msg) + var fut = node.switch.publish(topic, broadcastBytes) + traceMessage(fut, sha256.digest(broadcastBytes)) + traceAsyncErrors(fut) - proc toPeerInfo(enode: ENode): PeerInfo = - let - peerId = PeerID.init enode.pubkey.asLibp2pKey - addresses = @[MultiAddress.init enode.toMultiAddressStr] - when networkBackend == libp2p: - return PeerInfo.init(peerId, addresses) - else: - return PeerInfo(peer: peerId, addresses: addresses) +# TODO: +# At the moment, this is just a compatiblity shim for the existing RLPx functionality. +# The filtering is not implemented properly yet. 
+iterator randomPeers*(node: Eth2Node, maxPeers: int, Protocol: type): Peer = + var peers = newSeq[Peer]() + for _, peer in pairs(node.peers): peers.add peer + shuffle peers + if peers.len > maxPeers: peers.setLen(maxPeers) + for p in peers: yield p - proc connectToNetwork*(node: Eth2Node, - bootstrapNodes: seq[ENode], - bootstrapEnrs: seq[enr.Record]) {.async.} = - when networkBackend == libp2pDaemon: - var connected = false - for bootstrapNode in bootstrapNodes: - try: - let peerInfo = toPeerInfo(bootstrapNode) - when networkBackend == libp2p: - discard await node.switch.dial(peerInfo) - else: - await node.daemon.connect(peerInfo.peer, peerInfo.addresses) - var peer = node.getPeer(peerInfo) - peer.wasDialed = true - await initializeConnection(peer) - connected = true - except CatchableError as err: - error "Failed to connect to bootstrap node", - node = bootstrapNode, err = err.msg - - if bootstrapNodes.len > 0 and connected == false: - fatal "Failed to connect to any bootstrap node. Quitting." - quit 1 - elif networkBackend == libp2p: - for bootstrapNode in bootstrapEnrs: - debug "Adding known peer", peer = bootstrapNode - node.addKnownPeer bootstrapNode - await node.start() - - proc checkIfConnectedToBootstrapNode {.async.} = - await sleepAsync(30.seconds) - if bootstrapEnrs.len > 0 and libp2p_successful_dials.value == 0: - fatal "Failed to connect to any bootstrap node. Quitting", bootstrapEnrs - quit 1 - - traceAsyncErrors checkIfConnectedToBootstrapNode() - - proc saveConnectionAddressFile*(node: Eth2Node, filename: string) = - when networkBackend == libp2p: - writeFile(filename, $node.switch.peerInfo.addrs[0] & "/p2p/" & - node.switch.peerInfo.id) - else: - let id = waitFor node.daemon.identity() - writeFile(filename, $id.addresses[0] & "/p2p/" & id.peer.pretty) - - func peersCount*(node: Eth2Node): int = - when networkBackend == libp2p: - len(node.peerPool) - else: - node.peers.len - - proc subscribe*[MsgType](node: Eth2Node, - topic: string, - msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} = - template execMsgHandler(peerExpr, gossipBytes, gossipTopic) = - inc gossip_messages_received - trace "Incoming pubsub message received", - peer = peerExpr, len = gossipBytes.len, topic = gossipTopic, - message_id = `$`(sha256.digest(gossipBytes)) - msgHandler SSZ.decode(gossipBytes, MsgType) - - when networkBackend == libp2p: - let incomingMsgHandler = proc(topic: string, - data: seq[byte]) {.async, gcsafe.} = - execMsgHandler "unknown", data, topic - - await node.switch.subscribe(topic, incomingMsgHandler) - - else: - let incomingMsgHandler = proc(api: DaemonAPI, - ticket: PubsubTicket, - msg: PubSubMessage): Future[bool] {.async, gcsafe.} = - execMsgHandler msg.peer, msg.data, msg.topics[0] - return true - - discard await node.daemon.pubsubSubscribe(topic, incomingMsgHandler) - - proc traceMessage(fut: FutureBase, digest: MDigest[256]) = - fut.addCallback do (arg: pointer): - if not(fut.failed): - trace "Outgoing pubsub message has been sent", message_id = `$`(digest) - - proc broadcast*(node: Eth2Node, topic: string, msg: auto) = - inc gossip_messages_sent - let broadcastBytes = SSZ.encode(msg) - - when networkBackend == libp2p: - var fut = node.switch.publish(topic, broadcastBytes) - traceMessage(fut, sha256.digest(broadcastBytes)) - traceAsyncErrors(fut) - else: - var fut = node.daemon.pubsubPublish(topic, broadcastBytes) - traceMessage(fut, sha256.digest(broadcastBytes)) - traceAsyncErrors(fut) - - # TODO: - # At the moment, this is just a compatiblity shim for the 
existing RLPx functionality. - # The filtering is not implemented properly yet. - iterator randomPeers*(node: Eth2Node, maxPeers: int, Protocol: type): Peer = - var peers = newSeq[Peer]() - for _, peer in pairs(node.peers): peers.add peer - shuffle peers - if peers.len > maxPeers: peers.setLen(maxPeers) - for p in peers: yield p -else: - {.fatal: "Unsupported network backend".} diff --git a/beacon_chain/libp2p_backends_common.nim b/beacon_chain/libp2p_backends_common.nim deleted file mode 100644 index 500ab194f..000000000 --- a/beacon_chain/libp2p_backends_common.nim +++ /dev/null @@ -1,407 +0,0 @@ -import - metrics - -type - ResponseCode* = enum - Success - InvalidRequest - ServerError - - Bytes = seq[byte] - -const - defaultIncomingReqTimeout = 5000 - HandshakeTimeout = FaultOrError - - # Spec constants - # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains - REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes - GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes - TTFB_TIMEOUT* = 5.seconds - RESP_TIMEOUT* = 10.seconds - - readTimeoutErrorMsg = "Exceeded read timeout for a request" - -logScope: - topics = "libp2p" - -declarePublicGauge libp2p_successful_dials, - "Number of successfully dialed peers" - -declarePublicGauge libp2p_peers, - "Number of active libp2p peers" - -template libp2pProtocol*(name: string, version: int) {.pragma.} - -proc getRequestProtoName(fn: NimNode): NimNode = - # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes - # (TODO: file as an issue) - - let pragmas = fn.pragma - if pragmas.kind == nnkPragma and pragmas.len > 0: - for pragma in pragmas: - if pragma.len > 0 and $pragma[0] == "libp2pProtocol": - let protoName = $(pragma[1]) - let protoVer = $(pragma[2].intVal) - return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz") - - return newLit("") - -template raisePeerDisconnected(msg: string, r: DisconnectionReason) = - var e = newException(PeerDisconnected, msg) - e.reason = r - raise e - -proc disconnectAndRaise(peer: Peer, - reason: DisconnectionReason, - msg: string) {.async.} = - let r = reason - await peer.disconnect(r) - raisePeerDisconnected(msg, r) - -proc readChunk(stream: P2PStream, - MsgType: type, - withResponseCode: bool, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.} - -proc readSizePrefix(stream: P2PStream, - deadline: Future[void]): Future[int] {.async.} = - trace "about to read msg size prefix" - var parser: VarintParser[uint64, ProtoBuf] - while true: - var nextByte: byte - var readNextByte = stream.readExactly(addr nextByte, 1) - await readNextByte or deadline - if not readNextByte.finished: - trace "size prefix byte not received in time" - return -1 - case parser.feedByte(nextByte) - of Done: - let res = parser.getResult - if res > uint64(REQ_RESP_MAX_SIZE): - trace "size prefix outside of range", res - return -1 - else: - trace "got size prefix", res - return int(res) - of Overflow: - trace "size prefix overflow" - return -1 - of Incomplete: - continue - -proc readMsgBytes(stream: P2PStream, - withResponseCode: bool, - deadline: Future[void]): Future[Bytes] {.async.} = - trace "about to read message bytes", withResponseCode - - try: - if withResponseCode: - var responseCode: byte - trace "about to read response code" - var readResponseCode = stream.readExactly(addr responseCode, 1) - await readResponseCode or deadline - - if not readResponseCode.finished: - trace "response code not received in time" - return - - if responseCode > 
ResponseCode.high.byte: - trace "invalid response code", responseCode - return - - logScope: responseCode = ResponseCode(responseCode) - trace "got response code" - - case ResponseCode(responseCode) - of InvalidRequest, ServerError: - let responseErrMsg = await readChunk(stream, string, false, deadline) - debug "P2P request resulted in error", responseErrMsg - return - - of Success: - # The response is OK, the execution continues below - discard - - var sizePrefix = await readSizePrefix(stream, deadline) - trace "got msg size prefix", sizePrefix - - if sizePrefix == -1: - debug "Failed to read an incoming message size prefix", peer = stream.peer - return - - if sizePrefix == 0: - debug "Received SSZ with zero size", peer = stream.peer - return - - trace "about to read msg bytes", len = sizePrefix - var msgBytes = newSeq[byte](sizePrefix) - var readBody = stream.readExactly(addr msgBytes[0], sizePrefix) - await readBody or deadline - if not readBody.finished: - trace "msg bytes not received in time" - return - - trace "got message bytes", len = sizePrefix - return msgBytes - - except TransportIncompleteError: - return @[] - -proc readChunk(stream: P2PStream, - MsgType: type, - withResponseCode: bool, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = - var msgBytes = await stream.readMsgBytes(withResponseCode, deadline) - try: - if msgBytes.len > 0: - return some SSZ.decode(msgBytes, MsgType) - except SerializationError as err: - debug "Failed to decode a network message", - msgBytes, errMsg = err.formatMsg("") - return - -proc readResponse( - stream: P2PStream, - MsgType: type, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = - - when MsgType is seq: - type E = ElemType(MsgType) - var results: MsgType - while true: - let nextRes = await readChunk(stream, E, true, deadline) - if nextRes.isNone: break - results.add nextRes.get - if results.len > 0: - return some(results) - else: - return await readChunk(stream, MsgType, true, deadline) - -proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = - var s = init OutputStream - s.append byte(responseCode) - s.appendVarint errMsg.len - s.appendValue SSZ, errMsg - s.getOutput - -proc sendErrorResponse(peer: Peer, - stream: P2PStream, - err: ref SerializationError, - msgName: string, - msgBytes: Bytes) {.async.} = - debug "Received an invalid request", - peer, msgName, msgBytes, errMsg = err.formatMsg("") - - let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg")) - await stream.writeAllBytes(responseBytes) - await stream.close() - -proc sendErrorResponse(peer: Peer, - stream: P2PStream, - responseCode: ResponseCode, - errMsg: string) {.async.} = - debug "Error processing request", peer, responseCode, errMsg - - let responseBytes = encodeErrorMsg(ServerError, errMsg) - await stream.writeAllBytes(responseBytes) - await stream.close() - -proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} = - var deadline = sleepAsync RESP_TIMEOUT - var streamFut = peer.network.openStream(peer, protocolId) - await streamFut or deadline - if not streamFut.finished: - # TODO: we are returning here because the deadline passed, but - # the stream can still be opened eventually a bit later. Who is - # going to close it then? 
- raise newException(TransmissionError, "Failed to open LibP2P stream") - - let stream = streamFut.read - defer: - await safeClose(stream) - - var s = init OutputStream - s.appendVarint requestBytes.len.uint64 - s.append requestBytes - let bytes = s.getOutput - await stream.writeAllBytes(bytes) - -# TODO There is too much duplication in the responder functions, but -# I hope to reduce this when I increse the reliance on output streams. -proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} = - var s = init OutputStream - s.append byte(Success) - s.appendVarint payload.len.uint64 - s.append payload - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} = - var s = init OutputStream - s.append byte(Success) - s.appendValue SSZ, sizePrefixed(val) - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} = - var s = init OutputStream - for chunk in chunks: - s.append byte(Success) - s.appendValue SSZ, sizePrefixed(chunk) - - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, - ResponseMsg: type, - timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} = - var deadline = sleepAsync timeout - - # Open a new LibP2P stream - var streamFut = peer.network.openStream(peer, protocolId) - await streamFut or deadline - if not streamFut.finished: - # TODO: we are returning here because the deadline passed, but - # the stream can still be opened eventually a bit later. Who is - # going to close it then? - return none(ResponseMsg) - - let stream = streamFut.read - defer: - await safeClose(stream) - - # Send the request - var s = init OutputStream - s.appendVarint requestBytes.len.uint64 - s.append requestBytes - let bytes = s.getOutput - await stream.writeAllBytes(bytes) - - # Read the response - return await stream.readResponse(ResponseMsg, deadline) - -proc init*[MsgType](T: type Responder[MsgType], - peer: Peer, stream: P2PStream): T = - T(UntypedResponder(peer: peer, stream: stream)) - -template write*[M](r: var Responder[M], val: auto): auto = - mixin send - type Msg = M - type MsgRec = RecType(Msg) - when MsgRec is seq|openarray: - type E = ElemType(MsgRec) - when val is E: - sendResponseChunkObj(UntypedResponder(r), val) - elif val is MsgRec: - sendResponseChunks(UntypedResponder(r), val) - else: - {.fatal: "Unepected message type".} - else: - send(r, val) - -proc performProtocolHandshakes*(peer: Peer) {.async.} = - var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len) - for protocol in allProtocols: - if protocol.handshake != nil: - subProtocolsHandshakes.add((protocol.handshake)(peer, nil)) - - await all(subProtocolsHandshakes) - -template initializeConnection*(peer: Peer): auto = - performProtocolHandshakes(peer) - -proc initProtocol(name: string, - peerInit: PeerStateInitializer, - networkInit: NetworkStateInitializer): ProtocolInfoObj = - result.name = name - result.messages = @[] - result.peerStateInitializer = peerInit - result.networkStateInitializer = networkInit - -proc registerProtocol(protocol: ProtocolInfo) = - # TODO: This can be done at compile-time in the future - let pos = lowerBound(gProtocols, protocol) - gProtocols.insert(protocol, pos) - for i in 0 ..< gProtocols.len: - gProtocols[i].index = i - -proc setEventHandlers(p: ProtocolInfo, 
- handshake: HandshakeStep, - disconnectHandler: DisconnectionHandler) = - p.handshake = handshake - p.disconnectHandler = disconnectHandler - -proc implementSendProcBody(sendProc: SendProc) = - let - msg = sendProc.msg - UntypedResponder = bindSym "UntypedResponder" - await = ident "await" - - proc sendCallGenerator(peer, bytes: NimNode): NimNode = - if msg.kind != msgResponse: - let msgProto = getRequestProtoName(msg.procDef) - case msg.kind - of msgRequest: - let - timeout = msg.timeoutParam[0] - ResponseRecord = msg.response.recName - quote: - makeEth2Request(`peer`, `msgProto`, `bytes`, - `ResponseRecord`, `timeout`) - else: - quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`) - else: - quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`) - - sendProc.useStandardBody(nil, nil, sendCallGenerator) - -proc handleIncomingStream(network: Eth2Node, stream: P2PStream, - MsgType, Format: distinct type) {.async, gcsafe.} = - mixin callUserHandler, RecType - const msgName = typetraits.name(MsgType) - - ## Uncomment this to enable tracing on all incoming requests - ## You can include `msgNameLit` in the condition to select - ## more specific requests: - # when chronicles.runtimeFilteringEnabled: - # setLogLevel(LogLevel.TRACE) - # defer: setLogLevel(LogLevel.DEBUG) - # trace "incoming " & `msgNameLit` & " stream" - - let peer = peerFromStream(network, stream) - - handleIncomingPeer(peer) - - defer: - await safeClose(stream) - - let - deadline = sleepAsync RESP_TIMEOUT - msgBytes = await readMsgBytes(stream, false, deadline) - - if msgBytes.len == 0: - await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg) - return - - type MsgRec = RecType(MsgType) - var msg: MsgRec - try: - msg = decode(Format, msgBytes, MsgRec) - except SerializationError as err: - await sendErrorResponse(peer, stream, err, msgName, msgBytes) - return - except Exception as err: - # TODO. This is temporary code that should be removed after interop. - # It can be enabled only in certain diagnostic builds where it should - # re-raise the exception. 
- debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName - await sendErrorResponse(peer, stream, ServerError, err.msg) - raise err - - try: - logReceivedMsg(peer, MsgType(msg)) - await callUserHandler(peer, stream, msg) - except CatchableError as err: - await sendErrorResponse(peer, stream, ServerError, err.msg) - diff --git a/beacon_chain/libp2p_daemon_backend.nim b/beacon_chain/libp2p_daemon_backend.nim deleted file mode 100644 index 4d57d927a..000000000 --- a/beacon_chain/libp2p_daemon_backend.nim +++ /dev/null @@ -1,266 +0,0 @@ -import - algorithm, typetraits, - stew/varints, stew/shims/[macros, tables], chronos, chronicles, - libp2p/daemon/daemonapi, faststreams/output_stream, serialization, - json_serialization/std/options, eth/p2p/p2p_protocol_dsl, - libp2p_json_serialization, ssz - -export - daemonapi, p2pProtocol, libp2p_json_serialization, ssz - -type - Eth2Node* = ref object of RootObj - daemon*: DaemonAPI - peers*: Table[PeerID, Peer] - protocolStates*: seq[RootRef] - - EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers - - Peer* = ref object - network*: Eth2Node - id*: PeerID - wasDialed*: bool - connectionState*: ConnectionState - protocolStates*: seq[RootRef] - maxInactivityAllowed*: Duration - - ConnectionState* = enum - None, - Connecting, - Connected, - Disconnecting, - Disconnected - - UntypedResponder = object - peer*: Peer - stream*: P2PStream - - Responder*[MsgType] = distinct UntypedResponder - - MessageInfo* = object - name*: string - - # Private fields: - thunk*: ThunkProc - libp2pProtocol: string - printer*: MessageContentPrinter - nextMsgResolver*: NextMsgResolver - - ProtocolInfoObj* = object - name*: string - messages*: seq[MessageInfo] - index*: int # the position of the protocol in the - # ordered list of supported protocols - - # Private fields: - peerStateInitializer*: PeerStateInitializer - networkStateInitializer*: NetworkStateInitializer - handshake*: HandshakeStep - disconnectHandler*: DisconnectionHandler - - ProtocolInfo* = ptr ProtocolInfoObj - - PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.} - NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.} - HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.} - DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.} - ThunkProc* = proc(daemon: DaemonAPI, stream: P2PStream): Future[void] {.gcsafe.} - MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.} - NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.} - - DisconnectionReason* = enum - ClientShutDown - IrrelevantNetwork - FaultOrError - - PeerDisconnected* = object of CatchableError - reason*: DisconnectionReason - - TransmissionError* = object of CatchableError - -template `$`*(peer: Peer): string = $peer.id -chronicles.formatIt(Peer): $it - -# TODO: These exists only as a compatibility layer between the daemon -# APIs and the native LibP2P ones. It won't be necessary once the -# daemon is removed. 
-# -proc writeAllBytes(stream: P2PStream, bytes: seq[byte]) {.async.} = - let sent = await stream.transp.write(bytes) - if sent != bytes.len: - raise newException(TransmissionError, "Failed to deliver msg bytes") - -template readExactly(stream: P2PStream, dst: pointer, dstLen: int): untyped = - readExactly(stream.transp, dst, dstLen) - -template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped = - openStream(node.daemon, peer.id, @[protocolId]) - -# -# End of compatibility layer - -proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer {.gcsafe.} - -template remote*(peer: Peer): untyped = - # TODO: Can we get a proper address here? - peer.id - -proc getPeer*(node: Eth2Node, peerId: PeerID): Peer {.gcsafe.} = - result = node.peers.getOrDefault(peerId) - if result == nil: - result = Peer.init(node, peerId) - node.peers[peerId] = result - -proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer = - node.getPeer(peerInfo.peer) - -proc peerFromStream(node: Eth2Node, stream: P2PStream): Peer {.gcsafe.} = - node.getPeer(stream.peer) - -proc disconnect*(peer: Peer, reason: DisconnectionReason, notifyOtherPeer = false) {.async.} = - # TODO: How should we notify the other peer? - if peer.connectionState notin {Disconnecting, Disconnected}: - peer.connectionState = Disconnecting - await peer.network.daemon.disconnect(peer.id) - peer.connectionState = Disconnected - peer.network.peers.del(peer.id) - -proc safeClose(stream: P2PStream) {.async.} = - if P2PStreamFlags.Closed notin stream.flags: - await close(stream) - -proc handleIncomingPeer*(peer: Peer) {.inline.} = - discard - -include eth/p2p/p2p_backends_helpers -include eth/p2p/p2p_tracing -include libp2p_backends_common - -proc init*(T: type Eth2Node, daemon: DaemonAPI): Future[T] {.async.} = - new result - result.daemon = daemon - result.daemon.userData = result - result.peers = initTable[PeerID, Peer]() - - newSeq result.protocolStates, allProtocols.len - for proto in allProtocols: - if proto.networkStateInitializer != nil: - result.protocolStates[proto.index] = proto.networkStateInitializer(result) - - for msg in proto.messages: - if msg.libp2pProtocol.len > 0 and msg.thunk != nil: - await daemon.addHandler(@[msg.libp2pProtocol], msg.thunk) - -proc init*(T: type Peer, network: Eth2Node, id: PeerID): Peer = - new result - result.id = id - result.network = network - result.connectionState = Connected - result.maxInactivityAllowed = 15.minutes # TODO: Read this from the config - newSeq result.protocolStates, allProtocols.len - for i in 0 ..< allProtocols.len: - let proto = allProtocols[i] - if proto.peerStateInitializer != nil: - result.protocolStates[i] = proto.peerStateInitializer(result) - -proc registerMsg(protocol: ProtocolInfo, - name: string, - thunk: ThunkProc, - libp2pProtocol: string, - printer: MessageContentPrinter) = - protocol.messages.add MessageInfo(name: name, - thunk: thunk, - libp2pProtocol: libp2pProtocol, - printer: printer) - -proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = - var - Format = ident "SSZ" - Responder = bindSym "Responder" - DaemonAPI = bindSym "DaemonAPI" - P2PStream = ident "P2PStream" - OutputStream = bindSym "OutputStream" - Peer = bindSym "Peer" - Eth2Node = bindSym "Eth2Node" - messagePrinter = bindSym "messagePrinter" - milliseconds = bindSym "milliseconds" - registerMsg = bindSym "registerMsg" - initProtocol = bindSym "initProtocol" - bindSymOp = bindSym "bindSym" - errVar = ident "err" - msgVar = ident "msg" - msgBytesVar = ident "msgBytes" - daemonVar = ident "daemon" - 
await = ident "await" - callUserHandler = ident "callUserHandler" - - p.useRequestIds = false - p.useSingleRecordInlining = true - - new result - - result.PeerType = Peer - result.NetworkType = Eth2Node - result.registerProtocol = bindSym "registerProtocol" - result.setEventHandlers = bindSym "setEventHandlers" - result.SerializationFormat = Format - result.ResponderType = Responder - - result.afterProtocolInit = proc (p: P2PProtocol) = - p.onPeerConnected.params.add newIdentDefs(streamVar, P2PStream) - - result.implementMsg = proc (msg: Message) = - let - protocol = msg.protocol - msgName = $msg.ident - msgNameLit = newLit msgName - MsgRecName = msg.recName - MsgStrongRecName = msg.strongRecName - - if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest: - # Request procs need an extra param - the stream where the response - # should be written: - msg.userHandler.params.insert(2, newIdentDefs(streamVar, P2PStream)) - msg.initResponderCall.add streamVar - - ## - ## Implemenmt Thunk - ## - var thunkName: NimNode - - if msg.userHandler != nil: - thunkName = ident(msgName & "_thunk") - let userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar, streamVar]) - msg.defineThunk quote do: - template `callUserHandler`(`peerVar`: `Peer`, - `streamVar`: `P2PStream`, - `msgVar`: `MsgRecName`): untyped = - `userHandlerCall` - - proc `thunkName`(`daemonVar`: `DaemonAPI`, - `streamVar`: `P2PStream`): Future[void] {.gcsafe.} = - return handleIncomingStream(`Eth2Node`(`daemonVar`.userData), `streamVar`, - `MsgStrongRecName`, `Format`) - else: - thunkName = newNilLit() - - ## - ## Implement Senders and Handshake - ## - if msg.kind == msgHandshake: - macros.error "Handshake messages are not supported in LibP2P protocols" - else: - var sendProc = msg.createSendProc() - implementSendProcBody sendProc - - protocol.outProcRegistrations.add( - newCall(registerMsg, - protocol.protocolInfoVar, - msgNameLit, - thunkName, - getRequestProtoName(msg.procDef), - newTree(nnkBracketExpr, messagePrinter, MsgRecName))) - - result.implementProtocolInit = proc (p: P2PProtocol): NimNode = - return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) - diff --git a/beacon_chain/request_manager.nim b/beacon_chain/request_manager.nim index bf4a41f73..1e7c19520 100644 --- a/beacon_chain/request_manager.nim +++ b/beacon_chain/request_manager.nim @@ -33,56 +33,39 @@ proc fetchAncestorBlocksFromPeer( debug "Error while fetching ancestor blocks", err = err.msg, root = rec.root, peer = peer -when networkBackend == libp2p: +proc fetchAncestorBlocksFromNetwork( + network: Eth2Node, + rec: FetchRecord, + responseHandler: FetchAncestorsResponseHandler) {.async.} = + var peer: Peer + try: + peer = await network.peerPool.acquire() + let blocks = await peer.beaconBlocksByRoot([rec.root]) + if blocks.isSome: + for b in blocks.get: + responseHandler(b) + except CatchableError as err: + debug "Error while fetching ancestor blocks", + err = err.msg, root = rec.root, peer = peer + finally: + if not(isNil(peer)): + network.peerPool.release(peer) - proc fetchAncestorBlocksFromNetwork( - network: Eth2Node, - rec: FetchRecord, - responseHandler: FetchAncestorsResponseHandler) {.async.} = - var peer: Peer - try: - peer = await network.peerPool.acquire() - let blocks = await peer.beaconBlocksByRoot([rec.root]) - if blocks.isSome: - for b in blocks.get: - responseHandler(b) - except CatchableError as err: - debug "Error while fetching ancestor blocks", - err = err.msg, root = rec.root, peer = peer - finally: - if not(isNil(peer)): - 
network.peerPool.release(peer) - - proc fetchAncestorBlocks*(requestManager: RequestManager, - roots: seq[FetchRecord], - responseHandler: FetchAncestorsResponseHandler) = - # TODO: we could have some fancier logic here: - # - # * Keeps track of what was requested - # (this would give a little bit of time for the asked peer to respond) - # - # * Keep track of the average latency of each peer - # (we can give priority to peers with better latency) - # - const ParallelRequests = 2 - - for i in 0 ..< ParallelRequests: - traceAsyncErrors fetchAncestorBlocksFromNetwork(requestManager.network, - roots.sample(), - responseHandler) -elif networkBackend == libp2pDaemon: - proc fetchAncestorBlocks*(requestManager: RequestManager, +proc fetchAncestorBlocks*(requestManager: RequestManager, roots: seq[FetchRecord], responseHandler: FetchAncestorsResponseHandler) = - # TODO: we could have some fancier logic here: - # - # * Keeps track of what was requested - # (this would give a little bit of time for the asked peer to respond) - # - # * Keep track of the average latency of each peer - # (we can give priority to peers with better latency) - # - const ParallelRequests = 2 + # TODO: we could have some fancier logic here: + # + # * Keeps track of what was requested + # (this would give a little bit of time for the asked peer to respond) + # + # * Keep track of the average latency of each peer + # (we can give priority to peers with better latency) + # + const ParallelRequests = 2 + + for i in 0 ..< ParallelRequests: + traceAsyncErrors fetchAncestorBlocksFromNetwork(requestManager.network, + roots.sample(), + responseHandler) - for peer in requestManager.network.randomPeers(ParallelRequests, BeaconSync): - traceAsyncErrors peer.fetchAncestorBlocksFromPeer(roots.sample(), responseHandler) diff --git a/beacon_chain/sync_protocol.nim b/beacon_chain/sync_protocol.nim index 8f386a12e..d378c37fa 100644 --- a/beacon_chain/sync_protocol.nim +++ b/beacon_chain/sync_protocol.nim @@ -1,12 +1,9 @@ import options, tables, sets, macros, - chronicles, chronos, stew/ranges/bitranges, + chronicles, chronos, stew/ranges/bitranges, libp2p/switch, spec/[datatypes, crypto, digest, helpers], beacon_node_types, eth2_network, block_pool, ssz -when networkBackend == libp2p: - import libp2p/switch - logScope: topics = "sync" diff --git a/beacon_chain/version.nim b/beacon_chain/version.nim index 06b9eb2fb..9376c7e95 100644 --- a/beacon_chain/version.nim +++ b/beacon_chain/version.nim @@ -1,15 +1,3 @@ -type - NetworkBackendType* = enum - libp2p - libp2pDaemon - -const - NETWORK_TYPE {.strdefine.} = "libp2p" - - networkBackend* = when NETWORK_TYPE == "libp2p": libp2p - elif NETWORK_TYPE == "libp2p_daemon": libp2pDaemon - else: {.fatal: "The 'NETWORK_TYPE' should be either 'libp2p', 'libp2p_daemon'" .} - const copyrights* = "Copyright (c) 2019 Status Research & Development GmbH" @@ -30,5 +18,5 @@ const $versionMajor & "." & $versionMinor & "." & $versionBuild fullVersionStr* = - versionAsStr & " (" & gitRevision & ", " & NETWORK_TYPE & ")" + versionAsStr & " (" & gitRevision & ")" diff --git a/scripts/connect_to_testnet.nims b/scripts/connect_to_testnet.nims index f30d4c2cf..7c9b6146a 100644 --- a/scripts/connect_to_testnet.nims +++ b/scripts/connect_to_testnet.nims @@ -100,8 +100,6 @@ cli do (skipGoerliKey {. 
rmDir dataDir cd rootDir - if testnet == "testnet1": - nimFlags &= " -d:NETWORK_TYPE=libp2p" exec &"""nim c {nimFlags} -d:"const_preset={preset}" -o:"{beaconNodeBinary}" beacon_chain/beacon_node.nim""" mkDir dumpDir diff --git a/scripts/load-testnet-nim-flags.sh b/scripts/load-testnet-nim-flags.sh index 47bf0b4f9..0374c76f2 100755 --- a/scripts/load-testnet-nim-flags.sh +++ b/scripts/load-testnet-nim-flags.sh @@ -16,7 +16,6 @@ add_var () { } add_var CONST_PRESET -add_var NETWORK_TYPE add_var SLOTS_PER_EPOCH add_var MAX_COMMITTEES_PER_SLOT diff --git a/scripts/testnet0.env b/scripts/testnet0.env index f031f87bc..ba5f434aa 100644 --- a/scripts/testnet0.env +++ b/scripts/testnet0.env @@ -1,5 +1,4 @@ CONST_PRESET=minimal -NETWORK_TYPE=libp2p_daemon QUICKSTART_VALIDATORS=8 RANDOM_VALIDATORS=120 BOOTSTRAP_PORT=9000 diff --git a/scripts/testnet1.env b/scripts/testnet1.env index 732a950a1..56009f64b 100644 --- a/scripts/testnet1.env +++ b/scripts/testnet1.env @@ -1,5 +1,4 @@ CONST_PRESET=minimal -NETWORK_TYPE=libp2p QUICKSTART_VALIDATORS=8 RANDOM_VALIDATORS=120 BOOTSTRAP_PORT=9100 diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index 2c0f4faae..2bdd61ec7 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -39,15 +39,10 @@ build_beacon_node () { $MAKE NIMFLAGS="-o:$OUTPUT_BIN $PARAMS" LOG_LEVEL="${LOG_LEVEL:-DEBUG}" beacon_node } -build_beacon_node $BEACON_NODE_BIN -d:"NETWORK_TYPE=$NETWORK_TYPE" +build_beacon_node $BEACON_NODE_BIN -if [[ "$BOOTSTRAP_NODE_NETWORK_TYPE" != "$NETWORK_TYPE" ]]; then - build_beacon_node $BOOTSTRAP_NODE_BIN \ - --nimcache:nimcache/bootstrap_node \ - -d:"NETWORK_TYPE=$BOOTSTRAP_NODE_NETWORK_TYPE" -else - cp $BEACON_NODE_BIN $BOOTSTRAP_NODE_BIN -fi +# DAEMON TODO: This copy is now unnecessary +cp $BEACON_NODE_BIN $BOOTSTRAP_NODE_BIN if [ ! 
-f "${LAST_VALIDATOR}" ]; then echo Building $DEPLOY_DEPOSIT_CONTRACT_BIN diff --git a/tests/simulation/vars.sh b/tests/simulation/vars.sh index a1dc352b8..977878408 100644 --- a/tests/simulation/vars.sh +++ b/tests/simulation/vars.sh @@ -25,11 +25,6 @@ TOTAL_USER_NODES=${USER_NODES:-0} TOTAL_SYSTEM_NODES=$(( TOTAL_NODES - TOTAL_USER_NODES )) MASTER_NODE=$(( TOTAL_NODES - 1 )) -# You can run a mixed simulation of daemon and native libp2p nodes -# by changing the variables below: -NETWORK_TYPE=${NETWORK_TYPE:-"libp2p"} -BOOTSTRAP_NODE_NETWORK_TYPE=${BOOTSTRAP_NODE_NETWORK_TYPE:-"libp2p"} - SIMULATION_DIR="${SIM_ROOT}/data" METRICS_DIR="${SIM_ROOT}/prometheus" VALIDATORS_DIR="${SIM_ROOT}/validators" diff --git a/wasm/build_ncli.sh b/wasm/build_ncli.sh index 95a312354..9199d86a2 100755 --- a/wasm/build_ncli.sh +++ b/wasm/build_ncli.sh @@ -19,7 +19,7 @@ rm -rf ncli/nimcache ../env.sh nim c \ --cpu:i386 --os:linux --gc:none --threads:off \ -d:release -d:clang -d:emscripten -d:noSignalHandler -d:usemalloc \ - --nimcache:ncli/nimcache -d:"network_type=none" \ + --nimcache:ncli/nimcache \ -u:metrics \ -c ncli diff --git a/wasm/nim.cfg b/wasm/nim.cfg index 75ef2c566..56cdbfa95 100644 --- a/wasm/nim.cfg +++ b/wasm/nim.cfg @@ -1,2 +1 @@ --d:"network_type=none" -u:metrics From d5e4e640b4a3913c53839980d1b93cfd1dfe743c Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Sun, 22 Mar 2020 23:55:01 +0200 Subject: [PATCH 03/58] Merge libp2p_backend into eth2_network --- beacon_chain/eth2_network.nim | 854 ++++++++++++++++++++++++++++++-- beacon_chain/libp2p_backend.nim | 822 ------------------------------ 2 files changed, 819 insertions(+), 857 deletions(-) delete mode 100644 beacon_chain/libp2p_backend.nim diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index cfebe1f38..7629c3b84 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -1,20 +1,141 @@ import - options, tables, strutils, sequtils, - json_serialization, json_serialization/std/net, - metrics, chronos, chronicles, metrics, libp2p/crypto/crypto, - eth/keys, eth/p2p/enode, eth/net/nat, eth/p2p/discoveryv5/enr, - eth2_discovery, version, conf + # Std lib + typetraits, strutils, os, random, algorithm, + options as stdOptions, net as stdNet, + + # Status libs + stew/[io, varints, base58], stew/shims/[macros, tables], stint, + faststreams/output_stream, + json_serialization, json_serialization/std/[net, options], + chronos, chronicles, metrics, + # TODO: create simpler to use libp2p modules that use re-exports + libp2p/[switch, standard_setup, peerinfo, peer, connection, + multiaddress, multicodec, crypto/crypto, + protocols/identify, protocols/protocol], + libp2p/protocols/secure/[secure, secio], + libp2p/protocols/pubsub/[pubsub, floodsub], + libp2p/transports/[transport, tcptransport], + eth/[keys, async_utils], eth/p2p/[enode, p2p_protocol_dsl], + eth/net/nat, eth/p2p/discoveryv5/[enr, node], + + # Beacon node modules + version, conf, eth2_discovery, libp2p_json_serialization, conf, ssz, + peer_pool + +import + eth/p2p/discoveryv5/protocol as discv5_protocol + +export + version, multiaddress, peer_pool, peerinfo, p2pProtocol, + libp2p_json_serialization, ssz + +logScope: + topics = "networking" type KeyPair* = crypto.KeyPair PublicKey* = crypto.PublicKey PrivateKey* = crypto.PrivateKey + Bytes = seq[byte] + P2PStream = Connection + + # TODO Is this really needed? 
+ Eth2Node* = ref object of RootObj + switch*: Switch + discovery*: Eth2DiscoveryProtocol + wantedPeers*: int + peerPool*: PeerPool[Peer, PeerID] + protocolStates*: seq[RootRef] + libp2pTransportLoops*: seq[Future[void]] + + EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers + + Peer* = ref object + network*: Eth2Node + info*: PeerInfo + wasDialed*: bool + discoveryId*: Eth2DiscoveryId + connectionState*: ConnectionState + protocolStates*: seq[RootRef] + maxInactivityAllowed*: Duration + score*: int + + ConnectionState* = enum + None, + Connecting, + Connected, + Disconnecting, + Disconnected + + UntypedResponder = object + peer*: Peer + stream*: P2PStream + + Responder*[MsgType] = distinct UntypedResponder + + MessageInfo* = object + name*: string + + # Private fields: + libp2pCodecName: string + protocolMounter*: MounterProc + printer*: MessageContentPrinter + nextMsgResolver*: NextMsgResolver + + ProtocolInfoObj* = object + name*: string + messages*: seq[MessageInfo] + index*: int # the position of the protocol in the + # ordered list of supported protocols + + # Private fields: + peerStateInitializer*: PeerStateInitializer + networkStateInitializer*: NetworkStateInitializer + handshake*: HandshakeStep + disconnectHandler*: DisconnectionHandler + + ProtocolInfo* = ptr ProtocolInfoObj + + ResponseCode* = enum + Success + InvalidRequest + ServerError + + PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.} + NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.} + HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.} + DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.} + ThunkProc* = LPProtoHandler + MounterProc* = proc(network: Eth2Node) {.gcsafe.} + MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.} + NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.} + + DisconnectionReason* = enum + ClientShutDown + IrrelevantNetwork + FaultOrError + + PeerDisconnected* = object of CatchableError + reason*: DisconnectionReason + + TransmissionError* = object of CatchableError + const clientId* = "Nimbus beacon node v" & fullVersionStr + networkKeyFilename = "privkey.protobuf" -export - version + TCP = net.Protocol.IPPROTO_TCP + HandshakeTimeout = FaultOrError + + # Spec constants + # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains + REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes + GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes + TTFB_TIMEOUT* = 5.seconds + RESP_TIMEOUT* = 10.seconds + + readTimeoutErrorMsg = "Exceeded read timeout for a request" let globalListeningAddr = parseIpAddress("0.0.0.0") @@ -26,13 +147,702 @@ declareCounter gossip_messages_sent, declareCounter gossip_messages_received, "Number of gossip messages received by this peer" +declarePublicGauge libp2p_successful_dials, + "Number of successfully dialed peers" + +declarePublicGauge libp2p_peers, + "Number of active libp2p peers" + +template libp2pProtocol*(name: string, version: int) {.pragma.} + +template `$`*(peer: Peer): string = id(peer.info) +chronicles.formatIt(Peer): $it + +template remote*(peer: Peer): untyped = + peer.info.peerId + +# TODO: This exists only as a compatibility layer between the daemon +# APIs and the native LibP2P ones. It won't be necessary once the +# daemon is removed. 
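
# On the wire, requests below are framed as a protobuf-style varint length
# followed by the SSZ-encoded payload, and each response chunk is additionally
# prefixed with a single ResponseCode byte (Success = 0, InvalidRequest = 1,
# ServerError = 2); readSizePrefix rejects any length above REQ_RESP_MAX_SIZE.
# A minimal, self-contained sketch of the encode side of that framing,
# assuming the payload has already been SSZ-encoded (the real code drives an
# OutputStream instead of building a seq):

proc putVarint(dst: var seq[byte], x: uint64) =
  # unsigned LEB128, as used for the length prefixes
  var v = x
  while true:
    let b = byte(v and 0x7F)
    v = v shr 7
    if v == 0:
      dst.add(b)
      break
    dst.add(b or 0x80)

proc frameRequest(payload: seq[byte]): seq[byte] =
  putVarint(result, uint64(payload.len))
  result.add(payload)

proc frameResponseChunk(code: byte, payload: seq[byte]): seq[byte] =
  result.add(code)                      # 0 = Success, 1 = InvalidRequest, 2 = ServerError
  putVarint(result, uint64(payload.len))
  result.add(payload)

when isMainModule:
  doAssert frameRequest(@[byte 1, 2, 3]) == @[byte 3, 1, 2, 3]
  doAssert frameResponseChunk(0, @[byte 1, 2, 3]) == @[byte 0, 3, 1, 2, 3]
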
+# +template writeAllBytes(stream: P2PStream, bytes: seq[byte]): untyped = + write(stream, bytes) + +template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped = + dial(node.switch, peer.info, protocolId) + +proc peer(stream: P2PStream): PeerID = + # TODO: Can this be `nil`? + stream.peerInfo.peerId +# +# End of compatibility layer + +proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.} + +proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} = + let peerId = peerInfo.peerId + result = node.peerPool.getOrDefault(peerId) + if result == nil: + result = Peer.init(node, peerInfo) + +proc peerFromStream(network: Eth2Node, stream: P2PStream): Peer {.gcsafe.} = + # TODO: Can this be `nil`? + return network.getPeer(stream.peerInfo) + +proc getKey*(peer: Peer): PeerID {.inline.} = + result = peer.info.peerId + +proc getFuture*(peer: Peer): Future[void] {.inline.} = + result = peer.info.lifeFuture() + +proc `<`*(a, b: Peer): bool = + result = `<`(a.score, b.score) + +proc disconnect*(peer: Peer, reason: DisconnectionReason, + notifyOtherPeer = false) {.async.} = + # TODO: How should we notify the other peer? + if peer.connectionState notin {Disconnecting, Disconnected}: + peer.connectionState = Disconnecting + await peer.network.switch.disconnect(peer.info) + peer.connectionState = Disconnected + peer.network.peerPool.release(peer) + peer.info.close() + +proc safeClose(stream: P2PStream) {.async.} = + if not stream.closed: + await close(stream) + +proc handleIncomingPeer*(peer: Peer) + +include eth/p2p/p2p_backends_helpers +include eth/p2p/p2p_tracing + +proc getRequestProtoName(fn: NimNode): NimNode = + # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes + # (TODO: file as an issue) + + let pragmas = fn.pragma + if pragmas.kind == nnkPragma and pragmas.len > 0: + for pragma in pragmas: + if pragma.len > 0 and $pragma[0] == "libp2pProtocol": + let protoName = $(pragma[1]) + let protoVer = $(pragma[2].intVal) + return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz") + + return newLit("") + +template raisePeerDisconnected(msg: string, r: DisconnectionReason) = + var e = newException(PeerDisconnected, msg) + e.reason = r + raise e + +proc disconnectAndRaise(peer: Peer, + reason: DisconnectionReason, + msg: string) {.async.} = + let r = reason + await peer.disconnect(r) + raisePeerDisconnected(msg, r) + +proc readChunk(stream: P2PStream, + MsgType: type, + withResponseCode: bool, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.} + +proc readSizePrefix(stream: P2PStream, + deadline: Future[void]): Future[int] {.async.} = + trace "about to read msg size prefix" + var parser: VarintParser[uint64, ProtoBuf] + while true: + var nextByte: byte + var readNextByte = stream.readExactly(addr nextByte, 1) + await readNextByte or deadline + if not readNextByte.finished: + trace "size prefix byte not received in time" + return -1 + case parser.feedByte(nextByte) + of Done: + let res = parser.getResult + if res > uint64(REQ_RESP_MAX_SIZE): + trace "size prefix outside of range", res + return -1 + else: + trace "got size prefix", res + return int(res) + of Overflow: + trace "size prefix overflow" + return -1 + of Incomplete: + continue + +proc readMsgBytes(stream: P2PStream, + withResponseCode: bool, + deadline: Future[void]): Future[Bytes] {.async.} = + trace "about to read message bytes", withResponseCode + + try: + if withResponseCode: + var responseCode: byte + trace "about to read response code" + var 
readResponseCode = stream.readExactly(addr responseCode, 1) + await readResponseCode or deadline + + if not readResponseCode.finished: + trace "response code not received in time" + return + + if responseCode > ResponseCode.high.byte: + trace "invalid response code", responseCode + return + + logScope: responseCode = ResponseCode(responseCode) + trace "got response code" + + case ResponseCode(responseCode) + of InvalidRequest, ServerError: + let responseErrMsg = await readChunk(stream, string, false, deadline) + debug "P2P request resulted in error", responseErrMsg + return + + of Success: + # The response is OK, the execution continues below + discard + + var sizePrefix = await readSizePrefix(stream, deadline) + trace "got msg size prefix", sizePrefix + + if sizePrefix == -1: + debug "Failed to read an incoming message size prefix", peer = stream.peer + return + + if sizePrefix == 0: + debug "Received SSZ with zero size", peer = stream.peer + return + + trace "about to read msg bytes", len = sizePrefix + var msgBytes = newSeq[byte](sizePrefix) + var readBody = stream.readExactly(addr msgBytes[0], sizePrefix) + await readBody or deadline + if not readBody.finished: + trace "msg bytes not received in time" + return + + trace "got message bytes", len = sizePrefix + return msgBytes + + except TransportIncompleteError: + return @[] + +proc readChunk(stream: P2PStream, + MsgType: type, + withResponseCode: bool, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = + var msgBytes = await stream.readMsgBytes(withResponseCode, deadline) + try: + if msgBytes.len > 0: + return some SSZ.decode(msgBytes, MsgType) + except SerializationError as err: + debug "Failed to decode a network message", + msgBytes, errMsg = err.formatMsg("") + return + +proc readResponse( + stream: P2PStream, + MsgType: type, + deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = + + when MsgType is seq: + type E = ElemType(MsgType) + var results: MsgType + while true: + let nextRes = await readChunk(stream, E, true, deadline) + if nextRes.isNone: break + results.add nextRes.get + if results.len > 0: + return some(results) + else: + return await readChunk(stream, MsgType, true, deadline) + +proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = + var s = init OutputStream + s.append byte(responseCode) + s.appendVarint errMsg.len + s.appendValue SSZ, errMsg + s.getOutput + +proc sendErrorResponse(peer: Peer, + stream: P2PStream, + err: ref SerializationError, + msgName: string, + msgBytes: Bytes) {.async.} = + debug "Received an invalid request", + peer, msgName, msgBytes, errMsg = err.formatMsg("") + + let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg")) + await stream.writeAllBytes(responseBytes) + await stream.close() + +proc sendErrorResponse(peer: Peer, + stream: P2PStream, + responseCode: ResponseCode, + errMsg: string) {.async.} = + debug "Error processing request", peer, responseCode, errMsg + + let responseBytes = encodeErrorMsg(ServerError, errMsg) + await stream.writeAllBytes(responseBytes) + await stream.close() + +proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} = + var deadline = sleepAsync RESP_TIMEOUT + var streamFut = peer.network.openStream(peer, protocolId) + await streamFut or deadline + if not streamFut.finished: + # TODO: we are returning here because the deadline passed, but + # the stream can still be opened eventually a bit later. Who is + # going to close it then? 
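# `streamFut or deadline` completes as soon as either future finishes, so the
# `finished` check above is what tells a successful dial apart from
# RESP_TIMEOUT firing first; a dial that completes after this point is not
# closed here, which is what the TODO refers to.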
+ raise newException(TransmissionError, "Failed to open LibP2P stream") + + let stream = streamFut.read + defer: + await safeClose(stream) + + var s = init OutputStream + s.appendVarint requestBytes.len.uint64 + s.append requestBytes + let bytes = s.getOutput + await stream.writeAllBytes(bytes) + +# TODO There is too much duplication in the responder functions, but +# I hope to reduce this when I increse the reliance on output streams. +proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} = + var s = init OutputStream + s.append byte(Success) + s.appendVarint payload.len.uint64 + s.append payload + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} = + var s = init OutputStream + s.append byte(Success) + s.appendValue SSZ, sizePrefixed(val) + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} = + var s = init OutputStream + for chunk in chunks: + s.append byte(Success) + s.appendValue SSZ, sizePrefixed(chunk) + + let bytes = s.getOutput + await responder.stream.writeAllBytes(bytes) + +proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, + ResponseMsg: type, + timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} = + var deadline = sleepAsync timeout + + # Open a new LibP2P stream + var streamFut = peer.network.openStream(peer, protocolId) + await streamFut or deadline + if not streamFut.finished: + # TODO: we are returning here because the deadline passed, but + # the stream can still be opened eventually a bit later. Who is + # going to close it then? + return none(ResponseMsg) + + let stream = streamFut.read + defer: + await safeClose(stream) + + # Send the request + var s = init OutputStream + s.appendVarint requestBytes.len.uint64 + s.append requestBytes + let bytes = s.getOutput + await stream.writeAllBytes(bytes) + + # Read the response + return await stream.readResponse(ResponseMsg, deadline) + +proc init*[MsgType](T: type Responder[MsgType], + peer: Peer, stream: P2PStream): T = + T(UntypedResponder(peer: peer, stream: stream)) + +template write*[M](r: var Responder[M], val: auto): auto = + mixin send + type Msg = M + type MsgRec = RecType(Msg) + when MsgRec is seq|openarray: + type E = ElemType(MsgRec) + when val is E: + sendResponseChunkObj(UntypedResponder(r), val) + elif val is MsgRec: + sendResponseChunks(UntypedResponder(r), val) + else: + {.fatal: "Unepected message type".} + else: + send(r, val) + +proc performProtocolHandshakes*(peer: Peer) {.async.} = + var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len) + for protocol in allProtocols: + if protocol.handshake != nil: + subProtocolsHandshakes.add((protocol.handshake)(peer, nil)) + + await all(subProtocolsHandshakes) + +template initializeConnection*(peer: Peer): auto = + performProtocolHandshakes(peer) + +proc initProtocol(name: string, + peerInit: PeerStateInitializer, + networkInit: NetworkStateInitializer): ProtocolInfoObj = + result.name = name + result.messages = @[] + result.peerStateInitializer = peerInit + result.networkStateInitializer = networkInit + +proc registerProtocol(protocol: ProtocolInfo) = + # TODO: This can be done at compile-time in the future + let pos = lowerBound(gProtocols, protocol) + gProtocols.insert(protocol, pos) + for i in 0 ..< gProtocols.len: + gProtocols[i].index = i + +proc setEventHandlers(p: ProtocolInfo, 
+ handshake: HandshakeStep, + disconnectHandler: DisconnectionHandler) = + p.handshake = handshake + p.disconnectHandler = disconnectHandler + +proc implementSendProcBody(sendProc: SendProc) = + let + msg = sendProc.msg + UntypedResponder = bindSym "UntypedResponder" + + proc sendCallGenerator(peer, bytes: NimNode): NimNode = + if msg.kind != msgResponse: + let msgProto = getRequestProtoName(msg.procDef) + case msg.kind + of msgRequest: + let + timeout = msg.timeoutParam[0] + ResponseRecord = msg.response.recName + quote: + makeEth2Request(`peer`, `msgProto`, `bytes`, + `ResponseRecord`, `timeout`) + else: + quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`) + else: + quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`) + + sendProc.useStandardBody(nil, nil, sendCallGenerator) + +proc handleIncomingStream(network: Eth2Node, stream: P2PStream, + MsgType, Format: distinct type) {.async, gcsafe.} = + mixin callUserHandler, RecType + const msgName = typetraits.name(MsgType) + + ## Uncomment this to enable tracing on all incoming requests + ## You can include `msgNameLit` in the condition to select + ## more specific requests: + # when chronicles.runtimeFilteringEnabled: + # setLogLevel(LogLevel.TRACE) + # defer: setLogLevel(LogLevel.DEBUG) + # trace "incoming " & `msgNameLit` & " stream" + + let peer = peerFromStream(network, stream) + + handleIncomingPeer(peer) + + defer: + await safeClose(stream) + + let + deadline = sleepAsync RESP_TIMEOUT + msgBytes = await readMsgBytes(stream, false, deadline) + + if msgBytes.len == 0: + await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg) + return + + type MsgRec = RecType(MsgType) + var msg: MsgRec + try: + msg = decode(Format, msgBytes, MsgRec) + except SerializationError as err: + await sendErrorResponse(peer, stream, err, msgName, msgBytes) + return + except Exception as err: + # TODO. This is temporary code that should be removed after interop. + # It can be enabled only in certain diagnostic builds where it should + # re-raise the exception. 
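# A SerializationError has already been answered with an InvalidRequest chunk
# in the branch above; anything else that escapes decoding here (or the user
# handler further down) is reported back to the requester as ServerError.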
+ debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName + await sendErrorResponse(peer, stream, ServerError, err.msg) + raise err + + try: + logReceivedMsg(peer, MsgType(msg)) + await callUserHandler(peer, stream, msg) + except CatchableError as err: + await sendErrorResponse(peer, stream, ServerError, err.msg) + +proc handleOutgoingPeer*(peer: Peer): Future[void] {.async.} = + let network = peer.network + + proc onPeerClosed(udata: pointer) {.gcsafe.} = + debug "Peer (outgoing) lost", peer = $peer.info + libp2p_peers.set int64(len(network.peerPool)) + + let res = await network.peerPool.addOutgoingPeer(peer) + if res: + debug "Peer (outgoing) has been added to PeerPool", peer = $peer.info + peer.getFuture().addCallback(onPeerClosed) + libp2p_peers.set int64(len(network.peerPool)) + +proc handleIncomingPeer*(peer: Peer) = + let network = peer.network + + proc onPeerClosed(udata: pointer) {.gcsafe.} = + debug "Peer (incoming) lost", peer = $peer.info + libp2p_peers.set int64(len(network.peerPool)) + + let res = network.peerPool.addIncomingPeerNoWait(peer) + if res: + debug "Peer (incoming) has been added to PeerPool", peer = $peer.info + peer.getFuture().addCallback(onPeerClosed) + libp2p_peers.set int64(len(network.peerPool)) + +proc toPeerInfo*(r: enr.TypedRecord): PeerInfo = + if r.secp256k1.isSome: + var pubKey: keys.PublicKey + if recoverPublicKey(r.secp256k1.get, pubKey) != EthKeysStatus.Success: + return # TODO + + let peerId = PeerID.init crypto.PublicKey(scheme: Secp256k1, skkey: pubKey) + var addresses = newSeq[MultiAddress]() + + if r.ip.isSome and r.tcp.isSome: + let ip = IpAddress(family: IpAddressFamily.IPv4, + address_v4: r.ip.get) + addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get) + + if r.ip6.isSome: + let ip = IpAddress(family: IpAddressFamily.IPv6, + address_v6: r.ip6.get) + if r.tcp6.isSome: + addresses.add MultiAddress.init(ip, TCP, Port r.tcp6.get) + elif r.tcp.isSome: + addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get) + else: + discard + + if addresses.len > 0: + return PeerInfo.init(peerId, addresses) + +proc toPeerInfo(r: Option[enr.TypedRecord]): PeerInfo = + if r.isSome: + return r.get.toPeerInfo + +proc dialPeer*(node: Eth2Node, peerInfo: PeerInfo) {.async.} = + logScope: peer = $peerInfo + + debug "Connecting to peer" + await node.switch.connect(peerInfo) + var peer = node.getPeer(peerInfo) + peer.wasDialed = true + + debug "Initializing connection" + await initializeConnection(peer) + + inc libp2p_successful_dials + debug "Network handshakes completed" + + await handleOutgoingPeer(peer) + +proc runDiscoveryLoop*(node: Eth2Node) {.async.} = + debug "Starting discovery loop" + + while true: + let currentPeerCount = node.peerPool.len + if currentPeerCount < node.wantedPeers: + try: + let discoveredPeers = + node.discovery.randomNodes(node.wantedPeers - currentPeerCount) + debug "Discovered peers", peer = $discoveredPeers + for peer in discoveredPeers: + try: + let peerInfo = peer.record.toTypedRecord.toPeerInfo + if peerInfo != nil and peerInfo.id notin node.switch.connections: + # TODO do this in parallel + await node.dialPeer(peerInfo) + except CatchableError as err: + debug "Failed to connect to peer", peer = $peer, err = err.msg + except CatchableError as err: + debug "Failure in discovery", err = err.msg + + await sleepAsync seconds(1) + +proc init*(T: type Eth2Node, conf: BeaconNodeConf, + switch: Switch, ip: IpAddress, privKey: keys.PrivateKey): T = + new result + result.switch = switch + result.discovery = 
Eth2DiscoveryProtocol.new(conf, ip, privKey.data) + result.wantedPeers = conf.maxPeers + result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers) + + newSeq result.protocolStates, allProtocols.len + for proto in allProtocols: + if proto.networkStateInitializer != nil: + result.protocolStates[proto.index] = proto.networkStateInitializer(result) + + for msg in proto.messages: + if msg.protocolMounter != nil: + msg.protocolMounter result + +template publicKey*(node: Eth2Node): keys.PublicKey = + node.discovery.privKey.getPublicKey + +template addKnownPeer*(node: Eth2Node, peer: ENode|enr.Record) = + node.discovery.addNode peer + +proc start*(node: Eth2Node) {.async.} = + node.discovery.open() + node.libp2pTransportLoops = await node.switch.start() + traceAsyncErrors node.runDiscoveryLoop() + +proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer = + new result + result.info = info + result.network = network + result.connectionState = Connected + result.maxInactivityAllowed = 15.minutes # TODO: Read this from the config + newSeq result.protocolStates, allProtocols.len + for i in 0 ..< allProtocols.len: + let proto = allProtocols[i] + if proto.peerStateInitializer != nil: + result.protocolStates[i] = proto.peerStateInitializer(result) + +proc registerMsg(protocol: ProtocolInfo, + name: string, + mounter: MounterProc, + libp2pCodecName: string, + printer: MessageContentPrinter) = + protocol.messages.add MessageInfo(name: name, + protocolMounter: mounter, + libp2pCodecName: libp2pCodecName, + printer: printer) + +proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = + var + Format = ident "SSZ" + Responder = bindSym "Responder" + P2PStream = bindSym "P2PStream" + Peer = bindSym "Peer" + Eth2Node = bindSym "Eth2Node" + messagePrinter = bindSym "messagePrinter" + registerMsg = bindSym "registerMsg" + initProtocol = bindSym "initProtocol" + msgVar = ident "msg" + networkVar = ident "network" + callUserHandler = ident "callUserHandler" + + p.useRequestIds = false + p.useSingleRecordInlining = true + + new result + + result.PeerType = Peer + result.NetworkType = Eth2Node + result.registerProtocol = bindSym "registerProtocol" + result.setEventHandlers = bindSym "setEventHandlers" + result.SerializationFormat = Format + result.ResponderType = Responder + + result.afterProtocolInit = proc (p: P2PProtocol) = + p.onPeerConnected.params.add newIdentDefs(streamVar, P2PStream) + + result.implementMsg = proc (msg: Message) = + let + protocol = msg.protocol + msgName = $msg.ident + msgNameLit = newLit msgName + MsgRecName = msg.recName + MsgStrongRecName = msg.strongRecName + codecNameLit = getRequestProtoName(msg.procDef) + + if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest: + # Request procs need an extra param - the stream where the response + # should be written: + msg.userHandler.params.insert(2, newIdentDefs(streamVar, P2PStream)) + msg.initResponderCall.add streamVar + + ## + ## Implement the Thunk: + ## + ## The protocol handlers in nim-libp2p receive only a `P2PStream` + ## parameter and there is no way to access the wider context (such + ## as the current `Switch`). In our handlers, we may need to list all + ## peers in the current network, so we must keep a reference to the + ## network object in the closure environment of the installed handlers. + ## + ## For this reason, we define a `protocol mounter` proc that will + ## initialize the network object by creating handlers bound to the + ## specific network. 
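  ## The codec string each message is mounted under comes from the
  ## `libp2pProtocol` pragma on the request proc (see `getRequestProtoName`
  ## above): a hypothetical proc declared with
  ## {.libp2pProtocol("beacon_blocks_by_root", 1).} would be registered at
  ## "/eth2/beacon_chain/req/beacon_blocks_by_root/1/ssz".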
+ ## + let + protocolMounterName = ident(msgName & "_mounter") + userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar, streamVar]) + + var mounter: NimNode + if msg.userHandler != nil: + protocol.outRecvProcs.add quote do: + template `callUserHandler`(`peerVar`: `Peer`, + `streamVar`: `P2PStream`, + `msgVar`: `MsgRecName`): untyped = + `userHandlerCall` + + proc `protocolMounterName`(`networkVar`: `Eth2Node`) = + proc thunk(`streamVar`: `P2PStream`, + proto: string): Future[void] {.gcsafe.} = + return handleIncomingStream(`networkVar`, `streamVar`, + `MsgStrongRecName`, `Format`) + + mount `networkVar`.switch, + LPProtocol(codec: `codecNameLit`, handler: thunk) + + mounter = protocolMounterName + else: + mounter = newNilLit() + + ## + ## Implement Senders and Handshake + ## + if msg.kind == msgHandshake: + macros.error "Handshake messages are not supported in LibP2P protocols" + else: + var sendProc = msg.createSendProc() + implementSendProcBody sendProc + + protocol.outProcRegistrations.add( + newCall(registerMsg, + protocol.protocolInfoVar, + msgNameLit, + mounter, + codecNameLit, + newTree(nnkBracketExpr, messagePrinter, MsgRecName))) + + result.implementProtocolInit = proc (p: P2PProtocol): NimNode = + return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) + proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, tcpPort: Port, udpPort: Port] = # defaults result.ip = globalListeningAddr - result.tcpPort = Port(conf.tcpPort) - result.udpPort = Port(conf.udpPort) + result.tcpPort = conf.tcpPort + result.udpPort = conf.udpPort var nat: NatStrategy case conf.nat.toLowerAscii: @@ -63,25 +873,6 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, if extPorts.isSome: (result.tcpPort, result.udpPort) = extPorts.get() -import - os, random, - stew/io, eth/async_utils, - libp2p/[multiaddress, multicodec], - ssz - -export - multiaddress - -import - libp2p/standard_setup, libp2p_backend, libp2p/peerinfo, peer_pool - -export - libp2p_backend, peer_pool, peerinfo - -const - netBackendName* = "libp2p" - networkKeyFilename = "privkey.protobuf" - func asLibp2pKey*(key: keys.PublicKey): PublicKey = PublicKey(scheme: Secp256k1, skkey: key) @@ -99,13 +890,6 @@ proc initAddress*(T: type MultiAddress, str: string): T = template tcpEndPoint(address, port): auto = MultiAddress.init(address, Protocol.IPPROTO_TCP, port) -proc ensureNetworkIdFile(conf: BeaconNodeConf): string = - result = conf.dataDir / networkKeyFilename - if not fileExists(result): - createDir conf.dataDir.string - let pk = PrivateKey.random(Secp256k1) - writeFile(result, pk.getBytes) - proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = let privKeyPath = conf.dataDir / networkKeyFilename var privKey: PrivateKey diff --git a/beacon_chain/libp2p_backend.nim b/beacon_chain/libp2p_backend.nim deleted file mode 100644 index 94e811e9b..000000000 --- a/beacon_chain/libp2p_backend.nim +++ /dev/null @@ -1,822 +0,0 @@ -import - algorithm, typetraits, net as stdNet, - stew/[varints,base58], stew/shims/[macros, tables], chronos, chronicles, - stint, faststreams/output_stream, serialization, metrics, - json_serialization/std/[net, options], - eth/[keys, async_utils], eth/p2p/[enode, p2p_protocol_dsl], - eth/p2p/discoveryv5/[enr, node], - # TODO: create simpler to use libp2p modules that use re-exports - libp2p/[switch, multistream, connection, - multiaddress, peerinfo, peer, - crypto/crypto, protocols/identify, protocols/protocol], - libp2p/muxers/mplex/[mplex, types], - libp2p/protocols/secure/[secure, secio], - 
libp2p/protocols/pubsub/[pubsub, floodsub], - libp2p/transports/[transport, tcptransport], - libp2p_json_serialization, eth2_discovery, conf, ssz, - peer_pool - -import - eth/p2p/discoveryv5/protocol as discv5_protocol - -export - p2pProtocol, libp2p_json_serialization, ssz - -type - Bytes = seq[byte] - P2PStream = Connection - - # TODO Is this really needed? - Eth2Node* = ref object of RootObj - switch*: Switch - discovery*: Eth2DiscoveryProtocol - wantedPeers*: int - peerPool*: PeerPool[Peer, PeerID] - protocolStates*: seq[RootRef] - libp2pTransportLoops*: seq[Future[void]] - - EthereumNode = Eth2Node # needed for the definitions in p2p_backends_helpers - - Peer* = ref object - network*: Eth2Node - info*: PeerInfo - wasDialed*: bool - discoveryId*: Eth2DiscoveryId - connectionState*: ConnectionState - protocolStates*: seq[RootRef] - maxInactivityAllowed*: Duration - score*: int - - ConnectionState* = enum - None, - Connecting, - Connected, - Disconnecting, - Disconnected - - UntypedResponder = object - peer*: Peer - stream*: P2PStream - - Responder*[MsgType] = distinct UntypedResponder - - MessageInfo* = object - name*: string - - # Private fields: - libp2pCodecName: string - protocolMounter*: MounterProc - printer*: MessageContentPrinter - nextMsgResolver*: NextMsgResolver - - ProtocolInfoObj* = object - name*: string - messages*: seq[MessageInfo] - index*: int # the position of the protocol in the - # ordered list of supported protocols - - # Private fields: - peerStateInitializer*: PeerStateInitializer - networkStateInitializer*: NetworkStateInitializer - handshake*: HandshakeStep - disconnectHandler*: DisconnectionHandler - - ProtocolInfo* = ptr ProtocolInfoObj - - ResponseCode* = enum - Success - InvalidRequest - ServerError - - PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.} - NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.} - HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.} - DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.} - ThunkProc* = LPProtoHandler - MounterProc* = proc(network: Eth2Node) {.gcsafe.} - MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.} - NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.} - - DisconnectionReason* = enum - ClientShutDown - IrrelevantNetwork - FaultOrError - - PeerDisconnected* = object of CatchableError - reason*: DisconnectionReason - - TransmissionError* = object of CatchableError - -const - TCP = net.Protocol.IPPROTO_TCP - defaultIncomingReqTimeout = 5000 - HandshakeTimeout = FaultOrError - - # Spec constants - # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/p2p-interface.md#eth-20-network-interaction-domains - REQ_RESP_MAX_SIZE* = 1 * 1024 * 1024 # bytes - GOSSIP_MAX_SIZE* = 1 * 1024 * 1024 # bytes - TTFB_TIMEOUT* = 5.seconds - RESP_TIMEOUT* = 10.seconds - - readTimeoutErrorMsg = "Exceeded read timeout for a request" - -logScope: - topics = "libp2p" - -declarePublicGauge libp2p_successful_dials, - "Number of successfully dialed peers" - -declarePublicGauge libp2p_peers, - "Number of active libp2p peers" - -template libp2pProtocol*(name: string, version: int) {.pragma.} - -template `$`*(peer: Peer): string = id(peer.info) -chronicles.formatIt(Peer): $it - -template remote*(peer: Peer): untyped = - peer.info.peerId - -# TODO: This exists only as a compatibility layer between the daemon -# APIs and the native LibP2P ones. It won't be necessary once the -# daemon is removed. 
-# -template writeAllBytes(stream: P2PStream, bytes: seq[byte]): untyped = - write(stream, bytes) - -template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped = - dial(node.switch, peer.info, protocolId) - -proc peer(stream: P2PStream): PeerID = - # TODO: Can this be `nil`? - stream.peerInfo.peerId -# -# End of compatibility layer - -proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.} - -proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} = - let peerId = peerInfo.peerId - result = node.peerPool.getOrDefault(peerId) - if result == nil: - result = Peer.init(node, peerInfo) - -proc peerFromStream(network: Eth2Node, stream: P2PStream): Peer {.gcsafe.} = - # TODO: Can this be `nil`? - return network.getPeer(stream.peerInfo) - -proc getKey*(peer: Peer): PeerID {.inline.} = - result = peer.info.peerId - -proc getFuture*(peer: Peer): Future[void] {.inline.} = - result = peer.info.lifeFuture() - -proc `<`*(a, b: Peer): bool = - result = `<`(a.score, b.score) - -proc disconnect*(peer: Peer, reason: DisconnectionReason, - notifyOtherPeer = false) {.async.} = - # TODO: How should we notify the other peer? - if peer.connectionState notin {Disconnecting, Disconnected}: - peer.connectionState = Disconnecting - await peer.network.switch.disconnect(peer.info) - peer.connectionState = Disconnected - peer.network.peerPool.release(peer) - peer.info.close() - -proc safeClose(stream: P2PStream) {.async.} = - if not stream.closed: - await close(stream) - -proc handleIncomingPeer*(peer: Peer) - -include eth/p2p/p2p_backends_helpers -include eth/p2p/p2p_tracing - -proc getRequestProtoName(fn: NimNode): NimNode = - # `getCustomPragmaVal` doesn't work yet on regular nnkProcDef nodes - # (TODO: file as an issue) - - let pragmas = fn.pragma - if pragmas.kind == nnkPragma and pragmas.len > 0: - for pragma in pragmas: - if pragma.len > 0 and $pragma[0] == "libp2pProtocol": - let protoName = $(pragma[1]) - let protoVer = $(pragma[2].intVal) - return newLit("/eth2/beacon_chain/req/" & protoName & "/" & protoVer & "/ssz") - - return newLit("") - -template raisePeerDisconnected(msg: string, r: DisconnectionReason) = - var e = newException(PeerDisconnected, msg) - e.reason = r - raise e - -proc disconnectAndRaise(peer: Peer, - reason: DisconnectionReason, - msg: string) {.async.} = - let r = reason - await peer.disconnect(r) - raisePeerDisconnected(msg, r) - -proc readChunk(stream: P2PStream, - MsgType: type, - withResponseCode: bool, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.} - -proc readSizePrefix(stream: P2PStream, - deadline: Future[void]): Future[int] {.async.} = - trace "about to read msg size prefix" - var parser: VarintParser[uint64, ProtoBuf] - while true: - var nextByte: byte - var readNextByte = stream.readExactly(addr nextByte, 1) - await readNextByte or deadline - if not readNextByte.finished: - trace "size prefix byte not received in time" - return -1 - case parser.feedByte(nextByte) - of Done: - let res = parser.getResult - if res > uint64(REQ_RESP_MAX_SIZE): - trace "size prefix outside of range", res - return -1 - else: - trace "got size prefix", res - return int(res) - of Overflow: - trace "size prefix overflow" - return -1 - of Incomplete: - continue - -proc readMsgBytes(stream: P2PStream, - withResponseCode: bool, - deadline: Future[void]): Future[Bytes] {.async.} = - trace "about to read message bytes", withResponseCode - - try: - if withResponseCode: - var responseCode: byte - trace "about to read response code" - var 
readResponseCode = stream.readExactly(addr responseCode, 1) - await readResponseCode or deadline - - if not readResponseCode.finished: - trace "response code not received in time" - return - - if responseCode > ResponseCode.high.byte: - trace "invalid response code", responseCode - return - - logScope: responseCode = ResponseCode(responseCode) - trace "got response code" - - case ResponseCode(responseCode) - of InvalidRequest, ServerError: - let responseErrMsg = await readChunk(stream, string, false, deadline) - debug "P2P request resulted in error", responseErrMsg - return - - of Success: - # The response is OK, the execution continues below - discard - - var sizePrefix = await readSizePrefix(stream, deadline) - trace "got msg size prefix", sizePrefix - - if sizePrefix == -1: - debug "Failed to read an incoming message size prefix", peer = stream.peer - return - - if sizePrefix == 0: - debug "Received SSZ with zero size", peer = stream.peer - return - - trace "about to read msg bytes", len = sizePrefix - var msgBytes = newSeq[byte](sizePrefix) - var readBody = stream.readExactly(addr msgBytes[0], sizePrefix) - await readBody or deadline - if not readBody.finished: - trace "msg bytes not received in time" - return - - trace "got message bytes", len = sizePrefix - return msgBytes - - except TransportIncompleteError: - return @[] - -proc readChunk(stream: P2PStream, - MsgType: type, - withResponseCode: bool, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = - var msgBytes = await stream.readMsgBytes(withResponseCode, deadline) - try: - if msgBytes.len > 0: - return some SSZ.decode(msgBytes, MsgType) - except SerializationError as err: - debug "Failed to decode a network message", - msgBytes, errMsg = err.formatMsg("") - return - -proc readResponse( - stream: P2PStream, - MsgType: type, - deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = - - when MsgType is seq: - type E = ElemType(MsgType) - var results: MsgType - while true: - let nextRes = await readChunk(stream, E, true, deadline) - if nextRes.isNone: break - results.add nextRes.get - if results.len > 0: - return some(results) - else: - return await readChunk(stream, MsgType, true, deadline) - -proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = - var s = init OutputStream - s.append byte(responseCode) - s.appendVarint errMsg.len - s.appendValue SSZ, errMsg - s.getOutput - -proc sendErrorResponse(peer: Peer, - stream: P2PStream, - err: ref SerializationError, - msgName: string, - msgBytes: Bytes) {.async.} = - debug "Received an invalid request", - peer, msgName, msgBytes, errMsg = err.formatMsg("") - - let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg")) - await stream.writeAllBytes(responseBytes) - await stream.close() - -proc sendErrorResponse(peer: Peer, - stream: P2PStream, - responseCode: ResponseCode, - errMsg: string) {.async.} = - debug "Error processing request", peer, responseCode, errMsg - - let responseBytes = encodeErrorMsg(ServerError, errMsg) - await stream.writeAllBytes(responseBytes) - await stream.close() - -proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} = - var deadline = sleepAsync RESP_TIMEOUT - var streamFut = peer.network.openStream(peer, protocolId) - await streamFut or deadline - if not streamFut.finished: - # TODO: we are returning here because the deadline passed, but - # the stream can still be opened eventually a bit later. Who is - # going to close it then? 
- raise newException(TransmissionError, "Failed to open LibP2P stream") - - let stream = streamFut.read - defer: - await safeClose(stream) - - var s = init OutputStream - s.appendVarint requestBytes.len.uint64 - s.append requestBytes - let bytes = s.getOutput - await stream.writeAllBytes(bytes) - -# TODO There is too much duplication in the responder functions, but -# I hope to reduce this when I increse the reliance on output streams. -proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async.} = - var s = init OutputStream - s.append byte(Success) - s.appendVarint payload.len.uint64 - s.append payload - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} = - var s = init OutputStream - s.append byte(Success) - s.appendValue SSZ, sizePrefixed(val) - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} = - var s = init OutputStream - for chunk in chunks: - s.append byte(Success) - s.appendValue SSZ, sizePrefixed(chunk) - - let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) - -proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, - ResponseMsg: type, - timeout: Duration): Future[Option[ResponseMsg]] {.gcsafe, async.} = - var deadline = sleepAsync timeout - - # Open a new LibP2P stream - var streamFut = peer.network.openStream(peer, protocolId) - await streamFut or deadline - if not streamFut.finished: - # TODO: we are returning here because the deadline passed, but - # the stream can still be opened eventually a bit later. Who is - # going to close it then? - return none(ResponseMsg) - - let stream = streamFut.read - defer: - await safeClose(stream) - - # Send the request - var s = init OutputStream - s.appendVarint requestBytes.len.uint64 - s.append requestBytes - let bytes = s.getOutput - await stream.writeAllBytes(bytes) - - # Read the response - return await stream.readResponse(ResponseMsg, deadline) - -proc init*[MsgType](T: type Responder[MsgType], - peer: Peer, stream: P2PStream): T = - T(UntypedResponder(peer: peer, stream: stream)) - -template write*[M](r: var Responder[M], val: auto): auto = - mixin send - type Msg = M - type MsgRec = RecType(Msg) - when MsgRec is seq|openarray: - type E = ElemType(MsgRec) - when val is E: - sendResponseChunkObj(UntypedResponder(r), val) - elif val is MsgRec: - sendResponseChunks(UntypedResponder(r), val) - else: - {.fatal: "Unepected message type".} - else: - send(r, val) - -proc performProtocolHandshakes*(peer: Peer) {.async.} = - var subProtocolsHandshakes = newSeqOfCap[Future[void]](allProtocols.len) - for protocol in allProtocols: - if protocol.handshake != nil: - subProtocolsHandshakes.add((protocol.handshake)(peer, nil)) - - await all(subProtocolsHandshakes) - -template initializeConnection*(peer: Peer): auto = - performProtocolHandshakes(peer) - -proc initProtocol(name: string, - peerInit: PeerStateInitializer, - networkInit: NetworkStateInitializer): ProtocolInfoObj = - result.name = name - result.messages = @[] - result.peerStateInitializer = peerInit - result.networkStateInitializer = networkInit - -proc registerProtocol(protocol: ProtocolInfo) = - # TODO: This can be done at compile-time in the future - let pos = lowerBound(gProtocols, protocol) - gProtocols.insert(protocol, pos) - for i in 0 ..< gProtocols.len: - gProtocols[i].index = i - -proc setEventHandlers(p: ProtocolInfo, 
- handshake: HandshakeStep, - disconnectHandler: DisconnectionHandler) = - p.handshake = handshake - p.disconnectHandler = disconnectHandler - -proc implementSendProcBody(sendProc: SendProc) = - let - msg = sendProc.msg - UntypedResponder = bindSym "UntypedResponder" - await = ident "await" - - proc sendCallGenerator(peer, bytes: NimNode): NimNode = - if msg.kind != msgResponse: - let msgProto = getRequestProtoName(msg.procDef) - case msg.kind - of msgRequest: - let - timeout = msg.timeoutParam[0] - ResponseRecord = msg.response.recName - quote: - makeEth2Request(`peer`, `msgProto`, `bytes`, - `ResponseRecord`, `timeout`) - else: - quote: sendNotificationMsg(`peer`, `msgProto`, `bytes`) - else: - quote: sendResponseChunkBytes(`UntypedResponder`(`peer`), `bytes`) - - sendProc.useStandardBody(nil, nil, sendCallGenerator) - -proc handleIncomingStream(network: Eth2Node, stream: P2PStream, - MsgType, Format: distinct type) {.async, gcsafe.} = - mixin callUserHandler, RecType - const msgName = typetraits.name(MsgType) - - ## Uncomment this to enable tracing on all incoming requests - ## You can include `msgNameLit` in the condition to select - ## more specific requests: - # when chronicles.runtimeFilteringEnabled: - # setLogLevel(LogLevel.TRACE) - # defer: setLogLevel(LogLevel.DEBUG) - # trace "incoming " & `msgNameLit` & " stream" - - let peer = peerFromStream(network, stream) - - handleIncomingPeer(peer) - - defer: - await safeClose(stream) - - let - deadline = sleepAsync RESP_TIMEOUT - msgBytes = await readMsgBytes(stream, false, deadline) - - if msgBytes.len == 0: - await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg) - return - - type MsgRec = RecType(MsgType) - var msg: MsgRec - try: - msg = decode(Format, msgBytes, MsgRec) - except SerializationError as err: - await sendErrorResponse(peer, stream, err, msgName, msgBytes) - return - except Exception as err: - # TODO. This is temporary code that should be removed after interop. - # It can be enabled only in certain diagnostic builds where it should - # re-raise the exception. 
- debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName - await sendErrorResponse(peer, stream, ServerError, err.msg) - raise err - - try: - logReceivedMsg(peer, MsgType(msg)) - await callUserHandler(peer, stream, msg) - except CatchableError as err: - await sendErrorResponse(peer, stream, ServerError, err.msg) - -proc handleOutgoingPeer*(peer: Peer): Future[void] {.async.} = - let network = peer.network - - proc onPeerClosed(udata: pointer) {.gcsafe.} = - debug "Peer (outgoing) lost", peer = $peer.info - libp2p_peers.set int64(len(network.peerPool)) - - let res = await network.peerPool.addOutgoingPeer(peer) - if res: - debug "Peer (outgoing) has been added to PeerPool", peer = $peer.info - peer.getFuture().addCallback(onPeerClosed) - libp2p_peers.set int64(len(network.peerPool)) - -proc handleIncomingPeer*(peer: Peer) = - let network = peer.network - - proc onPeerClosed(udata: pointer) {.gcsafe.} = - debug "Peer (incoming) lost", peer = $peer.info - libp2p_peers.set int64(len(network.peerPool)) - - let res = network.peerPool.addIncomingPeerNoWait(peer) - if res: - debug "Peer (incoming) has been added to PeerPool", peer = $peer.info - peer.getFuture().addCallback(onPeerClosed) - libp2p_peers.set int64(len(network.peerPool)) - -proc toPeerInfo*(r: enr.TypedRecord): PeerInfo = - if r.secp256k1.isSome: - var pubKey: keys.PublicKey - if recoverPublicKey(r.secp256k1.get, pubKey) != EthKeysStatus.Success: - return # TODO - - let peerId = PeerID.init crypto.PublicKey(scheme: Secp256k1, skkey: pubKey) - var addresses = newSeq[MultiAddress]() - - if r.ip.isSome and r.tcp.isSome: - let ip = IpAddress(family: IpAddressFamily.IPv4, - address_v4: r.ip.get) - addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get) - - if r.ip6.isSome: - let ip = IpAddress(family: IpAddressFamily.IPv6, - address_v6: r.ip6.get) - if r.tcp6.isSome: - addresses.add MultiAddress.init(ip, TCP, Port r.tcp6.get) - elif r.tcp.isSome: - addresses.add MultiAddress.init(ip, TCP, Port r.tcp.get) - else: - discard - - if addresses.len > 0: - return PeerInfo.init(peerId, addresses) - -proc toPeerInfo(r: Option[enr.TypedRecord]): PeerInfo = - if r.isSome: - return r.get.toPeerInfo - -proc dialPeer*(node: Eth2Node, peerInfo: PeerInfo) {.async.} = - logScope: peer = $peerInfo - - debug "Connecting to peer" - await node.switch.connect(peerInfo) - var peer = node.getPeer(peerInfo) - peer.wasDialed = true - - debug "Initializing connection" - await initializeConnection(peer) - - inc libp2p_successful_dials - debug "Network handshakes completed" - - await handleOutgoingPeer(peer) - -proc runDiscoveryLoop*(node: Eth2Node) {.async.} = - debug "Starting discovery loop" - - while true: - let currentPeerCount = node.peerPool.len - if currentPeerCount < node.wantedPeers: - try: - let discoveredPeers = - node.discovery.randomNodes(node.wantedPeers - currentPeerCount) - debug "Discovered peers", peer = $discoveredPeers - for peer in discoveredPeers: - try: - let peerInfo = peer.record.toTypedRecord.toPeerInfo - if peerInfo != nil and peerInfo.id notin node.switch.connections: - # TODO do this in parallel - await node.dialPeer(peerInfo) - except CatchableError as err: - debug "Failed to connect to peer", peer = $peer, err = err.msg - except CatchableError as err: - debug "Failure in discovery", err = err.msg - - await sleepAsync seconds(1) - -proc init*(T: type Eth2Node, conf: BeaconNodeConf, - switch: Switch, ip: IpAddress, privKey: keys.PrivateKey): T = - new result - result.switch = switch - result.discovery = 
Eth2DiscoveryProtocol.new(conf, ip, privKey.data) - result.wantedPeers = conf.maxPeers - result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers) - - newSeq result.protocolStates, allProtocols.len - for proto in allProtocols: - if proto.networkStateInitializer != nil: - result.protocolStates[proto.index] = proto.networkStateInitializer(result) - - for msg in proto.messages: - if msg.protocolMounter != nil: - msg.protocolMounter result - -template publicKey*(node: Eth2Node): keys.PublicKey = - node.discovery.privKey.getPublicKey - -template addKnownPeer*(node: Eth2Node, peer: ENode|enr.Record) = - node.discovery.addNode peer - -proc start*(node: Eth2Node) {.async.} = - node.discovery.open() - node.libp2pTransportLoops = await node.switch.start() - traceAsyncErrors node.runDiscoveryLoop() - -proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer = - new result - result.info = info - result.network = network - result.connectionState = Connected - result.maxInactivityAllowed = 15.minutes # TODO: Read this from the config - newSeq result.protocolStates, allProtocols.len - for i in 0 ..< allProtocols.len: - let proto = allProtocols[i] - if proto.peerStateInitializer != nil: - result.protocolStates[i] = proto.peerStateInitializer(result) - -proc registerMsg(protocol: ProtocolInfo, - name: string, - mounter: MounterProc, - libp2pCodecName: string, - printer: MessageContentPrinter) = - protocol.messages.add MessageInfo(name: name, - protocolMounter: mounter, - libp2pCodecName: libp2pCodecName, - printer: printer) - -proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = - var - Format = ident "SSZ" - Responder = bindSym "Responder" - P2PStream = bindSym "P2PStream" - OutputStream = bindSym "OutputStream" - Peer = bindSym "Peer" - Eth2Node = bindSym "Eth2Node" - messagePrinter = bindSym "messagePrinter" - milliseconds = bindSym "milliseconds" - registerMsg = bindSym "registerMsg" - initProtocol = bindSym "initProtocol" - bindSymOp = bindSym "bindSym" - errVar = ident "err" - msgVar = ident "msg" - msgBytesVar = ident "msgBytes" - networkVar = ident "network" - await = ident "await" - callUserHandler = ident "callUserHandler" - - p.useRequestIds = false - p.useSingleRecordInlining = true - - new result - - result.PeerType = Peer - result.NetworkType = Eth2Node - result.registerProtocol = bindSym "registerProtocol" - result.setEventHandlers = bindSym "setEventHandlers" - result.SerializationFormat = Format - result.ResponderType = Responder - - result.afterProtocolInit = proc (p: P2PProtocol) = - p.onPeerConnected.params.add newIdentDefs(streamVar, P2PStream) - - result.implementMsg = proc (msg: Message) = - let - protocol = msg.protocol - msgName = $msg.ident - msgNameLit = newLit msgName - MsgRecName = msg.recName - MsgStrongRecName = msg.strongRecName - codecNameLit = getRequestProtoName(msg.procDef) - - if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest: - # Request procs need an extra param - the stream where the response - # should be written: - msg.userHandler.params.insert(2, newIdentDefs(streamVar, P2PStream)) - msg.initResponderCall.add streamVar - - ## - ## Implement the Thunk: - ## - ## The protocol handlers in nim-libp2p receive only a `P2PStream` - ## parameter and there is no way to access the wider context (such - ## as the current `Switch`). In our handlers, we may need to list all - ## peers in the current network, so we must keep a reference to the - ## network object in the closure environment of the installed handlers. 
- ## - ## For this reason, we define a `protocol mounter` proc that will - ## initialize the network object by creating handlers bound to the - ## specific network. - ## - let - protocolMounterName = ident(msgName & "_mounter") - userHandlerCall = msg.genUserHandlerCall(msgVar, [peerVar, streamVar]) - - var mounter: NimNode - if msg.userHandler != nil: - protocol.outRecvProcs.add quote do: - template `callUserHandler`(`peerVar`: `Peer`, - `streamVar`: `P2PStream`, - `msgVar`: `MsgRecName`): untyped = - `userHandlerCall` - - proc `protocolMounterName`(`networkVar`: `Eth2Node`) = - proc thunk(`streamVar`: `P2PStream`, - proto: string): Future[void] {.gcsafe.} = - return handleIncomingStream(`networkVar`, `streamVar`, - `MsgStrongRecName`, `Format`) - - mount `networkVar`.switch, - LPProtocol(codec: `codecNameLit`, handler: thunk) - - mounter = protocolMounterName - else: - mounter = newNilLit() - - ## - ## Implement Senders and Handshake - ## - if msg.kind == msgHandshake: - macros.error "Handshake messages are not supported in LibP2P protocols" - else: - var sendProc = msg.createSendProc() - implementSendProcBody sendProc - - protocol.outProcRegistrations.add( - newCall(registerMsg, - protocol.protocolInfoVar, - msgNameLit, - mounter, - codecNameLit, - newTree(nnkBracketExpr, messagePrinter, MsgRecName))) - - result.implementProtocolInit = proc (p: P2PProtocol): NimNode = - return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) - From 4623aa81eca77e033c72b1e73b87fc5e78b5e607 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 01:23:21 +0200 Subject: [PATCH 04/58] Clean up some obsolete code --- beacon_chain/beacon_node.nim | 15 ++- beacon_chain/eth2_network.nim | 156 ++++++++++++--------------- tests/simulation/start.sh | 6 +- tests/simulation/vars.sh | 2 +- tests/simulation/wait_master_node.sh | 4 +- tests/test_peer_connection.nim | 2 +- 6 files changed, 82 insertions(+), 103 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 2d8e906ff..70c56af53 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -190,17 +190,15 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async var bootNodes: seq[ENode] var bootEnrs: seq[enr.Record] - for node in conf.bootstrapNodes: addBootstrapNode(node, bootNodes, bootEnrs, ourPubKey) + for node in conf.bootstrapNodes: + addBootstrapNode(node, bootNodes, bootEnrs, ourPubKey) loadBootstrapFile(string conf.bootstrapNodesFile, bootNodes, bootEnrs, ourPubKey) let persistentBootstrapFile = conf.dataDir / "bootstrap_nodes.txt" if fileExists(persistentBootstrapFile): loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) - let - network = await createEth2Node(conf, bootNodes) - addressFile = string(conf.dataDir) / "beacon_node.address" - network.saveConnectionAddressFile(addressFile) + let network = await createEth2Node(conf) let rpcServer = if conf.rpcEnabled: RpcServer.init(conf.rpcAddress, conf.rpcPort) @@ -252,13 +250,12 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async return res proc connectToNetwork(node: BeaconNode) {.async.} = - if node.bootstrapNodes.len > 0: - info "Connecting to bootstrap nodes", bootstrapNodes = node.bootstrapNodes + if node.bootstrapEnrs.len > 0: + info "Connecting to bootstrap nodes", bootstrapEnrs = node.bootstrapEnrs else: info "Waiting for connections" - await node.network.connectToNetwork(node.bootstrapNodes, - node.bootstrapEnrs) + await 
node.network.connectToNetwork(node.bootstrapEnrs) template findIt(s: openarray, predicate: untyped): int = var res = -1 diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 7629c3b84..57655b3c9 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -38,7 +38,6 @@ type PrivateKey* = crypto.PrivateKey Bytes = seq[byte] - P2PStream = Connection # TODO Is this really needed? Eth2Node* = ref object of RootObj @@ -70,7 +69,7 @@ type UntypedResponder = object peer*: Peer - stream*: P2PStream + stream*: Connection Responder*[MsgType] = distinct UntypedResponder @@ -80,8 +79,6 @@ type # Private fields: libp2pCodecName: string protocolMounter*: MounterProc - printer*: MessageContentPrinter - nextMsgResolver*: NextMsgResolver ProtocolInfoObj* = object name*: string @@ -104,12 +101,11 @@ type PeerStateInitializer* = proc(peer: Peer): RootRef {.gcsafe.} NetworkStateInitializer* = proc(network: EthereumNode): RootRef {.gcsafe.} - HandshakeStep* = proc(peer: Peer, stream: P2PStream): Future[void] {.gcsafe.} + HandshakeStep* = proc(peer: Peer, conn: Connection): Future[void] {.gcsafe.} DisconnectionHandler* = proc(peer: Peer): Future[void] {.gcsafe.} ThunkProc* = LPProtoHandler MounterProc* = proc(network: Eth2Node) {.gcsafe.} MessageContentPrinter* = proc(msg: pointer): string {.gcsafe.} - NextMsgResolver* = proc(msgData: SszReader, future: FutureBase) {.gcsafe.} DisconnectionReason* = enum ClientShutDown @@ -148,10 +144,10 @@ declareCounter gossip_messages_received, "Number of gossip messages received by this peer" declarePublicGauge libp2p_successful_dials, - "Number of successfully dialed peers" + "Number of successfully dialed peers" declarePublicGauge libp2p_peers, - "Number of active libp2p peers" + "Number of active libp2p peers" template libp2pProtocol*(name: string, version: int) {.pragma.} @@ -161,21 +157,12 @@ chronicles.formatIt(Peer): $it template remote*(peer: Peer): untyped = peer.info.peerId -# TODO: This exists only as a compatibility layer between the daemon -# APIs and the native LibP2P ones. It won't be necessary once the -# daemon is removed. -# -template writeAllBytes(stream: P2PStream, bytes: seq[byte]): untyped = - write(stream, bytes) - template openStream(node: Eth2Node, peer: Peer, protocolId: string): untyped = dial(node.switch, peer.info, protocolId) -proc peer(stream: P2PStream): PeerID = +func peerId(conn: Connection): PeerID = # TODO: Can this be `nil`? - stream.peerInfo.peerId -# -# End of compatibility layer + conn.peerInfo.peerId proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer {.gcsafe.} @@ -183,11 +170,12 @@ proc getPeer*(node: Eth2Node, peerInfo: PeerInfo): Peer {.gcsafe.} = let peerId = peerInfo.peerId result = node.peerPool.getOrDefault(peerId) if result == nil: + # TODO: We should register this peer in the pool! result = Peer.init(node, peerInfo) -proc peerFromStream(network: Eth2Node, stream: P2PStream): Peer {.gcsafe.} = +proc peerFromStream(network: Eth2Node, conn: Connection): Peer {.gcsafe.} = # TODO: Can this be `nil`? 
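`getPeer` above is a get-or-create lookup: consult the peer pool and fall back to constructing a fresh `Peer` when nothing is found (the TODO notes that the freshly created peer should also be registered). A stripped-down sketch of the same pattern, using an invented `DemoPeer` type and a plain `Table` in place of the peer pool, and including that registration step:

import tables

type DemoPeer = ref object
  id: string

var demoPool = initTable[string, DemoPeer]()

proc getOrCreatePeer(id: string): DemoPeer =
  # Return the pooled instance when we have one; otherwise create it and
  # register it so that later lookups hand back the same object.
  result = demoPool.getOrDefault(id)
  if result == nil:
    result = DemoPeer(id: id)
    demoPool[id] = result

doAssert getOrCreatePeer("peerA") == getOrCreatePeer("peerA")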
- return network.getPeer(stream.peerInfo) + return network.getPeer(conn.peerInfo) proc getKey*(peer: Peer): PeerID {.inline.} = result = peer.info.peerId @@ -208,9 +196,9 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason, peer.network.peerPool.release(peer) peer.info.close() -proc safeClose(stream: P2PStream) {.async.} = - if not stream.closed: - await close(stream) +proc safeClose(conn: Connection) {.async.} = + if not conn.closed: + await close(conn) proc handleIncomingPeer*(peer: Peer) @@ -243,18 +231,18 @@ proc disconnectAndRaise(peer: Peer, await peer.disconnect(r) raisePeerDisconnected(msg, r) -proc readChunk(stream: P2PStream, +proc readChunk(conn: Connection, MsgType: type, withResponseCode: bool, deadline: Future[void]): Future[Option[MsgType]] {.gcsafe.} -proc readSizePrefix(stream: P2PStream, +proc readSizePrefix(conn: Connection, deadline: Future[void]): Future[int] {.async.} = trace "about to read msg size prefix" var parser: VarintParser[uint64, ProtoBuf] while true: var nextByte: byte - var readNextByte = stream.readExactly(addr nextByte, 1) + var readNextByte = conn.readExactly(addr nextByte, 1) await readNextByte or deadline if not readNextByte.finished: trace "size prefix byte not received in time" @@ -274,7 +262,7 @@ proc readSizePrefix(stream: P2PStream, of Incomplete: continue -proc readMsgBytes(stream: P2PStream, +proc readMsgBytes(conn: Connection, withResponseCode: bool, deadline: Future[void]): Future[Bytes] {.async.} = trace "about to read message bytes", withResponseCode @@ -283,7 +271,7 @@ proc readMsgBytes(stream: P2PStream, if withResponseCode: var responseCode: byte trace "about to read response code" - var readResponseCode = stream.readExactly(addr responseCode, 1) + var readResponseCode = conn.readExactly(addr responseCode, 1) await readResponseCode or deadline if not readResponseCode.finished: @@ -299,7 +287,7 @@ proc readMsgBytes(stream: P2PStream, case ResponseCode(responseCode) of InvalidRequest, ServerError: - let responseErrMsg = await readChunk(stream, string, false, deadline) + let responseErrMsg = await conn.readChunk(string, false, deadline) debug "P2P request resulted in error", responseErrMsg return @@ -307,20 +295,20 @@ proc readMsgBytes(stream: P2PStream, # The response is OK, the execution continues below discard - var sizePrefix = await readSizePrefix(stream, deadline) + var sizePrefix = await conn.readSizePrefix(deadline) trace "got msg size prefix", sizePrefix if sizePrefix == -1: - debug "Failed to read an incoming message size prefix", peer = stream.peer + debug "Failed to read an incoming message size prefix", peer = conn.peerId return if sizePrefix == 0: - debug "Received SSZ with zero size", peer = stream.peer + debug "Received SSZ with zero size", peer = conn.peerId return trace "about to read msg bytes", len = sizePrefix var msgBytes = newSeq[byte](sizePrefix) - var readBody = stream.readExactly(addr msgBytes[0], sizePrefix) + var readBody = conn.readExactly(addr msgBytes[0], sizePrefix) await readBody or deadline if not readBody.finished: trace "msg bytes not received in time" @@ -332,11 +320,11 @@ proc readMsgBytes(stream: P2PStream, except TransportIncompleteError: return @[] -proc readChunk(stream: P2PStream, +proc readChunk(conn: Connection, MsgType: type, withResponseCode: bool, deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = - var msgBytes = await stream.readMsgBytes(withResponseCode, deadline) + var msgBytes = await conn.readMsgBytes(withResponseCode, deadline) try: if msgBytes.len > 0: return 
some SSZ.decode(msgBytes, MsgType) @@ -346,7 +334,7 @@ proc readChunk(stream: P2PStream, return proc readResponse( - stream: P2PStream, + conn: Connection, MsgType: type, deadline: Future[void]): Future[Option[MsgType]] {.gcsafe, async.} = @@ -354,13 +342,13 @@ proc readResponse( type E = ElemType(MsgType) var results: MsgType while true: - let nextRes = await readChunk(stream, E, true, deadline) + let nextRes = await conn.readChunk(E, true, deadline) if nextRes.isNone: break results.add nextRes.get if results.len > 0: return some(results) else: - return await readChunk(stream, MsgType, true, deadline) + return await conn.readChunk(MsgType, true, deadline) proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = var s = init OutputStream @@ -370,7 +358,7 @@ proc encodeErrorMsg(responseCode: ResponseCode, errMsg: string): Bytes = s.getOutput proc sendErrorResponse(peer: Peer, - stream: P2PStream, + conn: Connection, err: ref SerializationError, msgName: string, msgBytes: Bytes) {.async.} = @@ -378,18 +366,18 @@ proc sendErrorResponse(peer: Peer, peer, msgName, msgBytes, errMsg = err.formatMsg("") let responseBytes = encodeErrorMsg(InvalidRequest, err.formatMsg("msg")) - await stream.writeAllBytes(responseBytes) - await stream.close() + await conn.write(responseBytes) + await conn.close() proc sendErrorResponse(peer: Peer, - stream: P2PStream, + conn: Connection, responseCode: ResponseCode, errMsg: string) {.async.} = debug "Error processing request", peer, responseCode, errMsg let responseBytes = encodeErrorMsg(ServerError, errMsg) - await stream.writeAllBytes(responseBytes) - await stream.close() + await conn.write(responseBytes) + await conn.close() proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {.async} = var deadline = sleepAsync RESP_TIMEOUT @@ -409,7 +397,7 @@ proc sendNotificationMsg(peer: Peer, protocolId: string, requestBytes: Bytes) {. s.appendVarint requestBytes.len.uint64 s.append requestBytes let bytes = s.getOutput - await stream.writeAllBytes(bytes) + await stream.write(bytes) # TODO There is too much duplication in the responder functions, but # I hope to reduce this when I increse the reliance on output streams. @@ -419,14 +407,14 @@ proc sendResponseChunkBytes(responder: UntypedResponder, payload: Bytes) {.async s.appendVarint payload.len.uint64 s.append payload let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) + await responder.stream.write(bytes) proc sendResponseChunkObj(responder: UntypedResponder, val: auto) {.async.} = var s = init OutputStream s.append byte(Success) s.appendValue SSZ, sizePrefixed(val) let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) + await responder.stream.write(bytes) proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async.} = var s = init OutputStream @@ -435,7 +423,7 @@ proc sendResponseChunks[T](responder: UntypedResponder, chunks: seq[T]) {.async. 
s.appendValue SSZ, sizePrefixed(chunk) let bytes = s.getOutput - await responder.stream.writeAllBytes(bytes) + await responder.stream.write(bytes) proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, ResponseMsg: type, @@ -460,14 +448,14 @@ proc makeEth2Request(peer: Peer, protocolId: string, requestBytes: Bytes, s.appendVarint requestBytes.len.uint64 s.append requestBytes let bytes = s.getOutput - await stream.writeAllBytes(bytes) + await stream.write(bytes) # Read the response return await stream.readResponse(ResponseMsg, deadline) proc init*[MsgType](T: type Responder[MsgType], - peer: Peer, stream: P2PStream): T = - T(UntypedResponder(peer: peer, stream: stream)) + peer: Peer, conn: Connection): T = + T(UntypedResponder(peer: peer, stream: conn)) template write*[M](r: var Responder[M], val: auto): auto = mixin send @@ -539,7 +527,7 @@ proc implementSendProcBody(sendProc: SendProc) = sendProc.useStandardBody(nil, nil, sendCallGenerator) -proc handleIncomingStream(network: Eth2Node, stream: P2PStream, +proc handleIncomingStream(network: Eth2Node, conn: Connection, MsgType, Format: distinct type) {.async, gcsafe.} = mixin callUserHandler, RecType const msgName = typetraits.name(MsgType) @@ -550,21 +538,21 @@ proc handleIncomingStream(network: Eth2Node, stream: P2PStream, # when chronicles.runtimeFilteringEnabled: # setLogLevel(LogLevel.TRACE) # defer: setLogLevel(LogLevel.DEBUG) - # trace "incoming " & `msgNameLit` & " stream" + # trace "incoming " & `msgNameLit` & " conn" - let peer = peerFromStream(network, stream) + let peer = peerFromStream(network, conn) handleIncomingPeer(peer) defer: - await safeClose(stream) + await safeClose(conn) let deadline = sleepAsync RESP_TIMEOUT - msgBytes = await readMsgBytes(stream, false, deadline) + msgBytes = await readMsgBytes(conn, false, deadline) if msgBytes.len == 0: - await sendErrorResponse(peer, stream, ServerError, readTimeoutErrorMsg) + await sendErrorResponse(peer, conn, ServerError, readTimeoutErrorMsg) return type MsgRec = RecType(MsgType) @@ -572,21 +560,21 @@ proc handleIncomingStream(network: Eth2Node, stream: P2PStream, try: msg = decode(Format, msgBytes, MsgRec) except SerializationError as err: - await sendErrorResponse(peer, stream, err, msgName, msgBytes) + await sendErrorResponse(peer, conn, err, msgName, msgBytes) return except Exception as err: # TODO. This is temporary code that should be removed after interop. # It can be enabled only in certain diagnostic builds where it should # re-raise the exception. 
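The responder helpers above (`encodeErrorMsg`, `sendResponseChunkBytes`, `sendResponseChunkObj`, `sendResponseChunks`) all emit the same chunk layout on the wire: one response-code byte, a protobuf-style varint length prefix, then the SSZ-encoded payload. The sketch below reproduces just that framing without faststreams or SSZ; `appendVarint` and `frameChunk` are invented names, and a real payload would come from the `appendValue SSZ, ...` calls shown above.

proc appendVarint(dest: var seq[byte], n: uint64) =
  # Protobuf-style varint: 7 bits per byte, least significant group first,
  # high bit set on every byte except the last.
  var x = n
  while true:
    let b = byte(x and 0x7F)
    x = x shr 7
    if x == 0:
      dest.add b
      break
    dest.add(b or 0x80)

proc frameChunk(responseCode: byte, payload: seq[byte]): seq[byte] =
  result.add responseCode              # Success / InvalidRequest / ServerError
  result.appendVarint payload.len.uint64
  result.add payload

when isMainModule:
  # Success (0) followed by a 3-byte payload
  doAssert frameChunk(0, @[byte 1, 2, 3]) == @[byte 0, 3, 1, 2, 3]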
debug "Crash during serialization", inputBytes = toHex(msgBytes), msgName - await sendErrorResponse(peer, stream, ServerError, err.msg) + await sendErrorResponse(peer, conn, ServerError, err.msg) raise err try: logReceivedMsg(peer, MsgType(msg)) - await callUserHandler(peer, stream, msg) + await callUserHandler(peer, conn, msg) except CatchableError as err: - await sendErrorResponse(peer, stream, ServerError, err.msg) + await sendErrorResponse(peer, conn, ServerError, err.msg) proc handleOutgoingPeer*(peer: Peer): Future[void] {.async.} = let network = peer.network @@ -727,21 +715,18 @@ proc init*(T: type Peer, network: Eth2Node, info: PeerInfo): Peer = proc registerMsg(protocol: ProtocolInfo, name: string, mounter: MounterProc, - libp2pCodecName: string, - printer: MessageContentPrinter) = + libp2pCodecName: string) = protocol.messages.add MessageInfo(name: name, protocolMounter: mounter, - libp2pCodecName: libp2pCodecName, - printer: printer) + libp2pCodecName: libp2pCodecName) proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = var Format = ident "SSZ" Responder = bindSym "Responder" - P2PStream = bindSym "P2PStream" + Connection = bindSym "Connection" Peer = bindSym "Peer" Eth2Node = bindSym "Eth2Node" - messagePrinter = bindSym "messagePrinter" registerMsg = bindSym "registerMsg" initProtocol = bindSym "initProtocol" msgVar = ident "msg" @@ -761,7 +746,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = result.ResponderType = Responder result.afterProtocolInit = proc (p: P2PProtocol) = - p.onPeerConnected.params.add newIdentDefs(streamVar, P2PStream) + p.onPeerConnected.params.add newIdentDefs(streamVar, Connection) result.implementMsg = proc (msg: Message) = let @@ -775,13 +760,13 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = if msg.procDef.body.kind != nnkEmpty and msg.kind == msgRequest: # Request procs need an extra param - the stream where the response # should be written: - msg.userHandler.params.insert(2, newIdentDefs(streamVar, P2PStream)) + msg.userHandler.params.insert(2, newIdentDefs(streamVar, Connection)) msg.initResponderCall.add streamVar ## ## Implement the Thunk: ## - ## The protocol handlers in nim-libp2p receive only a `P2PStream` + ## The protocol handlers in nim-libp2p receive only a `Connection` ## parameter and there is no way to access the wider context (such ## as the current `Switch`). 
In our handlers, we may need to list all ## peers in the current network, so we must keep a reference to the @@ -799,12 +784,12 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = if msg.userHandler != nil: protocol.outRecvProcs.add quote do: template `callUserHandler`(`peerVar`: `Peer`, - `streamVar`: `P2PStream`, + `streamVar`: `Connection`, `msgVar`: `MsgRecName`): untyped = `userHandlerCall` proc `protocolMounterName`(`networkVar`: `Eth2Node`) = - proc thunk(`streamVar`: `P2PStream`, + proc thunk(`streamVar`: `Connection`, proto: string): Future[void] {.gcsafe.} = return handleIncomingStream(`networkVar`, `streamVar`, `MsgStrongRecName`, `Format`) @@ -830,8 +815,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = protocol.protocolInfoVar, msgNameLit, mounter, - codecNameLit, - newTree(nnkBracketExpr, messagePrinter, MsgRecName))) + codecNameLit)) result.implementProtocolInit = proc (p: P2PProtocol): NimNode = return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) @@ -903,8 +887,7 @@ proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = KeyPair(seckey: privKey, pubkey: privKey.getKey()) -proc createEth2Node*(conf: BeaconNodeConf, - bootstrapNodes: seq[ENode]): Future[Eth2Node] {.async.} = +proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async.} = var (extIp, extTcpPort, _) = setupNat(conf) hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) @@ -912,8 +895,7 @@ proc createEth2Node*(conf: BeaconNodeConf, else: @[tcpEndPoint(extIp, extTcpPort)] info "Initializing networking", hostAddress, - announcedAddresses, - bootstrapNodes + announcedAddresses let keys = conf.getPersistentNetKeys # TODO nim-libp2p still doesn't have support for announcing addresses @@ -924,9 +906,14 @@ proc createEth2Node*(conf: BeaconNodeConf, result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, - ip: IpAddress, port: Port): ENode = - let pair = getPersistentNetKeys(conf) - initENode(pair.pubkey.skkey, Address(ip: ip, udpPort: port)) + ip: IpAddress, port: Port): enr.Record = + let + pair = getPersistentNetKeys(conf) + enode = initENode(pair.pubkey.skkey, Address(ip: ip, udpPort: port)) + + return enr.Record.init(1'u64, # sequence number + pair.seckey.asEthKey, + enode.address) proc shortForm*(id: KeyPair): string = $PeerID.init(id.pubkey) @@ -938,7 +925,6 @@ proc toPeerInfo(enode: ENode): PeerInfo = return PeerInfo.init(peerId, addresses) proc connectToNetwork*(node: Eth2Node, - bootstrapNodes: seq[ENode], bootstrapEnrs: seq[enr.Record]) {.async.} = for bootstrapNode in bootstrapEnrs: debug "Adding known peer", peer = bootstrapNode @@ -954,10 +940,6 @@ proc connectToNetwork*(node: Eth2Node, traceAsyncErrors checkIfConnectedToBootstrapNode() -proc saveConnectionAddressFile*(node: Eth2Node, filename: string) = - writeFile(filename, $node.switch.peerInfo.addrs[0] & "/p2p/" & - node.switch.peerInfo.id) - func peersCount*(node: Eth2Node): int = len(node.peerPool) diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index 2bdd61ec7..314ba3192 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -76,8 +76,8 @@ fi rm -f beacon_node.log # Delete any leftover address files from a previous session -if [ -f "${MASTER_NODE_ADDRESS_FILE}" ]; then - rm "${MASTER_NODE_ADDRESS_FILE}" +if [ -f "${MASTER_NODE_PID_FILE}" ]; then + rm "${MASTER_NODE_PID_FILE}" fi # to allow overriding the program names @@ -137,7 +137,7 @@ fi for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do if 
[[ "$i" != "$MASTER_NODE" && "$USE_MULTITAIL" == "no" ]]; then # Wait for the master node to write out its address file - while [ ! -f "${MASTER_NODE_ADDRESS_FILE}" ]; do + while [ ! -f "${MASTER_NODE_PID_FILE}" ]; do sleep 0.1 done fi diff --git a/tests/simulation/vars.sh b/tests/simulation/vars.sh index 977878408..f3837a495 100644 --- a/tests/simulation/vars.sh +++ b/tests/simulation/vars.sh @@ -33,7 +33,7 @@ NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt" BEACON_NODE_BIN="${SIMULATION_DIR}/beacon_node" BOOTSTRAP_NODE_BIN="${SIMULATION_DIR}/bootstrap_node" DEPLOY_DEPOSIT_CONTRACT_BIN="${SIMULATION_DIR}/deploy_deposit_contract" -MASTER_NODE_ADDRESS_FILE="${SIMULATION_DIR}/node-${MASTER_NODE}/beacon_node.address" +MASTER_NODE_PID_FILE="${SIMULATION_DIR}/node-${MASTER_NODE}/beacon_node.pid" BASE_P2P_PORT=30000 BASE_RPC_PORT=7000 diff --git a/tests/simulation/wait_master_node.sh b/tests/simulation/wait_master_node.sh index 6a9c092cd..dc5d08197 100755 --- a/tests/simulation/wait_master_node.sh +++ b/tests/simulation/wait_master_node.sh @@ -1,8 +1,8 @@ #!/bin/bash -if [ ! -f "${MASTER_NODE_ADDRESS_FILE}" ]; then +if [ ! -f "${MASTER_NODE_PID_FILE}" ]; then echo Waiting for master node... - while [ ! -f "${MASTER_NODE_ADDRESS_FILE}" ]; do + while [ ! -f "${MASTER_NODE_PID_FILE}" ]; do sleep 0.1 done fi diff --git a/tests/test_peer_connection.nim b/tests/test_peer_connection.nim index 32ab23858..91e919cc3 100644 --- a/tests/test_peer_connection.nim +++ b/tests/test_peer_connection.nim @@ -35,5 +35,5 @@ asyncTest "connect two nodes": c2.nat = "none" var n2 = await createEth2Node(c2) - await n2.connectToNetwork(bootstrapNodes = @[n1PersistentAddress]) + await n2.connectToNetwork(@[n1PersistentAddress]) From d7c36a677e13dc2f6bdbe1b3802a2abe0490e72c Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 01:34:20 +0200 Subject: [PATCH 05/58] Remove p2pd from the build recipes --- .appveyor.yml | 5 ++--- Makefile | 11 ++--------- azure-pipelines.yml | 10 ++-------- docker/Dockerfile | 12 ++++++------ docker/run_in_docker.sh | 1 - scripts/launch_local_testnet.sh | 8 ++------ 6 files changed, 14 insertions(+), 33 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index 542e50a37..98df7f0c6 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -9,7 +9,6 @@ init: # Scripts called at the very beginning cache: - NimBinaries - - p2pdCache - jsonTestsCache matrix: @@ -36,8 +35,8 @@ build_script: test_script: # the "go-checks" target fails in AppVeyor, for some reason; easier to disable than to debug - - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE - - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" + - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 LOG_LEVEL=TRACE + - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_GO_CHECKS=1 LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" - mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% DISABLE_TEST_FIXTURES_SCRIPT=1 DISABLE_GO_CHECKS=1 test deploy: off diff --git a/Makefile b/Makefile index 096800f8c..dc063481c 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,6 @@ TOOLS_CSV := $(subst $(SPACE),$(COMMA),$(TOOLS)) build-system-checks \ deps \ update \ - p2pd \ test \ $(TOOLS) \ clean_eth2_network_simulation_files \ @@ -80,7 +79,7 @@ build-system-checks: }; \ exit 0 -deps: | deps-common beacon_chain.nims p2pd +deps: | deps-common beacon_chain.nims ifneq ($(USE_LIBBACKTRACE), 0) deps: | 
libbacktrace endif @@ -98,12 +97,6 @@ beacon_chain.nims: libbacktrace: + $(MAKE) -C vendor/nim-libbacktrace BUILD_CXX_LIB=0 -P2PD_CACHE := -p2pd: | go-checks - BUILD_MSG="$(BUILD_MSG) $@" \ - V=$(V) \ - $(ENV_SCRIPT) $(BUILD_SYSTEM_DIR)/scripts/build_p2pd.sh "$(P2PD_CACHE)" - # Windows 10 with WSL enabled, but no distro installed, fails if "../../nimble.sh" is executed directly # in a Makefile recipe but works when prefixing it with `bash`. No idea how the PATH is overridden. DISABLE_TEST_FIXTURES_SCRIPT := 0 @@ -121,7 +114,7 @@ $(TOOLS): | build deps clean_eth2_network_simulation_files: rm -rf tests/simulation/{data,validators} -eth2_network_simulation: | build deps p2pd clean_eth2_network_simulation_files process_dashboard +eth2_network_simulation: | build deps clean_eth2_network_simulation_files process_dashboard + GIT_ROOT="$$PWD" NIMFLAGS="$(NIMFLAGS)" LOG_LEVEL="$(LOG_LEVEL)" tests/simulation/start.sh clean-testnet0: diff --git a/azure-pipelines.yml b/azure-pipelines.yml index db0bff816..844f6a69f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -17,12 +17,6 @@ jobs: key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v2" path: NimBinaries - - task: CacheBeta@1 - displayName: 'cache p2pd binaries' - inputs: - key: p2pdCache | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" - path: p2pdCache - - task: CacheBeta@1 displayName: 'cache official test fixtures' inputs: @@ -71,8 +65,8 @@ jobs: scripts/setup_official_tests.sh jsonTestsCache mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} P2PD_CACHE=p2pdCache LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" file build/beacon_node mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test' diff --git a/docker/Dockerfile b/docker/Dockerfile index 4e3b6ccae..541857601 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -15,11 +15,11 @@ RUN cd /root \ && make -j$(nproc) update \ && make deps -# Please note that the commands above have the goal of caching the compilation -# of Nim and p2pd, but don't depend on the current git revision. This means -# that the cache can become outdated over time and you'll start seeing Nim -# being compiled on every run. If this happens, just prune your docker cache -# to get a fresh up-to-date version of Nim and p2pd. +# Please note that the commands above have the goal of caching the +# compilation of Nim, but don't depend on the current git revision. +# This means that the cache can become outdated over time and you'll +# start seeing Nim being compiled on every run. If this happens, just +# prune your docker cache to get a fresh up-to-date version of Nim. 
ARG GIT_REVISION ARG NETWORK_NIM_FLAGS ARG MARCH_NIM_FLAGS @@ -43,7 +43,7 @@ RUN apt-get -qq update \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # "COPY" creates new image layers, so we cram all we can into one command -COPY --from=build /root/nim-beacon-chain/docker/run_in_docker.sh /root/nim-beacon-chain/build/beacon_node /root/nim-beacon-chain/vendor/go/bin/p2pd /usr/bin/ +COPY --from=build /root/nim-beacon-chain/docker/run_in_docker.sh /root/nim-beacon-chain/build/beacon_node /usr/bin/ MAINTAINER Zahary Karadjov LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node." diff --git a/docker/run_in_docker.sh b/docker/run_in_docker.sh index 9d966c858..df75b6a2a 100755 --- a/docker/run_in_docker.sh +++ b/docker/run_in_docker.sh @@ -6,7 +6,6 @@ # Deal with previous execution of the deamon leaving behind # socket files that prevent the deamon from launching again # inside the container: -killall p2pd rm -rf /tmp/* beacon_node "$@" diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index 2b5a19c91..3ed74fd50 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -141,10 +141,9 @@ BOOTSTRAP_IP="127.0.0.1" --genesis-offset=5 # Delay in seconds cleanup() { - killall beacon_node p2pd &>/dev/null || true + killall beacon_node &>/dev/null || true sleep 2 - killall -9 beacon_node p2pd &>/dev/null || true - rm -f /tmp/nim-p2pd-*.sock || true + killall -9 beacon_node &>/dev/null || true } cleanup @@ -152,9 +151,6 @@ PIDS="" NODES_WITH_VALIDATORS=${NODES_WITH_VALIDATORS:-4} VALIDATORS_PER_NODE=$(( $RANDOM_VALIDATORS / $NODES_WITH_VALIDATORS )) -# for the p2pd path -source env.sh - for NUM_NODE in $(seq 0 $(( ${NUM_NODES} - 1 ))); do if [[ ${NUM_NODE} == 0 ]]; then BOOTSTRAP_ARG="" From 12207eba0727c1d4a39a20faae093a082e7b8260 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 01:36:54 +0200 Subject: [PATCH 06/58] Remove all mentions of Go from the README --- README.md | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 59dc67f25..f8de147dc 100644 --- a/README.md +++ b/README.md @@ -48,9 +48,8 @@ You can check where the beacon chain fits in the Ethereum ecosystem our Two-Poin At the moment, Nimbus has to be built from source. -Nimbus has 4 external dependencies: +Nimbus has the following external dependencies: -* Go 1.12 (for compiling libp2p daemon - being phased out) * Developer tools (C compiler, Make, Bash, Git) * PCRE @@ -61,13 +60,13 @@ Nim is not an external dependency, Nimbus will build its own local copy. On common Linux distributions the dependencies can be installed with: ```sh # Debian and Ubuntu -sudo apt-get install build-essential git golang-go libpcre3-dev +sudo apt-get install build-essential git libpcre3-dev # Fedora -dnf install @development-tools go pcre +dnf install @development-tools pcre # Archlinux, using an AUR manager for pcre-static -yourAURmanager -S base-devel go pcre-static +yourAURmanager -S base-devel pcre-static ``` ### MacOS @@ -75,17 +74,14 @@ yourAURmanager -S base-devel go pcre-static Assuming you use [Homebrew](https://brew.sh/) to manage packages ```sh -brew install go pcre +brew install pcre ``` ### Windows -* install [Go](https://golang.org/doc/install#windows) You can install the developer tools by following the instruction in our [Windows dev environment section](#windows-dev-environment). It also provides a downloading script for prebuilt PCRE. 
-If you choose to install Go from source, both Go and Nimbus requires the same initial steps of installing Mingw. - ### Android * Install the [Termux](https://termux.com) app from FDroid or the Google Play store @@ -95,7 +91,7 @@ Note, the Ubuntu PRoot is known to contain all Nimbus prerequisites compiled on *Assuming Ubuntu PRoot is used* ```sh -apt install build-essential git golang-go libpcre3-dev +apt install build-essential git libpcre3-dev ``` ## For users @@ -295,24 +291,8 @@ sudo reboot # Install prerequisites sudo apt-get install git libgflags-dev libsnappy-dev libpcre3-dev -mkdir status -cd status - -# Install Go at least 1.12 (Buster only includes up to 1.11) -# Raspbian is 32-bit, so the package is go1.XX.X.linux-armv6l.tar.gz (and not arm64) -curl -O https://storage.googleapis.com/golang/go1.13.3.linux-armv6l.tar.gz -sudo tar -C /usr/local -xzf go1.13.3.linux-armv6l.tar.gz - -echo '# Go install' >> ~/.profile -echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.profile - -# Reload the environment variable changes -source ~/.profile - -git clone https://github.com/status-im/nim-beacon-chain.git - -cd nim-beacon-chain # Then you can follow instructions for Linux. + ``` ### Makefile tips and tricks for developers From 232d07467fdcb0c6c121d555b1025e24abd92faf Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 01:38:55 +0200 Subject: [PATCH 07/58] Remove the run_in_docker wrapper script --- docker/Dockerfile | 4 ++-- docker/run_in_docker.sh | 12 ------------ 2 files changed, 2 insertions(+), 14 deletions(-) delete mode 100755 docker/run_in_docker.sh diff --git a/docker/Dockerfile b/docker/Dockerfile index 541857601..0e6552bff 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -43,7 +43,7 @@ RUN apt-get -qq update \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # "COPY" creates new image layers, so we cram all we can into one command -COPY --from=build /root/nim-beacon-chain/docker/run_in_docker.sh /root/nim-beacon-chain/build/beacon_node /usr/bin/ +COPY --from=build /root/nim-beacon-chain/build/beacon_node /usr/bin/ MAINTAINER Zahary Karadjov LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node." @@ -51,5 +51,5 @@ LABEL description="Nimbus installation that can act as an ETH2 network bootstrap # TODO: This custom entry script is necessary only because we must clean up # temporary files left by previous executions of the Go daeamon. # We should be able to remove it once we have a native LibP2P impl. 
-ENTRYPOINT ["/usr/bin/run_in_docker.sh"] +ENTRYPOINT ["/usr/bin/beacon_node"] diff --git a/docker/run_in_docker.sh b/docker/run_in_docker.sh deleted file mode 100755 index df75b6a2a..000000000 --- a/docker/run_in_docker.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# TODO This script will no longer be necessary once we switch -# to the native LibP2P - -# Deal with previous execution of the deamon leaving behind -# socket files that prevent the deamon from launching again -# inside the container: -rm -rf /tmp/* - -beacon_node "$@" - From 14cffdea99c1122a0a2fa32025fda1a70efcf4b1 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 12:51:04 +0200 Subject: [PATCH 08/58] Don't create a separate bootstrap_node binary --- tests/simulation/run_node.sh | 7 +------ tests/simulation/start.sh | 3 --- tests/simulation/vars.sh | 1 - 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/tests/simulation/run_node.sh b/tests/simulation/run_node.sh index fbaccc068..0521c765a 100755 --- a/tests/simulation/run_node.sh +++ b/tests/simulation/run_node.sh @@ -47,13 +47,8 @@ fi rm -rf "$DATA_DIR/dump" mkdir -p "$DATA_DIR/dump" -NODE_BIN=$BEACON_NODE_BIN -if [[ $NODE_ID == $MASTER_NODE ]]; then - NODE_BIN=$BOOTSTRAP_NODE_BIN -fi - # if you want tracing messages, add "--log-level=TRACE" below -cd "$DATA_DIR" && $NODE_BIN \ +cd "$DATA_DIR" && $BEACON_NODE_BIN \ --log-level=${LOG_LEVEL:-DEBUG} \ --bootstrap-file=$BOOTSTRAP_ADDRESS_FILE \ --data-dir=$DATA_DIR \ diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index 314ba3192..9fa86b311 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -41,9 +41,6 @@ build_beacon_node () { build_beacon_node $BEACON_NODE_BIN -# DAEMON TODO: This copy is now unnecessary -cp $BEACON_NODE_BIN $BOOTSTRAP_NODE_BIN - if [ ! 
-f "${LAST_VALIDATOR}" ]; then echo Building $DEPLOY_DEPOSIT_CONTRACT_BIN $MAKE NIMFLAGS="-o:\"$DEPLOY_DEPOSIT_CONTRACT_BIN\" $CUSTOM_NIMFLAGS $DEFS" deposit_contract diff --git a/tests/simulation/vars.sh b/tests/simulation/vars.sh index f3837a495..a7d2dce73 100644 --- a/tests/simulation/vars.sh +++ b/tests/simulation/vars.sh @@ -31,7 +31,6 @@ VALIDATORS_DIR="${SIM_ROOT}/validators" SNAPSHOT_FILE="${SIMULATION_DIR}/state_snapshot.ssz" NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt" BEACON_NODE_BIN="${SIMULATION_DIR}/beacon_node" -BOOTSTRAP_NODE_BIN="${SIMULATION_DIR}/bootstrap_node" DEPLOY_DEPOSIT_CONTRACT_BIN="${SIMULATION_DIR}/deploy_deposit_contract" MASTER_NODE_PID_FILE="${SIMULATION_DIR}/node-${MASTER_NODE}/beacon_node.pid" From 46a3009c6390448163b89acf5fcefdaf46419025 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 13:26:44 +0200 Subject: [PATCH 09/58] Restore the address file feature (and use it for synchronization in local sim) --- beacon_chain/beacon_node.nim | 3 +++ beacon_chain/eth2_network.nim | 8 ++++++-- tests/simulation/start.sh | 6 +++--- tests/simulation/tmux_demo.sh | 21 --------------------- tests/simulation/vars.sh | 2 +- tests/simulation/wait_master_node.sh | 9 --------- 6 files changed, 13 insertions(+), 36 deletions(-) delete mode 100755 tests/simulation/tmux_demo.sh delete mode 100755 tests/simulation/wait_master_node.sh diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 70c56af53..440d40932 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -257,6 +257,9 @@ proc connectToNetwork(node: BeaconNode) {.async.} = await node.network.connectToNetwork(node.bootstrapEnrs) + let addressFile = node.config.dataDir / "beacon_node.address" + writeFile(addressFile, node.network.announcedENR.toURI) + template findIt(s: openarray, predicate: untyped): int = var res = -1 for i, it {.inject.} in s: diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 57655b3c9..a4adcf7a7 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -909,11 +909,15 @@ proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, ip: IpAddress, port: Port): enr.Record = let pair = getPersistentNetKeys(conf) - enode = initENode(pair.pubkey.skkey, Address(ip: ip, udpPort: port)) + enodeAddress = Address(ip: ip, udpPort: port) return enr.Record.init(1'u64, # sequence number pair.seckey.asEthKey, - enode.address) + enodeAddress) + +proc announcedENR*(node: Eth2Node): enr.Record = + doAssert node.discovery != nil, "The Eth2Node must be initialized" + node.discovery.localNode.record proc shortForm*(id: KeyPair): string = $PeerID.init(id.pubkey) diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index 9fa86b311..a38bb5c05 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -73,8 +73,8 @@ fi rm -f beacon_node.log # Delete any leftover address files from a previous session -if [ -f "${MASTER_NODE_PID_FILE}" ]; then - rm "${MASTER_NODE_PID_FILE}" +if [ -f "${MASTER_NODE_ADDRESS_FILE}" ]; then + rm "${MASTER_NODE_ADDRESS_FILE}" fi # to allow overriding the program names @@ -134,7 +134,7 @@ fi for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do if [[ "$i" != "$MASTER_NODE" && "$USE_MULTITAIL" == "no" ]]; then # Wait for the master node to write out its address file - while [ ! -f "${MASTER_NODE_PID_FILE}" ]; do + while [ ! 
-f "${MASTER_NODE_ADDRESS_FILE}" ]; do sleep 0.1 done fi diff --git a/tests/simulation/tmux_demo.sh b/tests/simulation/tmux_demo.sh deleted file mode 100755 index 5866e550d..000000000 --- a/tests/simulation/tmux_demo.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Read in variables -set -a -# shellcheck source=/dev/null -source "$(dirname "$0")/vars.sh" - -cd $(dirname "$0") -rm -rf data - -tmux new-session -s 'beacon_node' -d - -# maybe these should be moved to a user config file -tmux set-option -g history-limit 999999 -tmux set -g mouse on - -tmux send-keys -t 0 './start.sh' Enter -tmux new-window -n "demo_node" "./wait_master_node.sh && ./run_node.sh 0" - -tmux attach-session -d - diff --git a/tests/simulation/vars.sh b/tests/simulation/vars.sh index a7d2dce73..5ddcf5631 100644 --- a/tests/simulation/vars.sh +++ b/tests/simulation/vars.sh @@ -32,7 +32,7 @@ SNAPSHOT_FILE="${SIMULATION_DIR}/state_snapshot.ssz" NETWORK_BOOTSTRAP_FILE="${SIMULATION_DIR}/bootstrap_nodes.txt" BEACON_NODE_BIN="${SIMULATION_DIR}/beacon_node" DEPLOY_DEPOSIT_CONTRACT_BIN="${SIMULATION_DIR}/deploy_deposit_contract" -MASTER_NODE_PID_FILE="${SIMULATION_DIR}/node-${MASTER_NODE}/beacon_node.pid" +MASTER_NODE_ADDRESS_FILE="${SIMULATION_DIR}/node-${MASTER_NODE}/beacon_node.address" BASE_P2P_PORT=30000 BASE_RPC_PORT=7000 diff --git a/tests/simulation/wait_master_node.sh b/tests/simulation/wait_master_node.sh deleted file mode 100755 index dc5d08197..000000000 --- a/tests/simulation/wait_master_node.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -if [ ! -f "${MASTER_NODE_PID_FILE}" ]; then - echo Waiting for master node... - while [ ! -f "${MASTER_NODE_PID_FILE}" ]; do - sleep 0.1 - done -fi - From 91888874d8fb731caa094a2455eadf79fb487d07 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 13:33:23 +0200 Subject: [PATCH 10/58] [skip ci] remove an obsolete comment --- docker/Dockerfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 0e6552bff..74e878d2e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -48,8 +48,5 @@ COPY --from=build /root/nim-beacon-chain/build/beacon_node /usr/bin/ MAINTAINER Zahary Karadjov LABEL description="Nimbus installation that can act as an ETH2 network bootstrap node." -# TODO: This custom entry script is necessary only because we must clean up -# temporary files left by previous executions of the Go daeamon. -# We should be able to remove it once we have a native LibP2P impl. ENTRYPOINT ["/usr/bin/beacon_node"] From d63e4870e69cb5ff41bb5bdd80deda859fee0ab5 Mon Sep 17 00:00:00 2001 From: cheatfate Date: Mon, 23 Mar 2020 21:53:46 +0200 Subject: [PATCH 11/58] Fix Stream EOF problem. 
--- beacon_chain/eth2_network.nim | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index a4adcf7a7..e2ec61eca 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -15,6 +15,7 @@ import libp2p/protocols/secure/[secure, secio], libp2p/protocols/pubsub/[pubsub, floodsub], libp2p/transports/[transport, tcptransport], + libp2p/stream/lpstream, eth/[keys, async_utils], eth/p2p/[enode, p2p_protocol_dsl], eth/net/nat, eth/p2p/discoveryv5/[enr, node], @@ -272,7 +273,11 @@ proc readMsgBytes(conn: Connection, var responseCode: byte trace "about to read response code" var readResponseCode = conn.readExactly(addr responseCode, 1) - await readResponseCode or deadline + try: + await readResponseCode or deadline + except LPStreamEOFError: + trace "end of stream received" + return if not readResponseCode.finished: trace "response code not received in time" From f2434139e90726e37f494fc06b2d76c0bc717872 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 23 Mar 2020 22:29:50 +0200 Subject: [PATCH 12/58] Hotfix: Don't report failure to connect to bootstrap node due to the initial sync taking more than 30 seconds --- beacon_chain/eth2_network.nim | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index e2ec61eca..9fff9afb0 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -947,7 +947,9 @@ proc connectToNetwork*(node: Eth2Node, fatal "Failed to connect to any bootstrap node. Quitting", bootstrapEnrs quit 1 - traceAsyncErrors checkIfConnectedToBootstrapNode() + # TODO: The initial sync forces this to time out. + # Revisit when the new Sync manager is integrated. 
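`checkIfConnectedToBootstrapNode` (context above) is a small watchdog: sleep through a grace period, then quit if not a single dial succeeded in the meantime. Reduced to its essentials, with a plain counter standing in for the `libp2p_successful_dials` gauge and invented proc names:

import chronos

var successfulDials = 0        # stands in for the libp2p_successful_dials gauge

proc bootstrapWatchdog(grace: Duration) {.async.} =
  # Give discovery and dialing some time, then give up if nothing worked.
  await sleepAsync(grace)
  if successfulDials == 0:
    echo "Failed to connect to any bootstrap node. Quitting"
    quit 1

when isMainModule:
  asyncCheck bootstrapWatchdog(100.milliseconds)   # fire-and-forget
  successfulDials = 1                              # a dial succeeded in time
  waitFor sleepAsync(150.milliseconds)

The call is commented out here because the initial sync can keep the node busy well past the 30-second grace period, producing a false alarm.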
+ # traceAsyncErrors checkIfConnectedToBootstrapNode() func peersCount*(node: Eth2Node): int = len(node.peerPool) From b80a5b90dfa70e9ad7a0057b1278298eb3ba69af Mon Sep 17 00:00:00 2001 From: kdeme Date: Tue, 24 Mar 2020 10:54:17 +0100 Subject: [PATCH 13/58] Adjustments to store bootnode enrs in discovery object + bump nim-eth --- beacon_chain/beacon_node.nim | 23 ++------------------ beacon_chain/eth2_discovery.nim | 38 +++++++++++++++++++++------------ beacon_chain/eth2_network.nim | 15 ++++++------- vendor/nim-eth | 2 +- 4 files changed, 33 insertions(+), 45 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 440d40932..e8382ccee 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -56,8 +56,6 @@ type forkVersion: array[4, byte] netKeys: KeyPair requestManager: RequestManager - bootstrapNodes: seq[ENode] - bootstrapEnrs: seq[enr.Record] db: BeaconChainDB config: BeaconNodeConf attachedValidators: ValidatorPool @@ -188,16 +186,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async # monitor mainchainMonitor.start() - var bootNodes: seq[ENode] - var bootEnrs: seq[enr.Record] - for node in conf.bootstrapNodes: - addBootstrapNode(node, bootNodes, bootEnrs, ourPubKey) - loadBootstrapFile(string conf.bootstrapNodesFile, bootNodes, bootEnrs, ourPubKey) - - let persistentBootstrapFile = conf.dataDir / "bootstrap_nodes.txt" - if fileExists(persistentBootstrapFile): - loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) - let network = await createEth2Node(conf) let rpcServer = if conf.rpcEnabled: @@ -211,8 +199,6 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async forkVersion: blockPool.headState.data.data.fork.current_version, netKeys: netKeys, requestManager: RequestManager.init(network), - bootstrapNodes: bootNodes, - bootstrapEnrs: bootEnrs, db: db, config: conf, attachedValidators: ValidatorPool.init(), @@ -250,12 +236,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async return res proc connectToNetwork(node: BeaconNode) {.async.} = - if node.bootstrapEnrs.len > 0: - info "Connecting to bootstrap nodes", bootstrapEnrs = node.bootstrapEnrs - else: - info "Waiting for connections" - - await node.network.connectToNetwork(node.bootstrapEnrs) + await node.network.connectToNetwork() let addressFile = node.config.dataDir / "beacon_node.address" writeFile(addressFile, node.network.announcedENR.toURI) @@ -1221,7 +1202,7 @@ when isMainModule: bootstrapEnr = enr.Record.init( 1, # sequence number networkKeys.seckey.asEthKey, - bootstrapAddress) + some(bootstrapAddress)) writeFile(bootstrapFile, bootstrapEnr.toURI) echo "Wrote ", bootstrapFile diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index b228aaad7..427eb1f3a 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -12,20 +12,7 @@ type PublicKey = keys.PublicKey export - Eth2DiscoveryProtocol, open, close, result - -proc new*(T: type Eth2DiscoveryProtocol, - conf: BeaconNodeConf, - ip: IpAddress, rawPrivKeyBytes: openarray[byte]): T = - # TODO - # Implement more configuration options: - # * for setting up a specific key - # * for using a persistent database - var - pk = initPrivateKey(rawPrivKeyBytes) - db = DiscoveryDB.init(newMemoryDB()) - - newProtocol(pk, db, ip, conf.tcpPort, conf.udpPort) + Eth2DiscoveryProtocol, open, start, close, result proc toENode*(a: MultiAddress): Result[ENode, cstring] = if 
not IPFS.match(a): @@ -165,3 +152,26 @@ proc loadBootstrapFile*(bootstrapFile: string, error "Unknown bootstrap file format", ext quit 1 +proc new*(T: type Eth2DiscoveryProtocol, + conf: BeaconNodeConf, + ip: IpAddress, rawPrivKeyBytes: openarray[byte]): T = + # TODO + # Implement more configuration options: + # * for setting up a specific key + # * for using a persistent database + var + pk = initPrivateKey(rawPrivKeyBytes) + ourPubKey = pk.getPublicKey() + db = DiscoveryDB.init(newMemoryDB()) + + var bootNodes: seq[ENode] + var bootEnrs: seq[enr.Record] + for node in conf.bootstrapNodes: + addBootstrapNode(node, bootNodes, bootEnrs, ourPubKey) + loadBootstrapFile(string conf.bootstrapNodesFile, bootNodes, bootEnrs, ourPubKey) + + let persistentBootstrapFile = conf.dataDir / "bootstrap_nodes.txt" + if fileExists(persistentBootstrapFile): + loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) + + newProtocol(pk, db, ip, conf.tcpPort, conf.udpPort, bootEnrs) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 9fff9afb0..665a5adb1 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -702,6 +702,7 @@ template addKnownPeer*(node: Eth2Node, peer: ENode|enr.Record) = proc start*(node: Eth2Node) {.async.} = node.discovery.open() + node.discovery.start() node.libp2pTransportLoops = await node.switch.start() traceAsyncErrors node.runDiscoveryLoop() @@ -918,7 +919,7 @@ proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, return enr.Record.init(1'u64, # sequence number pair.seckey.asEthKey, - enodeAddress) + some(enodeAddress)) proc announcedENR*(node: Eth2Node): enr.Record = doAssert node.discovery != nil, "The Eth2Node must be initialized" @@ -933,18 +934,14 @@ proc toPeerInfo(enode: ENode): PeerInfo = addresses = @[MultiAddress.init enode.toMultiAddressStr] return PeerInfo.init(peerId, addresses) -proc connectToNetwork*(node: Eth2Node, - bootstrapEnrs: seq[enr.Record]) {.async.} = - for bootstrapNode in bootstrapEnrs: - debug "Adding known peer", peer = bootstrapNode - node.addKnownPeer bootstrapNode - +proc connectToNetwork*(node: Eth2Node) {.async.} = await node.start() proc checkIfConnectedToBootstrapNode {.async.} = await sleepAsync(30.seconds) - if bootstrapEnrs.len > 0 and libp2p_successful_dials.value == 0: - fatal "Failed to connect to any bootstrap node. Quitting", bootstrapEnrs + if node.discovery.bootstrapRecords.len > 0 and libp2p_successful_dials.value == 0: + fatal "Failed to connect to any bootstrap node. Quitting", + bootstrapEnrs = node.discovery.bootstrapRecords quit 1 # TODO: The initial sync forces this to time out. 
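With this change `Eth2DiscoveryProtocol.new` gathers its own bootstrap records: the entries in `conf.bootstrapNodes`, the file named by `conf.bootstrapNodesFile`, and the data directory's `bootstrap_nodes.txt` when it exists. The gathering step alone, reduced to plain strings (the real code parses each entry into `ENode`/`enr.Record` values via `addBootstrapNode` and `loadBootstrapFile`; `gatherBootstrapEntries` is an invented name):

import os, strutils

proc gatherBootstrapEntries(cliNodes: openArray[string],
                            files: openArray[string]): seq[string] =
  # Command-line entries first, then one entry per non-empty line of each
  # bootstrap file that is actually present on disk.
  for node in cliNodes:
    result.add node.strip
  for f in files:
    if fileExists(f):
      for line in lines(f):
        if line.strip.len > 0:
          result.add line.strip

when isMainModule:
  echo gatherBootstrapEntries(@["enr:..."], @["bootstrap_nodes.txt"])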
diff --git a/vendor/nim-eth b/vendor/nim-eth index 9c442bf65..c3f23e591 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 9c442bf65b52a4c857cc6e51efe901352e8b6ebf +Subproject commit c3f23e5912efff98fc6c8181db579037e5a19a2c From 86a3bea1ec3dbf247e44301489e91e68f902b21d Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Tue, 24 Mar 2020 18:06:36 +0200 Subject: [PATCH 14/58] Use the latest Chronos and Libp2p --- beacon_chain/eth2_network.nim | 13 ++++++++----- vendor/nim-chronos | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-metrics | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 665a5adb1..299a17162 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -828,7 +828,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, tcpPort: Port, - udpPort: Port] = + udpPort: Port] {.gcsafe.} = # defaults result.ip = globalListeningAddr result.tcpPort = conf.tcpPort @@ -857,9 +857,12 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, let extIP = getExternalIP(nat) if extIP.isSome: result.ip = extIP.get() - let extPorts = redirectPorts(tcpPort = result.tcpPort, - udpPort = result.udpPort, - description = clientId) + # TODO redirectPorts in considered a gcsafety violation + # because it obtains the address of a non-gcsafe proc? + let extPorts = ({.gcsafe.}: + redirectPorts(tcpPort = result.tcpPort, + udpPort = result.udpPort, + description = clientId)) if extPorts.isSome: (result.tcpPort, result.udpPort) = extPorts.get() @@ -893,7 +896,7 @@ proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = KeyPair(seckey: privKey, pubkey: privKey.getKey()) -proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async.} = +proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = var (extIp, extTcpPort, _) = setupNat(conf) hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 7ed9f1431..f3827a13d 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 7ed9f1431a0a8262f988553e9927676cad54e470 +Subproject commit f3827a13d12f27e20874df81dc99b55e5dc78244 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 0a3e4a764..a2acdd793 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 0a3e4a764b718d13fc330f228fce60ed265cfde2 +Subproject commit a2acdd7933ff411bb6f4133d90bb46caec6127dc diff --git a/vendor/nim-metrics b/vendor/nim-metrics index ab1d8891e..19c87b7dc 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit ab1d8891ef11e4c310c071213bd67aa5ac4b421d +Subproject commit 19c87b7dc91dfbc0b6823a3d2996869397dd34e6 From 22876da593860360aa58fc541a054c2d09c3c54e Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Tue, 24 Mar 2020 19:42:15 +0200 Subject: [PATCH 15/58] Fix gcsafety issues in the test suite --- beacon_chain/sync_manager.nim | 8 ++++---- tests/test_peer_pool.nim | 8 ++++---- tests/test_sync_manager.nim | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/beacon_chain/sync_manager.nim b/beacon_chain/sync_manager.nim index dbd16d203..b006316f1 100644 --- a/beacon_chain/sync_manager.nim +++ b/beacon_chain/sync_manager.nim @@ -30,8 +30,8 @@ type slots*: seq[PeerSlot[A, B]] man: SyncManager[A, B] - GetLocalHeadSlotCallback* = proc(): Slot - UpdateLocalBlocksCallback* = proc(list: 
openarray[SignedBeaconBlock]): bool + GetLocalHeadSlotCallback* = proc(): Slot {.gcsafe.} + UpdateLocalBlocksCallback* = proc(list: openarray[SignedBeaconBlock]): bool {.gcsafe.} SyncManager*[A, B] = ref object groups*: seq[PeerGroup[A, B]] @@ -128,7 +128,7 @@ proc updateLastSlot*(sq: SyncQueue, last: Slot) {.inline.} = sq.lastSlot = last proc push*(sq: SyncQueue, sr: SyncRequest, - data: seq[SignedBeaconBlock]) {.async.} = + data: seq[SignedBeaconBlock]) {.async, gcsafe.} = ## Push successfull result to queue ``sq``. while true: if (sq.queueSize > 0) and (sr.slot >= sq.outSlot + uint64(sq.queueSize)): @@ -888,7 +888,7 @@ proc updateStatus*[A, B](sman: SyncManager[A, B]) {.async.} = pending[i].cancel() raise exc -proc synchronize*[A, B](sman: SyncManager[A, B]) {.async.} = +proc synchronize*[A, B](sman: SyncManager[A, B]) {.async, gcsafe.} = ## TODO: This synchronization procedure is not optimal, we can do it better ## if spawn N parallel tasks, where N is number of peer groups. var diff --git a/tests/test_peer_pool.nim b/tests/test_peer_pool.nim index 9a845dd9a..a970e43cf 100644 --- a/tests/test_peer_pool.nim +++ b/tests/test_peer_pool.nim @@ -193,14 +193,14 @@ suiteReport "PeerPool testing suite": itemFut23.finished == false itemFut24.finished == false - timedTest "Acquire/Sorting and consistency test": + timedTest "Acquire/Sorting and consistency test": closureScope: const TestsCount = 1000 MaxNumber = 1_000_000 var pool = newPeerPool[PeerTest, PeerTestID]() - proc testAcquireRelease(): Future[int] {.async.} = + proc testAcquireRelease(): Future[int] {.async, gcsafe.} = var weight: int var incoming, outgoing, total: seq[PeerTest] var incWeight1, outWeight1, totWeight1: int @@ -362,7 +362,7 @@ suiteReport "PeerPool testing suite": check waitFor(testPeerLifetime()) == true - timedTest "Safe/Clear test": + timedTest "Safe/Clear test": closureScope: var pool = newPeerPool[PeerTest, PeerTestID]() var peer1 = PeerTest.init("peer1", 10) var peer2 = PeerTest.init("peer2", 9) @@ -409,7 +409,7 @@ suiteReport "PeerPool testing suite": asyncCheck testConsumer() check waitFor(testClose()) == true - timedTest "Access peers by key test": + timedTest "Access peers by key test": closureScope: var pool = newPeerPool[PeerTest, PeerTestID]() var peer1 = PeerTest.init("peer1", 10) var peer2 = PeerTest.init("peer2", 9) diff --git a/tests/test_sync_manager.nim b/tests/test_sync_manager.nim index 9406a7e72..6c42ba5f7 100644 --- a/tests/test_sync_manager.nim +++ b/tests/test_sync_manager.nim @@ -603,7 +603,7 @@ proc checkRequest(peer: SimplePeer, index: int, slot, count, step: int, data: varargs[int]): bool {.inline.} = result = checkRequest(peer.requests[index], slot, count, step, data) -proc syncManagerOnePeerTest(): Future[bool] {.async.} = +proc syncManagerOnePeerTest(): Future[bool] {.async, gcsafe.} = # Syncing with one peer only. 
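The `{.gcsafe.}` annotations added here (on the sync-manager callback types, on `push` and `synchronize`, and on the async test procs) promise the compiler that these procs never touch global GC'd state. When a call is known to be safe but cannot be proven so — as with `redirectPorts` inside `setupNat` above — a `{.gcsafe.}:` override block (or the parenthesised expression form used there) silences the check for just that spot. A minimal illustration with invented names (`GetHeadSlotCb`, `dialLog`, `recordDial`):

type GetHeadSlotCb = proc(): int {.gcsafe.}

var dialLog: seq[string]     # global GC'd state: touching it is not gcsafe

proc recordDial(peer: string) =
  dialLog.add peer

proc makeCallback(): GetHeadSlotCb =
  result = proc(): int {.gcsafe.} =
    # Without the override block the compiler rejects this closure, because
    # recordDial reaches a global that owns GC'd memory; the block asserts
    # that this particular use is known to be safe.
    {.gcsafe.}:
      recordDial("peer-1")
      result = dialLog.len

echo makeCallback()()   # -> 1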
var pool = newPeerPool[SimplePeer, SimplePeerKey]() var peer = SimplePeer.init("id1") From 46e395c6475279c08b8152d1a22632b4d9385bcc Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Wed, 25 Mar 2020 18:18:39 +0200 Subject: [PATCH 16/58] Switch to GossipSub --- beacon_chain/eth2_network.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 299a17162..d5cdd48a0 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -911,7 +911,7 @@ proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = # that are different from the host address (this is relevant when we # are running behind a NAT). var switch = newStandardSwitch(some keys.seckey, hostAddress, - triggerSelf = true, gossip = false) + triggerSelf = true, gossip = true) result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, From 9fb1fd4a5cdea6bfbd0eee537e8cb499384df087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Thu, 26 Mar 2020 19:07:19 +0100 Subject: [PATCH 17/58] add --compatibility to accept cpu/mem limits For more details see: https://github.com/status-im/infra-nimbus/issues/12 --- docker/manage_testnet_hosts.nims | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/manage_testnet_hosts.nims b/docker/manage_testnet_hosts.nims index 6130c153d..8418b2059 100644 --- a/docker/manage_testnet_hosts.nims +++ b/docker/manage_testnet_hosts.nims @@ -103,7 +103,7 @@ of restart_nodes: echo &"ssh {n.server} docker pull -q statusteam/nimbus_beacon_node:{conf.network}" # docker-compose will rebuild the container if it detects a newer image. # Prints: "Recreating beacon-node-testnet1-1 ... done". - echo &"ssh {n.server} 'cd /docker/{n.container} && docker-compose up -d'" + echo &"ssh {n.server} 'cd /docker/{n.container} && docker-compose --compatibility up -d'" of reset_network: for n, firstValidator, lastValidator in validatorAssignments(): From 1bfe69b17d66a80aa375937fe67189f111be0d8d Mon Sep 17 00:00:00 2001 From: tersec Date: Thu, 26 Mar 2020 20:32:13 +0000 Subject: [PATCH 18/58] refactor --verify-finalization (#833) * refactor --verify-finalization from run_node to only be invoked via eth2_network_simulation and local sim; clean up various shell issues * remove hardcoded --verify-finalization in launch_local_testnet --- tests/simulation/run_node.sh | 10 ++++++++-- tests/simulation/start.sh | 26 +++++++++++++------------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/tests/simulation/run_node.sh b/tests/simulation/run_node.sh index 0521c765a..0c7a6f1e3 100755 --- a/tests/simulation/run_node.sh +++ b/tests/simulation/run_node.sh @@ -9,6 +9,13 @@ shift # shellcheck source=/dev/null source "$(dirname "$0")/vars.sh" +if [[ ! -z "$1" ]]; then + ADDITIONAL_BEACON_NODE_ARGS=$1 + shift +else + ADDITIONAL_BEACON_NODE_ARGS="" +fi + if [[ ! 
-z "$1" ]]; then BOOTSTRAP_NODE_ID=$1 BOOTSTRAP_ADDRESS_FILE="${SIMULATION_DIR}/node-${BOOTSTRAP_NODE_ID}/beacon_node.address" @@ -59,12 +66,11 @@ cd "$DATA_DIR" && $BEACON_NODE_BIN \ --state-snapshot=$SNAPSHOT_FILE \ $DEPOSIT_WEB3_URL_ARG \ --deposit-contract=$DEPOSIT_CONTRACT_ADDRESS \ - --verify-finalization \ --rpc \ --rpc-address="127.0.0.1" \ --rpc-port="$(( $BASE_RPC_PORT + $NODE_ID ))" \ --metrics \ --metrics-address="127.0.0.1" \ --metrics-port="$(( $BASE_METRICS_PORT + $NODE_ID ))" \ + ${ADDITIONAL_BEACON_NODE_ARGS} \ "$@" - diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index a38bb5c05..d9bacfc42 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -42,7 +42,7 @@ build_beacon_node () { build_beacon_node $BEACON_NODE_BIN if [ ! -f "${LAST_VALIDATOR}" ]; then - echo Building $DEPLOY_DEPOSIT_CONTRACT_BIN + echo Building "${DEPLOY_DEPOSIT_CONTRACT_BIN}" $MAKE NIMFLAGS="-o:\"$DEPLOY_DEPOSIT_CONTRACT_BIN\" $CUSTOM_NIMFLAGS $DEFS" deposit_contract if [ "$DEPOSIT_WEB3_URL_ARG" != "" ]; then @@ -84,10 +84,10 @@ TMUX_SESSION_NAME="${TMUX_SESSION_NAME:-nbc-network-sim}" # Using tmux or multitail is an opt-in USE_MULTITAIL="${USE_MULTITAIL:-no}" -type "$MULTITAIL" &>/dev/null || { echo $MULTITAIL is missing; USE_MULTITAIL="no"; } +type "$MULTITAIL" &>/dev/null || { echo "${MULTITAIL}" is missing; USE_MULTITAIL="no"; } USE_TMUX="${USE_TMUX:-no}" -type "$TMUX" &>/dev/null || { echo $TMUX is missing; USE_TMUX="no"; } +type "$TMUX" &>/dev/null || { echo "${TMUX}" is missing; USE_TMUX="no"; } # Prometheus config (continued inside the loop) mkdir -p "${METRICS_DIR}" @@ -123,12 +123,12 @@ fi COMMANDS=() if [[ "$USE_TMUX" != "no" ]]; then - $TMUX new-session -s $TMUX_SESSION_NAME -d + $TMUX new-session -s "${TMUX_SESSION_NAME}" -d # maybe these should be moved to a user config file - $TMUX set-option -t $TMUX_SESSION_NAME history-limit 999999 - $TMUX set-option -t $TMUX_SESSION_NAME remain-on-exit on - $TMUX set -t $TMUX_SESSION_NAME mouse on + $TMUX set-option -t "${TMUX_SESSION_NAME}" history-limit 999999 + $TMUX set-option -t "${TMUX_SESSION_NAME}" remain-on-exit on + $TMUX set -t "${TMUX_SESSION_NAME}" mouse on fi for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do @@ -139,11 +139,11 @@ for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do done fi - CMD="${SIM_ROOT}/run_node.sh $i" + CMD="${SIM_ROOT}/run_node.sh ${i} --verify-finalization" if [[ "$USE_TMUX" != "no" ]]; then - $TMUX split-window -t $TMUX_SESSION_NAME "$CMD" - $TMUX select-layout -t $TMUX_SESSION_NAME tiled + $TMUX split-window -t "${TMUX_SESSION_NAME}" "$CMD" + $TMUX select-layout -t "${TMUX_SESSION_NAME}" tiled elif [[ "$USE_MULTITAIL" != "no" ]]; then if [[ "$i" == "$MASTER_NODE" ]]; then SLEEP="0" @@ -158,7 +158,7 @@ for i in $(seq $MASTER_NODE -1 $TOTAL_USER_NODES); do # Prometheus config cat >> "${METRICS_DIR}/prometheus.yml" < Date: Sat, 28 Mar 2020 13:20:52 -0600 Subject: [PATCH 19/58] bump libp2p (#837) Both `make eth2_network_simulation` and `./scripts/launch_local_testnet.sh --nodes 4 --disable-htop -- --verify-finalization --stop-at-epoch=7` work locally. 
--- vendor/nim-libp2p | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index a2acdd793..6bb4e91a3 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit a2acdd7933ff411bb6f4133d90bb46caec6127dc +Subproject commit 6bb4e91a39fe17239ccdb44b287155758addd1cd From 1276f346e008556a3cfa7ced6bea2b053d8f2174 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sat, 28 Mar 2020 23:04:43 +0100 Subject: [PATCH 20/58] config.nims: --define:nimTypeNames --- config.nims | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config.nims b/config.nims index cd9029f21..89cea8017 100644 --- a/config.nims +++ b/config.nims @@ -38,6 +38,8 @@ else: # enable metric collection --define:metrics --define:chronicles_line_numbers +# for heap-usage-by-instance-type metrics and object base-type strings +--define:nimTypeNames # the default open files limit is too low on macOS (512), breaking the # "--debugger:native" build. It can be increased with `ulimit -n 1024`. From 97abe77398261b71ea883332144234d1fdfea249 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sun, 29 Mar 2020 23:56:25 +0200 Subject: [PATCH 21/58] close unused PeerInfo instance Should not be needed from the GC's point of view, but needed when gathering Future.state statistics, due to `PeerInfo.lifefut`. --- beacon_chain/eth2_network.nim | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index d5cdd48a0..65f50c0d4 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -663,13 +663,16 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} = try: let discoveredPeers = node.discovery.randomNodes(node.wantedPeers - currentPeerCount) - debug "Discovered peers", peer = $discoveredPeers for peer in discoveredPeers: try: let peerInfo = peer.record.toTypedRecord.toPeerInfo - if peerInfo != nil and peerInfo.id notin node.switch.connections: - # TODO do this in parallel - await node.dialPeer(peerInfo) + if peerInfo != nil: + if peerInfo.id notin node.switch.connections: + debug "Discovered new peer", peer = $peer + # TODO do this in parallel + await node.dialPeer(peerInfo) + else: + peerInfo.close() except CatchableError as err: debug "Failed to connect to peer", peer = $peer, err = err.msg except CatchableError as err: From f5f939bd315c684692f0869fc0e1126e6a3f1647 Mon Sep 17 00:00:00 2001 From: tersec Date: Mon, 30 Mar 2020 11:31:44 +0000 Subject: [PATCH 22/58] 0.11.1 beacon chain spec update (#836) * initial 0.11.1 spec commit; no test regressions and finalizes in eth2_network_simulation * with BLS 0.10/0.11 available, stop skipping attester slashing, proposer slashing, and voluntary exist operations fixture tests * switch param orders to group state.{fork, genesis_validators_root}; bump spec/datatypes spec version for network purposes * mark attestation construction and broadcast and some minimal/mainnet constants as 0.11.1-compatible; remove phase 1 sharding constants from minimal which don't exist in that preset --- AllTests-mainnet.md | 2 +- AllTests-minimal.md | 2 +- FixtureAll-mainnet.md | 2 +- FixtureAll-minimal.md | 2 +- FixtureSSZConsensus-mainnet.md | 2 +- FixtureSSZConsensus-minimal.md | 2 +- beacon_chain/attestation_pool.nim | 2 +- beacon_chain/beacon_node.nim | 16 ++++---- beacon_chain/spec/beaconstate.nim | 13 +++---- beacon_chain/spec/datatypes.nim | 2 +- beacon_chain/spec/helpers.nim | 22 +++++------ 
beacon_chain/spec/presets/mainnet.nim | 2 +- beacon_chain/spec/presets/minimal.nim | 12 +----- beacon_chain/spec/state_transition_block.nim | 38 +++++++++++-------- beacon_chain/spec/state_transition_epoch.nim | 4 +- beacon_chain/validator_pool.nim | 26 +++++++------ tests/mocking/mock_attestations.nim | 3 +- tests/mocking/mock_blocks.nim | 5 ++- tests/official/fixtures_utils.nim | 2 +- .../test_fixture_const_sanity_check.nim | 4 +- .../test_fixture_operations_attestations.nim | 2 - ..._fixture_operations_attester_slashings.nim | 2 - ..._fixture_operations_proposer_slashings.nim | 2 - ...test_fixture_operations_voluntary_exit.nim | 2 - .../test_fixture_ssz_consensus_objects.nim | 4 +- .../test_fixture_ssz_generic_types.nim | 2 +- tests/testblockutil.nim | 10 +++-- vendor/nim-eth2-scenarios | 2 +- 28 files changed, 91 insertions(+), 98 deletions(-) diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index bed594b31..fabab6eab 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -51,7 +51,7 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Attestation topics OK ``` OK: 1/1 Fail: 0/1 Skip: 0/1 -## Official - 0.11.0 - constants & config [Preset: mainnet] +## Official - 0.11.1 - constants & config [Preset: mainnet] ```diff + BASE_REWARD_FACTOR 64 [Preset: mainnet] OK + BLS_WITHDRAWAL_PREFIX "0x00" [Preset: mainnet] OK diff --git a/AllTests-minimal.md b/AllTests-minimal.md index 1e703b138..53dc3991b 100644 --- a/AllTests-minimal.md +++ b/AllTests-minimal.md @@ -78,7 +78,7 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Attestation topics OK ``` OK: 1/1 Fail: 0/1 Skip: 0/1 -## Official - 0.11.0 - constants & config [Preset: minimal] +## Official - 0.11.1 - constants & config [Preset: minimal] ```diff + BASE_REWARD_FACTOR 64 [Preset: minimal] OK + BLS_WITHDRAWAL_PREFIX "0x00" [Preset: minimal] OK diff --git a/FixtureAll-mainnet.md b/FixtureAll-mainnet.md index dd2d02dce..032a35006 100644 --- a/FixtureAll-mainnet.md +++ b/FixtureAll-mainnet.md @@ -45,6 +45,7 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + [Invalid] after_epoch_slots OK + [Invalid] bad_source_root OK + [Invalid] before_inclusion_delay OK ++ [Invalid] empty_aggregation_bits OK + [Invalid] future_target_epoch OK + [Invalid] invalid_attestation_signature OK + [Invalid] invalid_current_source_root OK @@ -58,7 +59,6 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + [Invalid] too_many_aggregation_bits OK + [Invalid] wrong_index_for_committee_signature OK + [Invalid] wrong_index_for_slot OK -+ [Valid] empty_aggregation_bits OK + [Valid] success OK + [Valid] success_multi_proposer_index_iterations OK + [Valid] success_previous_epoch OK diff --git a/FixtureAll-minimal.md b/FixtureAll-minimal.md index 64d723a30..a49f4c133 100644 --- a/FixtureAll-minimal.md +++ b/FixtureAll-minimal.md @@ -45,6 +45,7 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + [Invalid] after_epoch_slots OK + [Invalid] bad_source_root OK + [Invalid] before_inclusion_delay OK ++ [Invalid] empty_aggregation_bits OK + [Invalid] future_target_epoch OK + [Invalid] invalid_attestation_signature OK + [Invalid] invalid_current_source_root OK @@ -58,7 +59,6 @@ OK: 3/3 Fail: 0/3 Skip: 0/3 + [Invalid] too_many_aggregation_bits OK + [Invalid] wrong_index_for_committee_signature OK + [Invalid] wrong_index_for_slot OK -+ [Valid] empty_aggregation_bits OK + [Valid] success OK + [Valid] success_multi_proposer_index_iterations OK + [Valid] success_previous_epoch OK diff --git a/FixtureSSZConsensus-mainnet.md b/FixtureSSZConsensus-mainnet.md index 69733cce5..efe9443d5 100644 --- a/FixtureSSZConsensus-mainnet.md +++ b/FixtureSSZConsensus-mainnet.md @@ -1,6 
+1,6 @@ FixtureSSZConsensus-mainnet === -## Official - 0.11.0 - SSZ consensus objects [Preset: mainnet] +## Official - 0.11.1 - SSZ consensus objects [Preset: mainnet] ```diff + Testing AggregateAndProof OK + Testing Attestation OK diff --git a/FixtureSSZConsensus-minimal.md b/FixtureSSZConsensus-minimal.md index 159b5d732..855260878 100644 --- a/FixtureSSZConsensus-minimal.md +++ b/FixtureSSZConsensus-minimal.md @@ -1,6 +1,6 @@ FixtureSSZConsensus-minimal === -## Official - 0.11.0 - SSZ consensus objects [Preset: minimal] +## Official - 0.11.1 - SSZ consensus objects [Preset: minimal] ```diff + Testing AggregateAndProof OK + Testing Attestation OK diff --git a/beacon_chain/attestation_pool.nim b/beacon_chain/attestation_pool.nim index cf2260f98..8b4ae0a14 100644 --- a/beacon_chain/attestation_pool.nim +++ b/beacon_chain/attestation_pool.nim @@ -308,7 +308,7 @@ proc getAttestationsForBlock*( for a in slotData.attestations: var - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#construct-attestation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#construct-attestation attestation = Attestation( aggregation_bits: a.validations[0].aggregation_bits, data: a.data, diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index e8382ccee..d8bdfb433 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -316,14 +316,15 @@ proc updateHead(node: BeaconNode): BlockRef = proc sendAttestation(node: BeaconNode, fork: Fork, + genesis_validators_root: Eth2Digest, validator: AttachedValidator, attestationData: AttestationData, committeeLen: int, indexInCommittee: int) {.async.} = logScope: pcs = "send_attestation" - let - validatorSignature = await validator.signAttestation(attestationData, fork) + let validatorSignature = await validator.signAttestation(attestationData, + fork, genesis_validators_root) var aggregationBits = CommitteeValidatorsBits.init(committeeLen) aggregationBits.setBit indexInCommittee @@ -334,7 +335,7 @@ proc sendAttestation(node: BeaconNode, aggregation_bits: aggregationBits ) - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#broadcast-attestation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#broadcast-attestation node.network.broadcast( getAttestationTopic(attestationData.index), attestation) @@ -384,7 +385,7 @@ proc proposeBlock(node: BeaconNode, let message = makeBeaconBlock( state, head.root, - validator.genRandaoReveal(state.fork, slot), + validator.genRandaoReveal(state.fork, state.genesis_validators_root, slot), eth1data, Eth2Digest(), node.attestationPool.getAttestationsForBlock(state), @@ -400,8 +401,8 @@ proc proposeBlock(node: BeaconNode, let blockRoot = hash_tree_root(newBlock.message) # Careful, state no longer valid after here because of the await.. 
- newBlock.signature = - await validator.signBlockProposal(state.fork, slot, blockRoot) + newBlock.signature = await validator.signBlockProposal( + state.fork, state.genesis_validators_root, slot, blockRoot) (blockRoot, newBlock) @@ -556,7 +557,8 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) = for a in attestations: traceAsyncErrors sendAttestation( - node, state.fork, a.validator, a.data, a.committeeLen, a.indexInCommittee) + node, state.fork, state.genesis_validators_root, a.validator, a.data, + a.committeeLen, a.indexInCommittee) proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot): Future[BlockRef] {.async.} = diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 08ba2774f..cc5476767 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -286,12 +286,12 @@ func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest = # Return the block root at the start of a recent ``epoch``. get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_total_balance +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_total_balance func get_total_balance*(state: BeaconState, validators: auto): Gwei = - ## Return the combined effective balance of the ``indices``. (1 Gwei minimum - ## to avoid divisions by zero.) + ## Return the combined effective balance of the ``indices``. + ## ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. ## Math safe up to ~10B ETH, afterwhich this overflows uint64. - max(1'u64, + max(EFFECTIVE_BALANCE_INCREMENT, foldl(validators, a + state.validators[b].effective_balance, 0'u64) ) @@ -365,7 +365,7 @@ proc process_registry_updates*(state: var BeaconState) {.nbench.}= validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) -# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#is_valid_indexed_attestation +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_valid_indexed_attestation proc is_valid_indexed_attestation*( state: BeaconState, indexed_attestation: IndexedAttestation, flags: UpdateFlags): bool = @@ -381,7 +381,6 @@ proc is_valid_indexed_attestation*( return false # Verify indices are sorted and unique - # TODO but why? 
this is a local artifact if indices != sorted(indices, system.cmp): notice "indexed attestation: indices not sorted" return false @@ -436,7 +435,7 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation, signature: attestation.signature ) -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#attestations +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#attestations proc check_attestation*( state: BeaconState, attestation: Attestation, flags: UpdateFlags, stateCache: var StateCache): bool = diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index 262caf5a0..3527895a3 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -57,7 +57,7 @@ else: loadCustomPreset const_preset const - SPEC_VERSION* = "0.11.0" ## \ + SPEC_VERSION* = "0.11.1" ## \ ## Spec version we're aiming to be compatible with, right now GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\ diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 33baddee4..10191e190 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -149,25 +149,21 @@ func compute_domain*( # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_domain func get_domain*( - fork: Fork, domain_type: DomainType, epoch: Epoch): Domain = + fork: Fork, domain_type: DomainType, epoch: Epoch, genesis_validators_root: Eth2Digest): Domain = ## Return the signature domain (fork version concatenated with domain type) ## of a message. - let - fork_version = - if epoch < fork.epoch: - fork.previous_version - else: - fork.current_version - compute_domain(domain_type, fork_version) + let fork_version = + if epoch < fork.epoch: + fork.previous_version + else: + fork.current_version + compute_domain(domain_type, fork_version, genesis_validators_root) func get_domain*( - state: BeaconState, domain_type: DomainType, message_epoch: Epoch): Domain = + state: BeaconState, domain_type: DomainType, epoch: Epoch): Domain = ## Return the signature domain (fork version concatenated with domain type) ## of a message. - get_domain(state.fork, domain_type, message_epoch) - -func get_domain*(state: BeaconState, domain_type: DomainType): Domain = - get_domain(state, domain_type, get_current_epoch(state)) + get_domain(state.fork, domain_type, epoch, state. 
genesis_validators_root) # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_signing_root func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest = diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index b197d9ef5..1c66e4211 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -56,7 +56,7 @@ const # Gwei values # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L52 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L58 MIN_DEPOSIT_AMOUNT* = 2'u64^0 * 10'u64^9 ##\ ## Minimum amounth of ETH that can be deposited in one call - deposits can diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index 0e1eb0eb8..3c8fd8077 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ -71,7 +71,7 @@ const # Time parameters # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L71 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L77 # Changed: Faster to spin up testnets, but does not give validator # reasonable warning time for genesis MIN_GENESIS_DELAY* = 300 @@ -178,16 +178,6 @@ const MIN_GASPRICE* = 32 # Gwei GASPRICE_ADJUSTMENT_COEFFICIENT* = 8 - # Phase 1 - Sharding - # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L157 - # TODO those are included in minimal.yaml but not mainnet.yaml - # Why? - SHARD_SLOTS_PER_BEACON_SLOT* = 2 # spec: SHARD_SLOTS_PER_EPOCH - EPOCHS_PER_SHARD_PERIOD* = 4 - PHASE_1_FORK_EPOCH* = 8 - PHASE_1_FORK_SLOT* = 64 - # Phase 1 - Custody game # --------------------------------------------------------------- # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase1/custody-game.md#constants diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index 7167dc83a..6ca684caa 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -108,7 +108,8 @@ proc process_randao( let proposer = addr state.validators[proposer_index.get] # Verify that the provided randao value is valid - let signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) + let signing_root = compute_signing_root( + epoch, get_domain(state, DOMAIN_RANDAO, get_current_epoch(state))) if skipBLSValidation notin flags: if not blsVerify(proposer.pubkey, signing_root.data, body.randao_reveal): notice "Randao mismatch", proposer_pubkey = shortLog(proposer.pubkey), @@ -481,42 +482,47 @@ proc makeBeaconBlock*( some(blck) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection func get_slot_signature*( - fork: Fork, slot: Slot, privkey: ValidatorPrivKey): ValidatorSig = + fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, + privkey: ValidatorPrivKey): ValidatorSig = let - domain = - get_domain(fork, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot)) + domain = get_domain(fork, DOMAIN_SELECTION_PROOF, + compute_epoch_at_slot(slot), genesis_validators_root) signing_root = compute_signing_root(slot, domain) 
blsSign(privKey, signing_root.data) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#randao-reveal func get_epoch_signature*( - fork: Fork, slot: Slot, privkey: ValidatorPrivKey): ValidatorSig = + fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, + privkey: ValidatorPrivKey): ValidatorSig = let - domain = - get_domain(fork, DOMAIN_RANDAO, compute_epoch_at_slot(slot)) + domain = get_domain(fork, DOMAIN_RANDAO, compute_epoch_at_slot(slot), + genesis_validators_root) signing_root = compute_signing_root(compute_epoch_at_slot(slot), domain) blsSign(privKey, signing_root.data) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#signature func get_block_signature*( - fork: Fork, slot: Slot, root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig = + fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot, + root: Eth2Digest, privkey: ValidatorPrivKey): ValidatorSig = let - domain = - get_domain(fork, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(slot)) + domain = get_domain(fork, DOMAIN_BEACON_PROPOSER, + compute_epoch_at_slot(slot), genesis_validators_root) signing_root = compute_signing_root(root, domain) blsSign(privKey, signing_root.data) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregate-signature func get_attestation_signature*( - fork: Fork, attestation: AttestationData, privkey: ValidatorPrivKey): ValidatorSig = + fork: Fork, genesis_validators_root: Eth2Digest, attestation: AttestationData, + privkey: ValidatorPrivKey): ValidatorSig = let attestationRoot = hash_tree_root(attestation) - domain = get_domain(fork, DOMAIN_BEACON_ATTESTER, attestation.target.epoch) + domain = get_domain(fork, DOMAIN_BEACON_ATTESTER, + attestation.target.epoch, genesis_validators_root) signing_root = compute_signing_root(attestationRoot, domain) blsSign(privKey, signing_root.data) diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index eb70fd002..bc582b6f9 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -242,7 +242,7 @@ func get_base_reward(state: BeaconState, index: ValidatorIndex, effective_balance * BASE_REWARD_FACTOR div integer_squareroot(total_balance) div BASE_REWARDS_PER_EPOCH -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1 +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1 func get_attestation_deltas(state: BeaconState, stateCache: var StateCache): tuple[a: seq[Gwei], b: seq[Gwei]] {.nbench.}= let @@ -280,7 +280,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache): const increment = EFFECTIVE_BALANCE_INCREMENT let reward_numerator = get_base_reward(state, index, total_balance) * (attesting_balance div increment) - rewards[index] = reward_numerator div (total_balance div increment) + rewards[index] += reward_numerator div (total_balance div increment) else: penalties[index] += get_base_reward(state, index, total_balance) diff --git a/beacon_chain/validator_pool.nim b/beacon_chain/validator_pool.nim index a79e5d235..ece29eb67 100644 --- a/beacon_chain/validator_pool.nim +++ 
b/beacon_chain/validator_pool.nim @@ -25,7 +25,8 @@ func getValidator*(pool: ValidatorPool, pool.validators.getOrDefault(validatorKey) # TODO: Honest validator - https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md -proc signBlockProposal*(v: AttachedValidator, fork: Fork, slot: Slot, +proc signBlockProposal*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, slot: Slot, blockRoot: Eth2Digest): Future[ValidatorSig] {.async.} = if v.kind == inProcess: @@ -34,30 +35,33 @@ proc signBlockProposal*(v: AttachedValidator, fork: Fork, slot: Slot, # replaced by something more sensible await sleepAsync(chronos.milliseconds(1)) - result = get_block_signature(fork, slot, blockRoot, v.privKey) + result = get_block_signature( + fork, genesis_validators_root, slot, blockRoot, v.privKey) else: error "Unimplemented" quit 1 proc signAttestation*(v: AttachedValidator, attestation: AttestationData, - fork: Fork): Future[ValidatorSig] {.async.} = + fork: Fork, genesis_validators_root: Eth2Digest): + Future[ValidatorSig] {.async.} = if v.kind == inProcess: # TODO this is an ugly hack to fake a delay and subsequent async reordering # for the purpose of testing the external validator delay - to be # replaced by something more sensible await sleepAsync(chronos.milliseconds(1)) - result = get_attestation_signature(fork, attestation, v.privKey) + result = get_attestation_signature( + fork, genesis_validators_root, attestation, v.privKey) else: error "Unimplemented" quit 1 -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#randao-reveal -func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork, slot: Slot): - ValidatorSig = - get_epoch_signature(fork, slot, k) +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#randao-reveal +func genRandaoReveal*(k: ValidatorPrivKey, fork: Fork, + genesis_validators_root: Eth2Digest, slot: Slot): ValidatorSig = + get_epoch_signature(fork, genesis_validators_root, slot, k) -func genRandaoReveal*(v: AttachedValidator, fork: Fork, slot: Slot): - ValidatorSig = - genRandaoReveal(v.privKey, fork, slot) +func genRandaoReveal*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, slot: Slot): ValidatorSig = + genRandaoReveal(v.privKey, fork, genesis_validators_root, slot) diff --git a/tests/mocking/mock_attestations.nim b/tests/mocking/mock_attestations.nim index 36e3552e7..28bcc3b6e 100644 --- a/tests/mocking/mock_attestations.nim +++ b/tests/mocking/mock_attestations.nim @@ -66,7 +66,8 @@ proc signMockAttestation*(state: BeaconState, attestation: var Attestation) = var first_iter = true # Can't do while loop on hashset for validator_index in participants: let sig = get_attestation_signature( - state.fork, attestation.data, MockPrivKeys[validator_index] + state.fork, state.genesis_validators_root, attestation.data, + MockPrivKeys[validator_index] ) if first_iter: attestation.signature = sig diff --git a/tests/mocking/mock_blocks.nim b/tests/mocking/mock_blocks.nim index 80b011c81..969d5cee0 100644 --- a/tests/mocking/mock_blocks.nim +++ b/tests/mocking/mock_blocks.nim @@ -28,9 +28,10 @@ proc signMockBlockImpl( let privkey = MockPrivKeys[proposer_index] signedBlock.message.body.randao_reveal = get_epoch_signature( - state.fork, block_slot, privkey) + state.fork, state.genesis_validators_root, block_slot, privkey) signedBlock.signature = get_block_signature( - state.fork, block_slot, hash_tree_root(signedBlock.message), privkey) + state.fork, 
state.genesis_validators_root, block_slot, + hash_tree_root(signedBlock.message), privkey) proc signMockBlock*( state: BeaconState, diff --git a/tests/official/fixtures_utils.nim b/tests/official/fixtures_utils.nim index 050d9f33d..8fa440d7b 100644 --- a/tests/official/fixtures_utils.nim +++ b/tests/official/fixtures_utils.nim @@ -36,7 +36,7 @@ proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} = const FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / "vendor" / "nim-eth2-scenarios" - SszTestsDir* = FixturesDir/"tests-v0.11.0" + SszTestsDir* = FixturesDir/"tests-v0.11.1" proc parseTest*(path: string, Format: typedesc[Json or SSZ], T: typedesc): T = try: diff --git a/tests/official/test_fixture_const_sanity_check.nim b/tests/official/test_fixture_const_sanity_check.nim index 193229e01..ace4787f7 100644 --- a/tests/official/test_fixture_const_sanity_check.nim +++ b/tests/official/test_fixture_const_sanity_check.nim @@ -19,7 +19,7 @@ import const SpecDir = currentSourcePath.rsplit(DirSep, 1)[0] / ".."/".."/"beacon_chain"/"spec" - Config = FixturesDir/"tests-v0.11.0"/const_preset/"config.yaml" + Config = FixturesDir/"tests-v0.11.1"/const_preset/"config.yaml" type CheckedType = SomeInteger or Slot or Epoch @@ -122,5 +122,5 @@ proc checkConfig() = else: check: ConstsToCheck[constant] == value.getBiggestInt().uint64() -suiteReport "Official - 0.11.0 - constants & config " & preset(): +suiteReport "Official - 0.11.1 - constants & config " & preset(): checkConfig() diff --git a/tests/official/test_fixture_operations_attestations.nim b/tests/official/test_fixture_operations_attestations.nim index d30b79d25..3f33ee492 100644 --- a/tests/official/test_fixture_operations_attestations.nim +++ b/tests/official/test_fixture_operations_attestations.nim @@ -32,8 +32,6 @@ proc runTest(identifier: string) = var flags: UpdateFlags var prefix: string - if not existsFile(testDir/"meta.yaml"): - flags.incl skipBlsValidation if existsFile(testDir/"post.ssz"): prefix = "[Valid] " else: diff --git a/tests/official/test_fixture_operations_attester_slashings.nim b/tests/official/test_fixture_operations_attester_slashings.nim index d44d29658..85f287665 100644 --- a/tests/official/test_fixture_operations_attester_slashings.nim +++ b/tests/official/test_fixture_operations_attester_slashings.nim @@ -32,8 +32,6 @@ proc runTest(identifier: string) = var flags: UpdateFlags var prefix: string - if not existsFile(testDir/"meta.yaml"): - flags.incl skipBlsValidation if existsFile(testDir/"post.ssz"): prefix = "[Valid] " else: diff --git a/tests/official/test_fixture_operations_proposer_slashings.nim b/tests/official/test_fixture_operations_proposer_slashings.nim index 9e2841346..b4249965a 100644 --- a/tests/official/test_fixture_operations_proposer_slashings.nim +++ b/tests/official/test_fixture_operations_proposer_slashings.nim @@ -32,8 +32,6 @@ proc runTest(identifier: string) = var flags: UpdateFlags var prefix: string - if not existsFile(testDir/"meta.yaml"): - flags.incl skipBlsValidation if existsFile(testDir/"post.ssz"): prefix = "[Valid] " else: diff --git a/tests/official/test_fixture_operations_voluntary_exit.nim b/tests/official/test_fixture_operations_voluntary_exit.nim index 392b0cbb5..c2fa41f3d 100644 --- a/tests/official/test_fixture_operations_voluntary_exit.nim +++ b/tests/official/test_fixture_operations_voluntary_exit.nim @@ -32,8 +32,6 @@ proc runTest(identifier: string) = var flags: UpdateFlags var prefix: string - if not existsFile(testDir/"meta.yaml"): - flags.incl 
skipBlsValidation if existsFile(testDir/"post.ssz"): prefix = "[Valid] " else: diff --git a/tests/official/test_fixture_ssz_consensus_objects.nim b/tests/official/test_fixture_ssz_consensus_objects.nim index b6e61a245..a85db9ec9 100644 --- a/tests/official/test_fixture_ssz_consensus_objects.nim +++ b/tests/official/test_fixture_ssz_consensus_objects.nim @@ -25,7 +25,7 @@ import # ---------------------------------------------------------------- const - SSZDir = FixturesDir/"tests-v0.11.0"/const_preset/"phase0"/"ssz_static" + SSZDir = FixturesDir/"tests-v0.11.1"/const_preset/"phase0"/"ssz_static" type SSZHashTreeRoot = object @@ -106,7 +106,7 @@ proc runSSZtests() = else: raise newException(ValueError, "Unsupported test: " & sszType) -suiteReport "Official - 0.11.0 - SSZ consensus objects " & preset(): +suiteReport "Official - 0.11.1 - SSZ consensus objects " & preset(): runSSZtests() summarizeLongTests("FixtureSSZConsensus") diff --git a/tests/official/test_fixture_ssz_generic_types.nim b/tests/official/test_fixture_ssz_generic_types.nim index aa2c4c092..5a8c52454 100644 --- a/tests/official/test_fixture_ssz_generic_types.nim +++ b/tests/official/test_fixture_ssz_generic_types.nim @@ -23,7 +23,7 @@ import # ------------------------------------------------------------------------ const - SSZDir = FixturesDir/"tests-v0.11.0"/"general"/"phase0"/"ssz_generic" + SSZDir = FixturesDir/"tests-v0.11.1"/"general"/"phase0"/"ssz_generic" type SSZHashTreeRoot = object diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index e12c3b006..119994b8f 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -84,7 +84,8 @@ proc addTestBlock*( privKey = hackPrivKey(proposer) randao_reveal = if skipBlsValidation notin flags: - privKey.genRandaoReveal(state.fork, state.slot) + privKey.genRandaoReveal( + state.fork, state.genesis_validators_root, state.slot) else: ValidatorSig() @@ -149,7 +150,8 @@ proc makeAttestation*( let sig = if skipBLSValidation notin flags: - get_attestation_signature(state.fork, data, hackPrivKey(validator)) + get_attestation_signature(state.fork, state.genesis_validators_root, + data, hackPrivKey(validator)) else: ValidatorSig() @@ -203,7 +205,7 @@ proc makeFullAttestations*( aggregation_bits: CommitteeValidatorsBits.init(committee.len), data: data, signature: get_attestation_signature( - state.fork, data, + state.fork, state.genesis_validators_root, data, hackPrivKey(state.validators[committee[0]])) ) # Aggregate the remainder @@ -212,7 +214,7 @@ proc makeFullAttestations*( attestation.aggregation_bits.setBit j if skipBLSValidation notin flags: attestation.signature.aggregate(get_attestation_signature( - state.fork, data, + state.fork, state.genesis_validators_root, data, hackPrivKey(state.validators[committee[j]]) )) diff --git a/vendor/nim-eth2-scenarios b/vendor/nim-eth2-scenarios index b06d78d1d..5326d824b 160000 --- a/vendor/nim-eth2-scenarios +++ b/vendor/nim-eth2-scenarios @@ -1 +1 @@ -Subproject commit b06d78d1d8c306a3cce80c391856f44bf7db6119 +Subproject commit 5326d824b1d91ea273095172512eb309f32e0c82 From daabb1b5b29065f502ae24c91b9ca231aad58446 Mon Sep 17 00:00:00 2001 From: tersec Date: Mon, 30 Mar 2020 23:40:24 +0000 Subject: [PATCH 23/58] complete all (non-semantic, comment-only) 0.11.0 -> 0.11.1 beacon chain spec updates, mark all beacon chain v0.11.0 spec references as v0.11.1, and remove now unnecessary/unused UpdateFlags vars from 4 test fixtures (#841) --- beacon_chain/spec/beaconstate.nim | 12 ++--- beacon_chain/spec/crypto.nim | 2 +- 
beacon_chain/spec/datatypes.nim | 52 +++++++++---------- beacon_chain/spec/helpers.nim | 26 +++++----- beacon_chain/spec/presets/minimal.nim | 2 +- beacon_chain/spec/state_transition_block.nim | 12 ++--- beacon_chain/spec/state_transition_epoch.nim | 18 ++++--- beacon_chain/spec/validator.nim | 4 +- .../test_fixture_operations_attestations.nim | 5 +- ..._fixture_operations_attester_slashings.nim | 5 +- ..._fixture_operations_proposer_slashings.nim | 5 +- ...test_fixture_operations_voluntary_exit.nim | 5 +- 12 files changed, 75 insertions(+), 73 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index cc5476767..9f94a4cfc 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -36,7 +36,7 @@ func increase_balance*( # Increase the validator balance at index ``index`` by ``delta``. state.balances[index] += delta -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#decrease_balance +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#decrease_balance func decrease_balance*( state: var BeaconState, index: ValidatorIndex, delta: Gwei) = ## Decrease the validator balance at index ``index`` by ``delta``, with @@ -104,7 +104,7 @@ func compute_activation_exit_epoch(epoch: Epoch): Epoch = ## ``epoch`` take effect. epoch + 1 + MAX_SEED_LOOKAHEAD -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_validator_churn_limit +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_validator_churn_limit func get_validator_churn_limit(state: BeaconState): uint64 = # Return the validator churn limit for the current epoch. let active_validator_indices = @@ -112,7 +112,7 @@ func get_validator_churn_limit(state: BeaconState): uint64 = max(MIN_PER_EPOCH_CHURN_LIMIT, len(active_validator_indices) div CHURN_LIMIT_QUOTIENT).uint64 -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#initiate_validator_exit +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#initiate_validator_exit func initiate_validator_exit*(state: var BeaconState, index: ValidatorIndex) = # Initiate the exit of the validator with index ``index``. @@ -297,13 +297,13 @@ func get_total_balance*(state: BeaconState, validators: auto): Gwei = # XXX: Move to state_transition_epoch.nim? -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue func is_eligible_for_activation_queue(validator: Validator): bool = # Check if ``validator`` is eligible to be placed into the activation queue. validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance == MAX_EFFECTIVE_BALANCE -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#is_eligible_for_activation +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_eligible_for_activation func is_eligible_for_activation(state: BeaconState, validator: Validator): bool = # Check if ``validator`` is eligible for activation. 
@@ -435,7 +435,7 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation, signature: attestation.signature ) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#attestations +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#attestations proc check_attestation*( state: BeaconState, attestation: Attestation, flags: UpdateFlags, stateCache: var StateCache): bool = diff --git a/beacon_chain/spec/crypto.nim b/beacon_chain/spec/crypto.nim index 095b2b0fb..2310d9cf2 100644 --- a/beacon_chain/spec/crypto.nim +++ b/beacon_chain/spec/crypto.nim @@ -104,7 +104,7 @@ func pubKey*(privkey: ValidatorPrivKey): ValidatorPubKey = else: privkey.getKey -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#bls-signatures +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#bls-signatures func aggregate*[T](values: openarray[ValidatorSig]): ValidatorSig = ## Aggregate arrays of sequences of Validator Signatures ## This assumes that they are real signatures diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index 3527895a3..bfdff8295 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -81,7 +81,7 @@ type # Domains # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#domain-types + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#domain-types DomainType* = enum DOMAIN_BEACON_PROPOSER = 0 DOMAIN_BEACON_ATTESTER = 1 @@ -99,7 +99,7 @@ type # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase1/custody-game.md#signature-domain-types DOMAIN_CUSTODY_BIT_SLASHING = 0x83 - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#custom-types + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#custom-types Domain* = array[32, byte] # https://github.com/nim-lang/Nim/issues/574 and be consistent across @@ -114,17 +114,17 @@ type BitList*[maxLen: static int] = distinct BitSeq - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#proposerslashing + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#proposerslashing ProposerSlashing* = object signed_header_1*: SignedBeaconBlockHeader signed_header_2*: SignedBeaconBlockHeader - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#attesterslashing + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#attesterslashing AttesterSlashing* = object attestation_1*: IndexedAttestation attestation_2*: IndexedAttestation - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#indexedattestation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#indexedattestation IndexedAttestation* = object # TODO ValidatorIndex, but that doesn't serialize properly attesting_indices*: List[uint64, MAX_VALIDATORS_PER_COMMITTEE] @@ -133,24 +133,24 @@ type CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE] - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#attestation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#attestation Attestation* = object aggregation_bits*: CommitteeValidatorsBits data*: 
AttestationData signature*: ValidatorSig - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#forkdata + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#forkdata ForkData* = object # TODO: Spec introduced an alias for Version = array[4, byte] current_version*: array[4, byte] genesis_validators_root*: Eth2Digest - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#checkpoint + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#checkpoint Checkpoint* = object epoch*: Epoch root*: Eth2Digest - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#AttestationData + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#AttestationData AttestationData* = object slot*: Slot index*: uint64 @@ -162,34 +162,34 @@ type source*: Checkpoint target*: Checkpoint - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#deposit + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#deposit Deposit* = object proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\ ## Merkle path to deposit root data*: DepositData - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#depositmessage + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#depositmessage DepositMessage* = object pubkey*: ValidatorPubKey withdrawal_credentials*: Eth2Digest amount*: Gwei - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#depositdata + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#depositdata DepositData* = object pubkey*: ValidatorPubKey withdrawal_credentials*: Eth2Digest amount*: uint64 signature*: ValidatorSig # Signing over DepositMessage - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#voluntaryexit + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#voluntaryexit VoluntaryExit* = object epoch*: Epoch ##\ ## Earliest epoch when voluntary exit can be processed validator_index*: uint64 - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#beaconblock + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconblock BeaconBlock* = object ## For each slot, a proposer is chosen from the validator pool to propose ## a new block. 
Once the block as been proposed, it is transmitted to @@ -208,7 +208,7 @@ type body*: BeaconBlockBody - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#beaconblockheader + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconblockheader BeaconBlockHeader* = object slot*: Slot proposer_index*: uint64 @@ -216,7 +216,7 @@ type state_root*: Eth2Digest body_root*: Eth2Digest - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#beaconblockbody + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconblockbody BeaconBlockBody* = object randao_reveal*: ValidatorSig eth1_data*: Eth1Data @@ -229,7 +229,7 @@ type deposits*: List[Deposit, MAX_DEPOSITS] voluntary_exits*: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#beaconstate + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate BeaconState* = object # Versioning genesis_time*: uint64 @@ -283,7 +283,7 @@ type current_justified_checkpoint*: Checkpoint finalized_checkpoint*: Checkpoint - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#validator + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#validator Validator* = object pubkey*: ValidatorPubKey @@ -305,7 +305,7 @@ type withdrawable_epoch*: Epoch ##\ ## When validator can withdraw or transfer funds - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#pendingattestation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#pendingattestation PendingAttestation* = object aggregation_bits*: CommitteeValidatorsBits data*: AttestationData @@ -315,12 +315,12 @@ type proposer_index*: uint64 - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#historicalbatch + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#historicalbatch HistoricalBatch* = object block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#fork + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#fork Fork* = object # TODO: Spec introduced an alias for Version = array[4, byte] # and a default parameter to compute_domain @@ -330,28 +330,28 @@ type epoch*: Epoch ##\ ## Epoch of latest fork - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#eth1data + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#eth1data Eth1Data* = object deposit_root*: Eth2Digest deposit_count*: uint64 block_hash*: Eth2Digest - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#signingroot + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#signingroot SigningRoot* = object object_root*: Eth2Digest domain*: Domain - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#signedvoluntaryexit + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#signedvoluntaryexit SignedVoluntaryExit* = object message*: VoluntaryExit signature*: ValidatorSig - # 
https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#signedbeaconblock + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#signedbeaconblock SignedBeaconBlock* = object message*: BeaconBlock signature*: ValidatorSig - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#signedbeaconblockheader + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#signedbeaconblockheader SignedBeaconBlockHeader* = object message*: BeaconBlockHeader signature*: ValidatorSig diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 10191e190..4e4ce18ee 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -15,7 +15,7 @@ import # Internal ./datatypes, ./digest, ../ssz -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#integer_squareroot +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#integer_squareroot func integer_squareroot*(n: SomeInteger): SomeInteger = # Return the largest integer ``x`` such that ``x**2 <= n``. doAssert n >= 0'u64 @@ -28,7 +28,7 @@ func integer_squareroot*(n: SomeInteger): SomeInteger = y = (x + n div x) div 2 x -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_epoch_at_slot +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_epoch_at_slot func compute_epoch_at_slot*(slot: Slot|uint64): Epoch = # Return the epoch number at ``slot``. (slot div SLOTS_PER_EPOCH).Epoch @@ -36,17 +36,17 @@ func compute_epoch_at_slot*(slot: Slot|uint64): Epoch = template epoch*(slot: Slot): Epoch = compute_epoch_at_slot(slot) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch func compute_start_slot_at_epoch*(epoch: Epoch): Slot = # Return the start slot of ``epoch``. (epoch * SLOTS_PER_EPOCH).Slot -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#is_active_validator +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_active_validator func is_active_validator*(validator: Validator, epoch: Epoch): bool = ### Check if ``validator`` is active validator.activation_epoch <= epoch and epoch < validator.exit_epoch -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_active_validator_indices +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_active_validator_indices func get_active_validator_indices*(state: BeaconState, epoch: Epoch): seq[ValidatorIndex] = # Return the sequence of active validator indices at ``epoch``. @@ -54,7 +54,7 @@ func get_active_validator_indices*(state: BeaconState, epoch: Epoch): if is_active_validator(val, epoch): result.add idx.ValidatorIndex -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_committee_count_at_slot +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_committee_count_at_slot func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 = # Return the number of committees at ``slot``. 
let epoch = compute_epoch_at_slot(slot) @@ -67,13 +67,13 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 = # Otherwise, get_beacon_committee(...) cannot access some committees. doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_current_epoch +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_current_epoch func get_current_epoch*(state: BeaconState): Epoch = # Return the current epoch. doAssert state.slot >= GENESIS_SLOT, $state.slot compute_epoch_at_slot(state.slot) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_randao_mix +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_randao_mix func get_randao_mix*(state: BeaconState, epoch: Epoch): Eth2Digest = ## Returns the randao mix at a recent ``epoch``. @@ -114,7 +114,7 @@ func int_to_bytes4*(x: uint64): array[4, byte] = result[2] = ((x shr 16) and 0xff).byte result[3] = ((x shr 24) and 0xff).byte -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_fork_data_root +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_fork_data_root func compute_fork_data_root(current_version: array[4, byte], genesis_validators_root: Eth2Digest): Eth2Digest = # Return the 32-byte fork data root for the ``current_version`` and @@ -126,7 +126,7 @@ func compute_fork_data_root(current_version: array[4, byte], genesis_validators_root: genesis_validators_root )) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_fork_digest +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_fork_digest func compute_fork_digest(current_version: array[4, byte], genesis_validators_root: Eth2Digest): array[4, byte] = # Return the 4-byte fork digest for the ``current_version`` and @@ -136,7 +136,7 @@ func compute_fork_digest(current_version: array[4, byte], result[0..3] = compute_fork_data_root(current_version, genesis_validators_root).data[0..3] -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_domain +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_domain func compute_domain*( domain_type: DomainType, fork_version: array[4, byte] = [0'u8, 0, 0, 0], @@ -165,7 +165,7 @@ func get_domain*( ## of a message. get_domain(state.fork, domain_type, epoch, state. genesis_validators_root) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_signing_root +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_signing_root func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest = # Return the signing root of an object by calculating the root of the # object-domain tree. @@ -175,7 +175,7 @@ func compute_signing_root*(ssz_object: auto, domain: Domain): Eth2Digest = ) hash_tree_root(domain_wrapped_object) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_seed +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_seed func get_seed*(state: BeaconState, epoch: Epoch, domain_type: DomainType): Eth2Digest = # Return the seed at ``epoch``. 
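The helpers.nim changes here, together with the earlier patch in this series, thread `genesis_validators_root` through `compute_domain`/`get_domain`. The standalone sketch below shows the resulting v0.11.1 domain layout: 4 bytes of domain type followed by the first 28 bytes of the fork data root, which now commits to `genesis_validators_root`. It works on raw bytes, and `forkDataRoot` is only a placeholder for the spec's `hash_tree_root(ForkData)`, so the exact byte values are illustrative.

```nim
type
  Bytes4 = array[4, byte]   # fork version / domain type as raw bytes (sketch only)
  Root   = array[32, byte]

proc forkDataRoot(currentVersion: Bytes4, genesisValidatorsRoot: Root): Root =
  # Placeholder mixing only; NOT the SSZ hash_tree_root(ForkData) the spec uses.
  for i in 0 ..< 32:
    result[i] = genesisValidatorsRoot[i] xor currentVersion[i mod 4]

proc computeDomainSketch(domainType, forkVersion: Bytes4,
                         genesisValidatorsRoot: Root): Root =
  # v0.11.1 layout: domain_type (4 bytes) followed by fork_data_root[0..27].
  let root = forkDataRoot(forkVersion, genesisValidatorsRoot)
  result[0 .. 3] = domainType
  result[4 .. 31] = root[0 .. 27]

# Same fork version, different genesis_validators_root, different domain and
# therefore different signing roots: signatures no longer replay across networks.
var rootA, rootB: Root
rootB[0] = 1
doAssert computeDomainSketch([0'u8, 0, 0, 1], [0'u8, 0, 0, 0], rootA) !=
         computeDomainSketch([0'u8, 0, 0, 1], [0'u8, 0, 0, 0], rootB)
```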
diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index 3c8fd8077..9a286304e 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ -43,7 +43,7 @@ const # Constants # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#constants + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#constants # TODO "The following values are (non-configurable) constants" ... # Unchanged BASE_REWARDS_PER_EPOCH* = 4 diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index 6ca684caa..a0cd82686 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -44,7 +44,7 @@ declareGauge beacon_previous_live_validators, "Number of active validators that declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#block-header +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#block-header proc process_block_header*( state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.}= @@ -128,21 +128,21 @@ proc process_randao( true -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#eth1-data +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#eth1-data func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}= state.eth1_data_votes.add body.eth1_data if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: state.eth1_data = body.eth1_data -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#is_slashable_validator +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_slashable_validator func is_slashable_validator(validator: Validator, epoch: Epoch): bool = # Check if ``validator`` is slashable. 
(not validator.slashed) and (validator.activation_epoch <= epoch) and (epoch < validator.withdrawable_epoch) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#proposer-slashings +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#proposer-slashings proc process_proposer_slashing*( state: var BeaconState, proposer_slashing: ProposerSlashing, flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.}= @@ -379,7 +379,7 @@ proc processVoluntaryExits(state: var BeaconState, blck: BeaconBlock, flags: Upd return false return true -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#block-processing +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#block-processing proc process_block*( state: var BeaconState, blck: BeaconBlock, flags: UpdateFlags, stateCache: var StateCache): bool {.nbench.}= @@ -415,7 +415,7 @@ proc process_block*( # TODO, everything below is now in process_operations # and implementation is per element instead of the whole seq - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#operations + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#operations if not processProposerSlashings(state, blck, flags, stateCache): debug "[Block processing] Proposer slashing failure", slot = shortLog(state.slot) return false diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index bc582b6f9..7acbf1c3b 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -63,15 +63,17 @@ declareGauge epoch_transition_final_updates, "Epoch transition final updates tim # Spec # -------------------------------------------------------- -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_total_active_balance +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_total_active_balance func get_total_active_balance*(state: BeaconState): Gwei = # Return the combined effective balance of the active validators. + # Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei + # minimum to avoid divisions by zero. # TODO it calls get_total_balance with set(g_a_v_i(...)) get_total_balance( state, get_active_validator_indices(state, get_current_epoch(state))) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#helper-functions-1 +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#helper-functions-1 func get_matching_source_attestations(state: BeaconState, epoch: Epoch): seq[PendingAttestation] = doAssert epoch in [get_current_epoch(state), get_previous_epoch(state)] @@ -98,6 +100,10 @@ func get_matching_head_attestations(state: BeaconState, epoch: Epoch): func get_attesting_balance( state: BeaconState, attestations: seq[PendingAttestation], stateCache: var StateCache): Gwei = + # Return the combined effective balance of the set of unslashed validators + # participating in ``attestations``. + # Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei + # minimum to avoid divisions by zero. 
get_total_balance(state, get_unslashed_attesting_indices( state, attestations, stateCache)) @@ -144,7 +150,7 @@ proc process_justification_and_finalization*( ## and `get_matching_source_attestations(...)` via ## https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#helper-functions-1 ## and - ## https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#final-updates + ## https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#final-updates ## after which the state.previous_epoch_attestations is replaced. trace "Non-attesting indices in previous epoch", missing_all_validators= @@ -233,7 +239,7 @@ proc process_justification_and_finalization*( checkpoint = shortLog(state.finalized_checkpoint), cat = "finalization" -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1 +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1 func get_base_reward(state: BeaconState, index: ValidatorIndex, total_balance: auto): Gwei = # Spec function recalculates total_balance every time, which creates an @@ -367,7 +373,7 @@ func process_slashings*(state: var BeaconState) {.nbench.}= let penalty = penalty_numerator div total_balance * increment decrease_balance(state, index.ValidatorIndex, penalty) -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#final-updates +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#final-updates func process_final_updates*(state: var BeaconState) {.nbench.}= let current_epoch = get_current_epoch(state) @@ -439,7 +445,7 @@ proc process_epoch*(state: var BeaconState) {.nbench.}= # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#slashings process_slashings(state) - # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#final-updates + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#final-updates process_final_updates(state) # Once per epoch metrics diff --git a/beacon_chain/spec/validator.nim b/beacon_chain/spec/validator.nim index 2b35457e6..a9c4acfd5 100644 --- a/beacon_chain/spec/validator.nim +++ b/beacon_chain/spec/validator.nim @@ -145,7 +145,7 @@ func get_empty_per_epoch_cache*(): StateCache = initTable[Epoch, seq[ValidatorIndex]]() result.committee_count_cache = initTable[Epoch, uint64]() -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#compute_proposer_index +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_proposer_index func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex], seed: Eth2Digest, stateCache: var StateCache): Option[ValidatorIndex] = # Return from ``indices`` a random index sampled by effective balance. @@ -176,7 +176,7 @@ func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex], return some(candidate_index) i += 1 -# https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_beacon_proposer_index func get_beacon_proposer_index*(state: BeaconState, stateCache: var StateCache): Option[ValidatorIndex] = # Return the beacon proposer index at the current slot. 
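The notes added above to get_total_active_balance and get_attesting_balance document that the spec's get_total_balance clamps its result to a minimum of EFFECTIVE_BALANCE_INCREMENT (10^9 Gwei in the v0.11.1 presets), because callers go on to divide by these totals. A minimal, self-contained Nim sketch of that clamping behaviour, with illustrative names that are not part of the codebase:

const EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64 # 2^0 * 10^9 Gwei, per the v0.11.1 presets

func totalBalanceFloor(effectiveBalances: openArray[uint64]): uint64 =
  ## Sum of effective balances with a floor of EFFECTIVE_BALANCE_INCREMENT,
  ## mirroring the spec's ``get_total_balance`` so that callers dividing by
  ## the result cannot divide by zero (illustrative sketch only).
  var total = 0'u64
  for balance in effectiveBalances:
    total += balance
  max(EFFECTIVE_BALANCE_INCREMENT, total)

when isMainModule:
  let noValidators: seq[uint64] = @[]
  doAssert totalBalanceFloor(noValidators) == EFFECTIVE_BALANCE_INCREMENT
  doAssert totalBalanceFloor(@[32_000_000_000'u64, 31_000_000_000'u64]) == 63_000_000_000'u64
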
diff --git a/tests/official/test_fixture_operations_attestations.nim b/tests/official/test_fixture_operations_attestations.nim index 3f33ee492..0a97eae16 100644 --- a/tests/official/test_fixture_operations_attestations.nim +++ b/tests/official/test_fixture_operations_attestations.nim @@ -30,7 +30,6 @@ proc runTest(identifier: string) = proc `testImpl _ operations_attestations _ identifier`() = - var flags: UpdateFlags var prefix: string if existsFile(testDir/"post.ssz"): prefix = "[Valid] " @@ -53,10 +52,10 @@ proc runTest(identifier: string) = postRef[] = parseTest(testDir/"post.ssz", SSZ, BeaconState) if postRef.isNil: - let done = process_attestation(stateRef[], attestationRef[], flags, cache) + let done = process_attestation(stateRef[], attestationRef[], {}, cache) doAssert done == false, "We didn't expect this invalid attestation to be processed." else: - let done = process_attestation(stateRef[], attestationRef[], flags, cache) + let done = process_attestation(stateRef[], attestationRef[], {}, cache) doAssert done, "Valid attestation not processed" check: stateRef.hash_tree_root() == postRef.hash_tree_root() reportDiff(stateRef, postRef) diff --git a/tests/official/test_fixture_operations_attester_slashings.nim b/tests/official/test_fixture_operations_attester_slashings.nim index 85f287665..7f1975a47 100644 --- a/tests/official/test_fixture_operations_attester_slashings.nim +++ b/tests/official/test_fixture_operations_attester_slashings.nim @@ -30,7 +30,6 @@ proc runTest(identifier: string) = proc `testImpl _ operations_attester_slashing _ identifier`() = - var flags: UpdateFlags var prefix: string if existsFile(testDir/"post.ssz"): prefix = "[Valid] " @@ -54,11 +53,11 @@ proc runTest(identifier: string) = if postRef.isNil: let done = process_attester_slashing(stateRef[], attesterSlashingRef[], - flags, cache) + {}, cache) doAssert done == false, "We didn't expect this invalid attester slashing to be processed." else: let done = process_attester_slashing(stateRef[], attesterSlashingRef[], - flags, cache) + {}, cache) doAssert done, "Valid attestater slashing not processed" check: stateRef.hash_tree_root() == postRef.hash_tree_root() reportDiff(stateRef, postRef) diff --git a/tests/official/test_fixture_operations_proposer_slashings.nim b/tests/official/test_fixture_operations_proposer_slashings.nim index b4249965a..888668dc8 100644 --- a/tests/official/test_fixture_operations_proposer_slashings.nim +++ b/tests/official/test_fixture_operations_proposer_slashings.nim @@ -30,7 +30,6 @@ proc runTest(identifier: string) = proc `testImpl_proposer_slashing _ identifier`() = - var flags: UpdateFlags var prefix: string if existsFile(testDir/"post.ssz"): prefix = "[Valid] " @@ -53,10 +52,10 @@ proc runTest(identifier: string) = var cache = get_empty_per_epoch_cache() if postRef.isNil: - let done = process_proposer_slashing(stateRef[], proposerSlashing[], flags, cache) + let done = process_proposer_slashing(stateRef[], proposerSlashing[], {}, cache) doAssert done == false, "We didn't expect this invalid proposer slashing to be processed." 
else: - let done = process_proposer_slashing(stateRef[], proposerSlashing[], flags, cache) + let done = process_proposer_slashing(stateRef[], proposerSlashing[], {}, cache) doAssert done, "Valid proposer slashing not processed" check: stateRef.hash_tree_root() == postRef.hash_tree_root() reportDiff(stateRef, postRef) diff --git a/tests/official/test_fixture_operations_voluntary_exit.nim b/tests/official/test_fixture_operations_voluntary_exit.nim index c2fa41f3d..5d6f631eb 100644 --- a/tests/official/test_fixture_operations_voluntary_exit.nim +++ b/tests/official/test_fixture_operations_voluntary_exit.nim @@ -30,7 +30,6 @@ proc runTest(identifier: string) = proc `testImpl _ voluntary_exit _ identifier`() = - var flags: UpdateFlags var prefix: string if existsFile(testDir/"post.ssz"): prefix = "[Valid] " @@ -51,10 +50,10 @@ proc runTest(identifier: string) = postRef[] = parseTest(testDir/"post.ssz", SSZ, BeaconState) if postRef.isNil: - let done = process_voluntary_exit(stateRef[], voluntaryExit[], flags) + let done = process_voluntary_exit(stateRef[], voluntaryExit[], {}) doAssert done == false, "We didn't expect this invalid voluntary exit to be processed." else: - let done = process_voluntary_exit(stateRef[], voluntaryExit[], flags) + let done = process_voluntary_exit(stateRef[], voluntaryExit[], {}) doAssert done, "Valid voluntary exit not processed" check: stateRef.hash_tree_root() == postRef.hash_tree_root() reportDiff(stateRef, postRef) From 8b9c8a692ec90b1901749578d6e58392c34ae8d9 Mon Sep 17 00:00:00 2001 From: kdeme Date: Tue, 31 Mar 2020 12:02:13 +0200 Subject: [PATCH 24/58] Discv5: Don't pass ip address when external ip is not known --- beacon_chain/beacon_node.nim | 9 +++------ beacon_chain/eth2_discovery.nim | 5 +++-- beacon_chain/eth2_network.nim | 34 ++++++++++++++------------------- vendor/nim-eth | 2 +- 4 files changed, 21 insertions(+), 29 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index d8bdfb433..c1f1b46c9 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -1196,15 +1196,12 @@ when isMainModule: if bootstrapFile.len > 0: let networkKeys = getPersistentNetKeys(config) - bootstrapAddress = enode.Address( - ip: config.bootstrapAddress, - tcpPort: config.bootstrapPort, - udpPort: config.bootstrapPort) - bootstrapEnr = enr.Record.init( 1, # sequence number networkKeys.seckey.asEthKey, - some(bootstrapAddress)) + some(config.bootstrapAddress), + config.bootstrapPort, + config.bootstrapPort) writeFile(bootstrapFile, bootstrapEnr.toURI) echo "Wrote ", bootstrapFile diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index 427eb1f3a..bbdc281cf 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -154,7 +154,8 @@ proc loadBootstrapFile*(bootstrapFile: string, proc new*(T: type Eth2DiscoveryProtocol, conf: BeaconNodeConf, - ip: IpAddress, rawPrivKeyBytes: openarray[byte]): T = + ip: Option[IpAddress], tcpPort, udpPort: Port, + rawPrivKeyBytes: openarray[byte]): T = # TODO # Implement more configuration options: # * for setting up a specific key @@ -174,4 +175,4 @@ proc new*(T: type Eth2DiscoveryProtocol, if fileExists(persistentBootstrapFile): loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) - newProtocol(pk, db, ip, conf.tcpPort, conf.udpPort, bootEnrs) + newProtocol(pk, db, ip, tcpPort, udpPort, bootEnrs) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 65f50c0d4..2425c0bdb 100644 --- 
a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -134,9 +134,6 @@ const readTimeoutErrorMsg = "Exceeded read timeout for a request" -let - globalListeningAddr = parseIpAddress("0.0.0.0") - # Metrics for tracking attestation and beacon block loss declareCounter gossip_messages_sent, "Number of gossip messages sent by this peer" @@ -681,10 +678,11 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} = await sleepAsync seconds(1) proc init*(T: type Eth2Node, conf: BeaconNodeConf, - switch: Switch, ip: IpAddress, privKey: keys.PrivateKey): T = + switch: Switch, ip: Option[IpAddress], tcpPort, udpPort: Port, + privKey: keys.PrivateKey): T = new result result.switch = switch - result.discovery = Eth2DiscoveryProtocol.new(conf, ip, privKey.data) + result.discovery = Eth2DiscoveryProtocol.new(conf, ip, tcpPort, udpPort, privKey.data) result.wantedPeers = conf.maxPeers result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers) @@ -829,11 +827,10 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = result.implementProtocolInit = proc (p: P2PProtocol): NimNode = return newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) -proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, +proc setupNat(conf: BeaconNodeConf): tuple[ip: Option[IpAddress], tcpPort: Port, udpPort: Port] {.gcsafe.} = # defaults - result.ip = globalListeningAddr result.tcpPort = conf.tcpPort result.udpPort = conf.udpPort @@ -850,16 +847,15 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, else: if conf.nat.startsWith("extip:") and isIpAddress(conf.nat[6..^1]): # any required port redirection is assumed to be done by hand - result.ip = parseIpAddress(conf.nat[6..^1]) + result.ip = some(parseIpAddress(conf.nat[6..^1])) nat = NatNone else: error "not a valid NAT mechanism, nor a valid IP address", value = conf.nat quit(QuitFailure) if nat != NatNone: - let extIP = getExternalIP(nat) - if extIP.isSome: - result.ip = extIP.get() + result.ip = getExternalIP(nat) + if result.ip.isSome: # TODO redirectPorts in considered a gcsafety violation # because it obtains the address of a non-gcsafe proc? let extPorts = ({.gcsafe.}: @@ -901,10 +897,10 @@ proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = var - (extIp, extTcpPort, _) = setupNat(conf) + (extIp, extTcpPort, extUdpPort) = setupNat(conf) hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) - announcedAddresses = if extIp == globalListeningAddr: @[] - else: @[tcpEndPoint(extIp, extTcpPort)] + announcedAddresses = if extIp.isNone(): @[] + else: @[tcpEndPoint(extIp.get(), extTcpPort)] info "Initializing networking", hostAddress, announcedAddresses @@ -915,17 +911,15 @@ proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = # are running behind a NAT). 
var switch = newStandardSwitch(some keys.seckey, hostAddress, triggerSelf = true, gossip = true) - result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) + result = Eth2Node.init(conf, switch, extIp, extTcpPort, extUdpPort, + keys.seckey.asEthKey) proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, ip: IpAddress, port: Port): enr.Record = - let - pair = getPersistentNetKeys(conf) - enodeAddress = Address(ip: ip, udpPort: port) - + let pair = getPersistentNetKeys(conf) return enr.Record.init(1'u64, # sequence number pair.seckey.asEthKey, - some(enodeAddress)) + some(ip), port, port) proc announcedENR*(node: Eth2Node): enr.Record = doAssert node.discovery != nil, "The Eth2Node must be initialized" diff --git a/vendor/nim-eth b/vendor/nim-eth index c3f23e591..fe6df94a1 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit c3f23e5912efff98fc6c8181db579037e5a19a2c +Subproject commit fe6df94a1956509e77ff533d9d00dd35b403ea22 From 3239b629048c93c8d9d21d7d18bb4a756b13e47d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Tue, 31 Mar 2020 14:53:41 +0200 Subject: [PATCH 25/58] move "--import:libbacktrace" to config.nims This simplifies the `./env.sh nim c -r ...` scenario. --- Makefile | 7 +++---- config.nims | 3 +-- vendor/nim-libbacktrace | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index dc063481c..975056f5a 100644 --- a/Makefile +++ b/Makefile @@ -58,12 +58,11 @@ endif # must be included after the default target -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk -# "--import" can't be added to config.nims, for some reason -# "--define:release" implies "--stacktrace:off" and it cannot be added to config.nims either +# "--define:release" implies "--stacktrace:off" and it cannot be added to config.nims ifeq ($(USE_LIBBACKTRACE), 0) NIM_PARAMS := $(NIM_PARAMS) -d:debug -d:disable_libbacktrace else -NIM_PARAMS := $(NIM_PARAMS) -d:release --import:libbacktrace +NIM_PARAMS := $(NIM_PARAMS) -d:release endif #- the Windows build fails on Azure Pipelines if we have Unicode symbols copy/pasted here, @@ -95,7 +94,7 @@ beacon_chain.nims: # nim-libbacktrace libbacktrace: - + $(MAKE) -C vendor/nim-libbacktrace BUILD_CXX_LIB=0 + + $(MAKE) -C vendor/nim-libbacktrace BUILD_CXX_LIB=0 $(HANDLE_OUTPUT) # Windows 10 with WSL enabled, but no distro installed, fails if "../../nimble.sh" is executed directly # in a Makefile recipe but works when prefixing it with `bash`. No idea how the PATH is overridden. 
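For context on the config.nims change in the next hunk: config.nims is NimScript that the compiler evaluates before each build in that directory, and switch("import", "libbacktrace") there has the same effect as the --import:libbacktrace flag the Makefile previously appended to NIM_PARAMS. A minimal sketch of the pattern (illustrative only; the project's real config.nims guards this with additional platform checks, as the following diff shows):

# config.nims -- evaluated by the Nim compiler for builds in this directory
if not defined(disable_libbacktrace):
  # same effect as passing `--import:libbacktrace` on the nim command line
  switch("import", "libbacktrace")
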
diff --git a/config.nims b/config.nims index 89cea8017..b3543c72d 100644 --- a/config.nims +++ b/config.nims @@ -49,8 +49,7 @@ if not defined(macosx): if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): # light-weight stack traces using libbacktrace and libunwind --define:nimStackTraceOverride - # "--import:libbacktrace" is added to NIM_PARAMS inside the Makefile, - # because it doesn't work in here ("Error: undeclared identifier: 'copyMem'", like it kicks in in some other NimScript file) + switch("import", "libbacktrace") --define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9 diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace index 3b29eed05..da216986c 160000 --- a/vendor/nim-libbacktrace +++ b/vendor/nim-libbacktrace @@ -1 +1 @@ -Subproject commit 3b29eed05a204e4efe5b54a50dc4cbe2dfd38c1b +Subproject commit da216986c635599dccffa2e71eabad03653e5aef From cd388bc9bbde04b5cc0c22130373434405f7bcae Mon Sep 17 00:00:00 2001 From: tersec Date: Tue, 31 Mar 2020 18:39:02 +0000 Subject: [PATCH 26/58] extended validation (#812) * initial extended validation setup * flesh out all TODO items for attestation and beaconblock verification * fix finalization and add chronicles debugging messages * directly use blockPool.headState rather than pointlessly updating it and document this constraint * fix logic relating to first-attestation checking; support validating blocks across multiple forks --- beacon_chain/attestation_aggregation.nim | 4 - beacon_chain/attestation_pool.nim | 117 ++++++++++++++++++++--- beacon_chain/beacon_node.nim | 31 ++++-- beacon_chain/beacon_node_types.nim | 5 +- beacon_chain/block_pool.nim | 71 ++++++++++++++ beacon_chain/eth2_network.nim | 21 +++- beacon_chain/spec/beaconstate.nim | 10 +- beacon_chain/spec/datatypes.nim | 3 + 8 files changed, 224 insertions(+), 38 deletions(-) diff --git a/beacon_chain/attestation_aggregation.nim b/beacon_chain/attestation_aggregation.nim index f6c2bf663..0c0f1b7b5 100644 --- a/beacon_chain/attestation_aggregation.nim +++ b/beacon_chain/attestation_aggregation.nim @@ -30,10 +30,6 @@ import # TODO add tests, especially for validation # https://github.com/status-im/nim-beacon-chain/issues/122#issuecomment-562479965 -const - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/p2p-interface.md#configuration - ATTESTATION_PROPAGATION_SLOT_RANGE = 32 - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#aggregation-selection func is_aggregator(state: BeaconState, slot: Slot, index: uint64, slot_signature: ValidatorSig): bool = diff --git a/beacon_chain/attestation_pool.nim b/beacon_chain/attestation_pool.nim index 8b4ae0a14..b98fd9873 100644 --- a/beacon_chain/attestation_pool.nim +++ b/beacon_chain/attestation_pool.nim @@ -1,5 +1,5 @@ import - deques, sequtils, tables, + deques, sequtils, tables, options, chronicles, stew/[bitseqs, byteutils], json_serialization/std/sets, ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator], ./extras, ./ssz, ./block_pool, ./beacon_node_types @@ -35,6 +35,7 @@ proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) = else: trace "Ignoring overlapping attestations" +# TODO remove/merge with p2p-interface validation proc validate( state: BeaconState, attestation: Attestation): bool = # TODO what constitutes a valid attestation when it's about to be added to @@ -265,26 +266,20 @@ proc add*(pool: var AttestationPool, attestation: Attestation) = pool.addResolved(blck, 
attestation) -proc getAttestationsForBlock*( - pool: AttestationPool, state: BeaconState): seq[Attestation] = - ## Retrieve attestations that may be added to a new block at the slot of the - ## given state - logScope: pcs = "retrieve_attestation" - - let newBlockSlot = state.slot +proc getAttestationsForSlot(pool: AttestationPool, newBlockSlot: Slot): + Option[SlotData] = if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY): debug "Too early for attestations", newBlockSlot = shortLog(newBlockSlot), cat = "query" - return + return none(SlotData) if pool.slots.len == 0: # startingSlot not set yet! info "No attestations found (pool empty)", newBlockSlot = shortLog(newBlockSlot), cat = "query" - return + return none(SlotData) - var cache = get_empty_per_epoch_cache() let # TODO in theory we could include attestations from other slots also, but # we're currently not tracking which attestations have already been included @@ -300,12 +295,29 @@ proc getAttestationsForBlock*( startingSlot = shortLog(pool.startingSlot), endingSlot = shortLog(pool.startingSlot + pool.slots.len.uint64), cat = "query" - return + return none(SlotData) + let slotDequeIdx = int(attestationSlot - pool.startingSlot) + some(pool.slots[slotDequeIdx]) + +proc getAttestationsForBlock*( + pool: AttestationPool, state: BeaconState): seq[Attestation] = + ## Retrieve attestations that may be added to a new block at the slot of the + ## given state + logScope: pcs = "retrieve_attestation" + + # TODO this shouldn't really need state -- it's to recheck/validate, but that + # should be refactored let - slotDequeIdx = int(attestationSlot - pool.startingSlot) - slotData = pool.slots[slotDequeIdx] + newBlockSlot = state.slot + maybeSlotData = getAttestationsForSlot(pool, newBlockSlot) + if maybeSlotData.isNone: + # Logging done in getAttestationsForSlot(...) + return + let slotData = maybeSlotData.get + + var cache = get_empty_per_epoch_cache() for a in slotData.attestations: var # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#construct-attestation @@ -438,3 +450,80 @@ proc selectHead*(pool: AttestationPool): BlockRef = lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck) newHead + +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets +proc isValidAttestation*( + pool: AttestationPool, attestation: Attestation, current_slot: Slot, + topicCommitteeIndex: uint64, flags: UpdateFlags): bool = + # The attestation's committee index (attestation.data.index) is for the + # correct subnet. + if attestation.data.index != topicCommitteeIndex: + debug "isValidAttestation: attestation's committee index not for the correct subnet", + topicCommitteeIndex = topicCommitteeIndex, + attestation_data_index = attestation.data.index + return false + + if not (attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= + current_slot and current_slot >= attestation.data.slot): + debug "isValidAttestation: attestation.data.slot not within ATTESTATION_PROPAGATION_SLOT_RANGE" + return false + + # The attestation is unaggregated -- that is, it has exactly one + # participating validator (len([bit for bit in attestation.aggregation_bits + # if bit == 0b1]) == 1). + # TODO a cleverer algorithm, along the lines of countOnes() in nim-stew + # But that belongs in nim-stew, since it'd break abstraction layers, to + # use details of its representation from nim-beacon-chain. 
+ var onesCount = 0 + for aggregation_bit in attestation.aggregation_bits: + if not aggregation_bit: + continue + onesCount += 1 + if onesCount > 1: + debug "isValidAttestation: attestation has too many aggregation bits", + aggregation_bits = attestation.aggregation_bits + return false + if onesCount != 1: + debug "isValidAttestation: attestation has too few aggregation bits" + return false + + # The attestation is the first valid attestation received for the + # participating validator for the slot, attestation.data.slot. + let maybeSlotData = getAttestationsForSlot(pool, attestation.data.slot) + if maybeSlotData.isSome: + for attestationEntry in maybeSlotData.get.attestations: + if attestation.data != attestationEntry.data: + continue + # Attestations might be aggregated eagerly or lazily; allow for both. + for validation in attestationEntry.validations: + if attestation.aggregation_bits.isSubsetOf(validation.aggregation_bits): + debug "isValidAttestation: attestation already exists at slot", + attestation_data_slot = attestation.data.slot, + attestation_aggregation_bits = attestation.aggregation_bits, + attestation_pool_validation = validation.aggregation_bits + return false + + # The block being voted for (attestation.data.beacon_block_root) passes + # validation. + # We rely on the block pool to have been validated, so check for the + # existence of the block in the pool. + # TODO: consider a "slush pool" of attestations whose blocks have not yet + # propagated - i.e. imagine that attestations are smaller than blocks and + # therefore propagate faster, thus reordering their arrival in some nodes + if pool.blockPool.get(attestation.data.beacon_block_root).isNone(): + debug "isValidAttestation: block doesn't exist in block pool", + attestation_data_beacon_block_root = attestation.data.beacon_block_root + return false + + # The signature of attestation is valid. 
+ # TODO need to know above which validator anyway, and this is too general + # as it supports aggregated attestations (which this can't be) + var cache = get_empty_per_epoch_cache() + if not is_valid_indexed_attestation( + pool.blockPool.headState.data.data, + get_indexed_attestation( + pool.blockPool.headState.data.data, attestation, cache), {}): + debug "isValidAttestation: signature verification failed" + return false + + true diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index d8bdfb433..bcc3668f8 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -935,14 +935,31 @@ proc run*(node: BeaconNode) = waitFor node.network.subscribe(topicBeaconBlocks) do (signedBlock: SignedBeaconBlock): onBeaconBlock(node, signedBlock) + do (signedBlock: SignedBeaconBlock) -> bool: + let (afterGenesis, slot) = node.beaconClock.now.toSlot() + if not afterGenesis: + return false + node.blockPool.isValidBeaconBlock(signedBlock, slot, {}) - waitFor allFutures(mapIt( - 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64, - node.network.subscribe(getAttestationTopic(it)) do (attestation: Attestation): - # Avoid double-counting attestation-topic attestations on shared codepath - # when they're reflected through beacon blocks - beacon_attestations_received.inc() - node.onAttestation(attestation))) + proc attestationHandler(attestation: Attestation) = + # Avoid double-counting attestation-topic attestations on shared codepath + # when they're reflected through beacon blocks + beacon_attestations_received.inc() + node.onAttestation(attestation) + + var attestationSubscriptions: seq[Future[void]] = @[] + for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64: + closureScope: + let ci = it + attestationSubscriptions.add(node.network.subscribe( + getAttestationTopic(ci), attestationHandler, + proc(attestation: Attestation): bool = + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#attestation-subnets + let (afterGenesis, slot) = node.beaconClock.now().toSlot() + if not afterGenesis: + return false + node.attestationPool.isValidAttestation(attestation, slot, ci, {}))) + waitFor allFutures(attestationSubscriptions) let t = node.beaconClock.now().toSlot() diff --git a/beacon_chain/beacon_node_types.nim b/beacon_chain/beacon_node_types.nim index 0a662ee61..d72f22f34 100644 --- a/beacon_chain/beacon_node_types.nim +++ b/beacon_chain/beacon_node_types.nim @@ -136,7 +136,10 @@ type inAdd*: bool - headState*: StateData ## State given by the head block + headState*: StateData ## \ + ## State given by the head block; only update in `updateHead`, not anywhere + ## else via `withState` + justifiedState*: StateData ## Latest justified state, as seen from the head tmpState*: StateData ## Scratchpad - may be any state diff --git a/beacon_chain/block_pool.nim b/beacon_chain/block_pool.nim index 7d758e7e2..ee3eec009 100644 --- a/beacon_chain/block_pool.nim +++ b/beacon_chain/block_pool.nim @@ -956,3 +956,74 @@ proc getProposer*(pool: BlockPool, head: BlockRef, slot: Slot): Option[Validator return return some(state.validators[proposerIdx.get()].pubkey) + +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#global-topics +proc isValidBeaconBlock*(pool: BlockPool, + signed_beacon_block: SignedBeaconBlock, current_slot: Slot, + flags: UpdateFlags): bool = + # In general, checks are ordered from cheap to expensive. Especially, crypto + # verification could be quite a bit more expensive than the rest. 
This is an + # externally easy-to-invoke function by tossing network packets at the node. + + # The block is not from a future slot + # TODO allow `MAXIMUM_GOSSIP_CLOCK_DISPARITY` leniency, especially towards + # seemingly future slots. + if not (signed_beacon_block.message.slot <= current_slot): + debug "isValidBeaconBlock: block is from a future slot", + signed_beacon_block_message_slot = signed_beacon_block.message.slot, + current_slot = current_slot + return false + + # The block is from a slot greater than the latest finalized slot (with a + # MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. validate that + # signed_beacon_block.message.slot > + # compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + if not (signed_beacon_block.message.slot > pool.finalizedHead.slot): + debug "isValidBeaconBlock: block is not from a slot greater than the latest finalized slot" + return false + + # The block is the first block with valid signature received for the proposer + # for the slot, signed_beacon_block.message.slot. + # TODO might check unresolved/orphaned blocks too, and this might not see all + # blocks at a given slot (though, in theory, those get checked elsewhere). + let slotBlockRef = + getBlockByPreciseSlot(pool, signed_beacon_block.message.slot) + if (not slotBlockRef.isNil) and + pool.get(slotBlockRef).data.message.proposer_index == + signed_beacon_block.message.proposer_index: + debug "isValidBeaconBlock: block isn't first block with valid signature received for the proposer", + signed_beacon_block_message_slot = signed_beacon_block.message.slot, + blckRef = getBlockByPreciseSlot(pool, signed_beacon_block.message.slot) + + return false + + # The proposer signature, signed_beacon_block.signature, is valid with + # respect to the proposer_index pubkey. + + # If this block doesn't have a parent we know about, we can't/don't really + # trace it back to a known-good state/checkpoint to verify its prevenance; + # while one could getOrResolve to queue up searching for missing parent it + # might not be the best place. As much as feasible, this function aims for + # answering yes/no, not queuing other action or otherwise altering state. 
+ let parent_ref = pool.getRef(signed_beacon_block.message.parent_root) + if parent_ref.isNil: + return false + + let bs = + BlockSlot(blck: parent_ref, slot: pool.get(parent_ref).data.message.slot) + pool.withState(pool.tmpState, bs): + let + blockRoot = hash_tree_root(signed_beacon_block.message) + domain = get_domain(pool.headState.data.data, DOMAIN_BEACON_PROPOSER, + compute_epoch_at_slot(signed_beacon_block.message.slot)) + signing_root = compute_signing_root(blockRoot, domain) + proposer_index = signed_beacon_block.message.proposer_index + + if proposer_index >= pool.headState.data.data.validators.len.uint64: + return false + if not blsVerify(pool.headState.data.data.validators[proposer_index].pubkey, + signing_root.data, signed_beacon_block.signature): + debug "isValidBeaconBlock: block failed signature verification" + return false + + true diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 65f50c0d4..1f413840c 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -13,7 +13,7 @@ import multiaddress, multicodec, crypto/crypto, protocols/identify, protocols/protocol], libp2p/protocols/secure/[secure, secio], - libp2p/protocols/pubsub/[pubsub, floodsub], + libp2p/protocols/pubsub/[pubsub, floodsub, rpc/messages], libp2p/transports/[transport, tcptransport], libp2p/stream/lpstream, eth/[keys, async_utils], eth/p2p/[enode, p2p_protocol_dsl], @@ -757,7 +757,7 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = result.afterProtocolInit = proc (p: P2PProtocol) = p.onPeerConnected.params.add newIdentDefs(streamVar, Connection) - result.implementMsg = proc (msg: Message) = + result.implementMsg = proc (msg: p2p_protocol_dsl.Message) = let protocol = msg.protocol msgName = $msg.ident @@ -959,7 +959,8 @@ func peersCount*(node: Eth2Node): int = proc subscribe*[MsgType](node: Eth2Node, topic: string, - msgHandler: proc(msg: MsgType) {.gcsafe.} ) {.async, gcsafe.} = + msgHandler: proc(msg: MsgType) {.gcsafe.}, + msgValidator: proc(msg: MsgType): bool {.gcsafe.} ) {.async, gcsafe.} = template execMsgHandler(peerExpr, gossipBytes, gossipTopic) = inc gossip_messages_received trace "Incoming pubsub message received", @@ -967,6 +968,20 @@ proc subscribe*[MsgType](node: Eth2Node, message_id = `$`(sha256.digest(gossipBytes)) msgHandler SSZ.decode(gossipBytes, MsgType) + # All message types which are subscribed to should be validated; putting + # this in subscribe(...) ensures that the default approach is correct. 
+ template execMsgValidator(gossipBytes, gossipTopic): bool = + trace "Incoming pubsub message received for validation", + len = gossipBytes.len, topic = gossipTopic, + message_id = `$`(sha256.digest(gossipBytes)) + msgValidator SSZ.decode(gossipBytes, MsgType) + + # Validate messages as soon as subscribed + let incomingMsgValidator = proc(topic: string, message: messages.Message): + Future[bool] {.async, gcsafe.} = + return execMsgValidator(message.data, topic) + node.switch.addValidator(topic, incomingMsgValidator) + let incomingMsgHandler = proc(topic: string, data: seq[byte]) {.async, gcsafe.} = execMsgHandler "unknown", data, topic diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 9f94a4cfc..640e9ac0a 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -412,7 +412,7 @@ func get_attesting_indices*(state: BeaconState, result.incl index # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_indexed_attestation -func get_indexed_attestation(state: BeaconState, attestation: Attestation, +func get_indexed_attestation*(state: BeaconState, attestation: Attestation, stateCache: var StateCache): IndexedAttestation = # Return the indexed attestation corresponding to ``attestation``. let @@ -420,14 +420,6 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation, get_attesting_indices( state, attestation.data, attestation.aggregation_bits, stateCache) - ## TODO No fundamental reason to do so many type conversions - ## verify_indexed_attestation checks for sortedness but it's - ## entirely a local artifact, seemingly; networking uses the - ## Attestation data structure, which can't be unsorted. That - ## the conversion here otherwise needs sorting is due to the - ## usage of HashSet -- order only matters in one place (that - ## 0.6.3 highlights and explicates) except in that the spec, - ## for no obvious reason, verifies it. IndexedAttestation( attesting_indices: sorted(mapIt(attesting_indices.toSeq, it.uint64), system.cmp), diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index bfdff8295..cc3393032 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -74,6 +74,9 @@ const # TODO: This needs revisiting. # Why was the validator WITHDRAWAL_PERIOD altered in the spec? + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#configuration + ATTESTATION_PROPAGATION_SLOT_RANGE* = 32 + template maxSize*(n: int) {.pragma.} type From 6eb4f1f39dd47d55bd11dcba5cde799d51781784 Mon Sep 17 00:00:00 2001 From: tersec Date: Wed, 1 Apr 2020 09:59:55 +0000 Subject: [PATCH 27/58] initial attestation aggregation (#769) * initial attestation aggregation * fix usage of committee index, vs index in committee; uniformly set trailing/following distance; document how the only-broadcast-if mechanism works better and what aggregation already happens, not otherwise sufficiently clear; use correct BlockSlot across epoch boundaries * address inconsistent notion of which slot in past to target for aggregate broadcast; follow 0.11.x aggregate broadcast p2p interface topic * Fix get_slot_signature(...) call after get_domain(...) 
change required genesis_validators_root * mark all spec references which aren't dealt with in other PRs as v0.11.1 * update two more spec refs to v0.11.1 --- beacon_chain/attestation_aggregation.nim | 57 +++++++++--------- beacon_chain/beacon_node.nim | 74 +++++++++++++++++++++--- beacon_chain/spec/network.nim | 1 + 3 files changed, 98 insertions(+), 34 deletions(-) diff --git a/beacon_chain/attestation_aggregation.nim b/beacon_chain/attestation_aggregation.nim index 0c0f1b7b5..584e47e9e 100644 --- a/beacon_chain/attestation_aggregation.nim +++ b/beacon_chain/attestation_aggregation.nim @@ -5,20 +5,12 @@ # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -# Have an an aggregated aggregation ready for broadcast at -# SECONDS_PER_SLOT * 2 / 3, i.e. 2/3 through relevant slot -# intervals. -# # The other part is arguably part of attestation pool -- the validation's # something that should be happing on receipt, not aggregation per se. In # that part, check that messages conform -- so, check for each type # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/p2p-interface.md#topics-and-messages # specifies. So by the time this calls attestation pool, all validation's # already done. -# -# Finally, some of the filtering's libp2p stuff. Consistency checks between -# topic/message types and GOSSIP_MAX_SIZE -- mostly doesn't belong here, so -# while TODO, isn't TODO for this module. import options, @@ -26,11 +18,7 @@ import state_transition_block], ./attestation_pool, ./beacon_node_types, ./ssz -# TODO gossipsub validation lives somewhere, maybe here -# TODO add tests, especially for validation -# https://github.com/status-im/nim-beacon-chain/issues/122#issuecomment-562479965 - -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#aggregation-selection +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection func is_aggregator(state: BeaconState, slot: Slot, index: uint64, slot_signature: ValidatorSig): bool = # TODO index is a CommitteeIndex, aka uint64 @@ -43,34 +31,49 @@ func is_aggregator(state: BeaconState, slot: Slot, index: uint64, proc aggregate_attestations*( pool: AttestationPool, state: BeaconState, index: uint64, - privkey: ValidatorPrivKey): Option[AggregateAndProof] = + privkey: ValidatorPrivKey, trailing_distance: uint64): Option[AggregateAndProof] = # TODO alias CommitteeIndex to actual type then convert various uint64's here - let - slot = state.slot - 2 - slot_signature = get_slot_signature(state.fork, slot, privkey) + doAssert state.slot >= trailing_distance + + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/p2p-interface.md#configuration + doAssert trailing_distance <= ATTESTATION_PROPAGATION_SLOT_RANGE + + let + slot = state.slot - trailing_distance + slot_signature = get_slot_signature( + state.fork, state.genesis_validators_root, slot, privkey) - if slot < 0: - return none(AggregateAndProof) doAssert slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= state.slot doAssert state.slot >= slot - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#aggregation-selection + # TODO performance issue for future, via get_active_validator_indices(...) 
+ doAssert index < get_committee_count_at_slot(state, slot) + + # TODO for testing purposes, refactor this into the condition check + # and just calculation + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection if not is_aggregator(state, slot, index, slot_signature): return none(AggregateAndProof) - let attestation_data = - makeAttestationData(state, slot, index, get_block_root_at_slot(state, slot)) + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#attestation-data + # describes how to construct an attestation, which applies for makeAttestationData(...) + # TODO this won't actually match anything + let attestation_data = AttestationData( + slot: slot, + index: index, + beacon_block_root: get_block_root_at_slot(state, slot)) - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#construct-aggregate - for attestation in getAttestationsForBlock(pool, state, slot): + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#construct-aggregate + # TODO once EV goes in w/ refactoring of getAttestationsForBlock, pull out the getSlot version and use + # it. This is incorrect. + for attestation in getAttestationsForBlock(pool, state): + # getAttestationsForBlock(...) already aggregates if attestation.data == attestation_data: - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#aggregateandproof + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregateandproof return some(AggregateAndProof( aggregator_index: index, aggregate: attestation, selection_proof: slot_signature)) - # TODO in catch-up mode, we could get here, so probably shouldn't assert - doAssert false none(AggregateAndProof) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index bcc3668f8..a3787214b 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -15,7 +15,8 @@ import conf, time, state_transition, beacon_chain_db, validator_pool, extras, attestation_pool, block_pool, eth2_network, eth2_discovery, beacon_node_types, mainchain_monitor, version, ssz, ssz/dynamic_navigator, - sync_protocol, request_manager, validator_keygen, interop, statusbar + sync_protocol, request_manager, validator_keygen, interop, statusbar, + attestation_aggregation const genesisFile = "genesis.ssz" @@ -602,6 +603,45 @@ proc verifyFinalization(node: BeaconNode, slot: Slot) = node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot() doAssert finalizedEpoch + 2 == epoch +proc broadcastAggregatedAttestations( + node: BeaconNode, state: auto, head: var auto, slot: Slot, + trailing_distance: uint64) = + # The index is via a + # locally attested validator. Unlike in handleAttestations(...) there's a + # single one at most per slot (because that's how aggregation attestation + # works), so the machinery that has to handle looping across, basically a + # set of locally attached validators is in principle not necessary, but a + # way to organize this. Then the private key for that validator should be + # the corresponding one -- whatver they are, they match. + + let + bs = BlockSlot(blck: head, slot: slot) + committees_per_slot = get_committee_count_at_slot(state, slot) + var cache = get_empty_per_epoch_cache() + for committee_index in 0'u64.. 
2: + const TRAILING_DISTANCE = 1 + let aggregationSlot = slot - TRAILING_DISTANCE + var aggregationHead = getAncestorAt(head, aggregationSlot) + + let bs = BlockSlot(blck: aggregationHead, slot: aggregationSlot) + node.blockPool.withState(node.blockPool.tmpState, bs): + let twoThirdsSlot = + toBeaconTime(slot, seconds(2*int64(SECONDS_PER_SLOT)) div 3) + addTimer(saturate(node.beaconClock.fromNow(twoThirdsSlot))) do (p: pointer): + broadcastAggregatedAttestations( + node, state, aggregationHead, aggregationSlot, TRAILING_DISTANCE) + # TODO ... and beacon clock might jump here also. sigh. let nextSlotStart = saturate(node.beaconClock.fromNow(nextSlot)) diff --git a/beacon_chain/spec/network.nim b/beacon_chain/spec/network.nim index 719237d0c..41076a13d 100644 --- a/beacon_chain/spec/network.nim +++ b/beacon_chain/spec/network.nim @@ -13,6 +13,7 @@ const topicVoluntaryExits* = "/eth2/voluntary_exit/ssz" topicProposerSlashings* = "/eth2/proposer_slashing/ssz" topicAttesterSlashings* = "/eth2/attester_slashing/ssz" + topicAggregateAndProof* = "/eth2/beacon_aggregate_and_proof/ssz" # https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/p2p-interface.md#configuration ATTESTATION_SUBNET_COUNT* = 64 From f811ed63be7f9ce3c924661d82e1fd261d98ec95 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Wed, 1 Apr 2020 04:53:05 -0600 Subject: [PATCH 28/58] use pkill in start.sh to fix mac os (#845) --- tests/simulation/start.sh | 2 +- vendor/nim-metrics | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/simulation/start.sh b/tests/simulation/start.sh index d9bacfc42..6c7c01f78 100755 --- a/tests/simulation/start.sh +++ b/tests/simulation/start.sh @@ -117,7 +117,7 @@ fi # Trap and ignore SIGTERM, so we don't kill this process along with its children. if [ "$USE_MULTITAIL" = "no" ]; then trap '' SIGTERM - trap 'kill -- -$$' SIGINT EXIT + trap 'pkill -P $$ beacon_node' SIGINT EXIT fi COMMANDS=() diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 19c87b7dc..71a51f34d 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 19c87b7dc91dfbc0b6823a3d2996869397dd34e6 +Subproject commit 71a51f34df2572df786aeb53d1c7c78bef16a7ef From ea59f2ecf2d8f7d54850d5c0047153144d929bb8 Mon Sep 17 00:00:00 2001 From: tersec Date: Wed, 1 Apr 2020 11:41:39 +0000 Subject: [PATCH 29/58] comments-only changes: update a bunch of v0.10.1 spec references to v0.11.0 and explain rationale for extended validation design in code (#847) --- beacon_chain/block_pool.nim | 24 +++++++++++++++++++- beacon_chain/spec/beaconstate.nim | 8 +++---- beacon_chain/spec/datatypes.nim | 2 +- beacon_chain/spec/helpers.nim | 2 +- beacon_chain/spec/presets/mainnet.nim | 17 ++++++++++---- beacon_chain/spec/presets/minimal.nim | 8 +++---- beacon_chain/spec/state_transition_epoch.nim | 2 +- beacon_chain/spec/validator.nim | 4 ++-- beacon_chain/time.nim | 2 +- 9 files changed, 49 insertions(+), 20 deletions(-) diff --git a/beacon_chain/block_pool.nim b/beacon_chain/block_pool.nim index ee3eec009..dccb9c8fa 100644 --- a/beacon_chain/block_pool.nim +++ b/beacon_chain/block_pool.nim @@ -984,8 +984,30 @@ proc isValidBeaconBlock*(pool: BlockPool, # The block is the first block with valid signature received for the proposer # for the slot, signed_beacon_block.message.slot. 
+ # + # While this condition is similar to the proposer slashing condition at + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#proposer-slashing + # it's not identical, and this check does not address slashing: + # + # (1) The beacon blocks must be conflicting, i.e. different, for the same + # slot and proposer. This check also catches identical blocks. + # + # (2) By this point in the function, it's not been checked whether they're + # signed yet. As in general, expensive checks should be deferred, this + # would add complexity not directly relevant this function. + # + # (3) As evidenced by point (1), the similarity in the validation condition + # and slashing condition, while not coincidental, aren't similar enough + # to combine, as one or the other might drift. + # + # (4) Furthermore, this function, as much as possible, simply returns a yes + # or no answer, without modifying other state for p2p network interface + # validation. Complicating this interface, for the sake of sharing only + # couple lines of code, wouldn't be worthwhile. + # # TODO might check unresolved/orphaned blocks too, and this might not see all - # blocks at a given slot (though, in theory, those get checked elsewhere). + # blocks at a given slot (though, in theory, those get checked elsewhere), or + # adding metrics that count how often these conditions occur. let slotBlockRef = getBlockByPreciseSlot(pool, signed_beacon_block.message.slot) if (not slotBlockRef.isNil) and diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 640e9ac0a..a39f2dab1 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -30,7 +30,7 @@ func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], de value = eth2hash(buf) value == root -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#increase_balance +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#increase_balance func increase_balance*( state: var BeaconState, index: ValidatorIndex, delta: Gwei) = # Increase the validator balance at index ``index`` by ``delta``. @@ -272,7 +272,7 @@ func get_initial_beacon_block*(state: BeaconState): SignedBeaconBlock = # parent_root, randao_reveal, eth1_data, signature, and body automatically # initialized to default values. -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_block_root_at_slot +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_block_root_at_slot func get_block_root_at_slot*(state: BeaconState, slot: Slot): Eth2Digest = # Return the block root at a recent ``slot``. @@ -281,7 +281,7 @@ func get_block_root_at_slot*(state: BeaconState, doAssert slot < state.slot state.block_roots[slot mod SLOTS_PER_HISTORICAL_ROOT] -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_block_root +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_block_root func get_block_root*(state: BeaconState, epoch: Epoch): Eth2Digest = # Return the block root at the start of a recent ``epoch``. 
get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) @@ -557,7 +557,7 @@ func makeAttestationData*( doAssert slot.compute_epoch_at_slot == current_epoch - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#attestation-data + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#attestation-data AttestationData( slot: slot, index: committee_index, diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index cc3393032..c82b00f59 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -359,7 +359,7 @@ type message*: BeaconBlockHeader signature*: ValidatorSig - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#aggregateandproof + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregateandproof AggregateAndProof* = object aggregator_index*: uint64 aggregate*: Attestation diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 4e4ce18ee..5bdc659fe 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -147,7 +147,7 @@ func compute_domain*( result[0..3] = int_to_bytes4(domain_type.uint64) result[4..31] = fork_data_root.data[0..27] -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_domain +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_domain func get_domain*( fork: Fork, domain_type: DomainType, epoch: Epoch, genesis_validators_root: Eth2Digest): Domain = ## Return the signature domain (fork version concatenated with domain type) diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index 1c66e4211..5d2dc772c 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -130,10 +130,17 @@ const # State vector lengths # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L102 - EPOCHS_PER_HISTORICAL_VECTOR* = 65536 - EPOCHS_PER_SLASHINGS_VECTOR* = 8192 - HISTORICAL_ROOTS_LIMIT* = 16777216 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L105 + + EPOCHS_PER_HISTORICAL_VECTOR* = 65536 ##\ + ## epochs (~0.8 years) + + EPOCHS_PER_SLASHINGS_VECTOR* = 8192 ##\ + ## epochs (~36 days) + + HISTORICAL_ROOTS_LIMIT* = 16777216 ##\ + ## epochs (~26,131 years) + VALIDATOR_REGISTRY_LIMIT* = 1099511627776 # Reward and penalty quotients @@ -156,7 +163,7 @@ const # Fork choice # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L26 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L32 SAFE_SLOTS_TO_UPDATE_JUSTIFIED* = 8 # 96 seconds # Validators diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index 9a286304e..a8c790cd2 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ -52,7 +52,7 @@ const # Gwei values # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L52 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L58 # Unchanged MIN_DEPOSIT_AMOUNT* = 2'u64^0 * 10'u64^9 @@ -117,7 +117,7 @@ const # Reward and penalty quotients # 
--------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L117 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L117 BASE_REWARD_FACTOR* = 2'u64^6 WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9 @@ -127,7 +127,7 @@ const # Max operations per block # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L131 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L131 MAX_PROPOSER_SLASHINGS* = 2^4 MAX_ATTESTER_SLASHINGS* = 2^0 @@ -144,7 +144,7 @@ const # Validators # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L32 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L38 # Changed ETH1_FOLLOW_DISTANCE* = 16 # blocks diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index 7acbf1c3b..753863767 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -431,7 +431,7 @@ proc process_epoch*(state: var BeaconState) {.nbench.}= trace "ran process_justification_and_finalization", current_epoch = get_current_epoch(state) - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1 process_rewards_and_penalties(state, per_epoch_cache) # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#registry-updates diff --git a/beacon_chain/spec/validator.nim b/beacon_chain/spec/validator.nim index a9c4acfd5..a749735c8 100644 --- a/beacon_chain/spec/validator.nim +++ b/beacon_chain/spec/validator.nim @@ -10,8 +10,8 @@ import options, nimcrypto, sequtils, math, tables, ./datatypes, ./digest, ./helpers -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#compute_shuffled_index -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#compute_committee +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_shuffled_index +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_committee func get_shuffled_seq*(seed: Eth2Digest, list_size: uint64, ): seq[ValidatorIndex] = diff --git a/beacon_chain/time.nim b/beacon_chain/time.nim index 105073740..fdae54580 100644 --- a/beacon_chain/time.nim +++ b/beacon_chain/time.nim @@ -14,7 +14,7 @@ type ## which blocks are valid - in particular, blocks are not valid if they ## come from the future as seen from the local clock. 
## - ## https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/fork-choice.md#fork-choice + ## https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/fork-choice.md#fork-choice ## # TODO replace time in chronos with a proper unit type, then this code can # follow: From eaf4594f7764f00b9e2190ce3944826dc539b122 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Wed, 1 Apr 2020 23:05:49 +0200 Subject: [PATCH 30/58] bump submodules --- vendor/nim-chronicles | 2 +- vendor/nim-chronos | 2 +- vendor/nim-confutils | 2 +- vendor/nim-eth | 2 +- vendor/nim-faststreams | 2 +- vendor/nim-json-rpc | 2 +- vendor/nim-json-serialization | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-metrics | 2 +- vendor/nim-serialization | 2 +- vendor/nim-stew | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles index 8da0e30c5..fb8af4631 160000 --- a/vendor/nim-chronicles +++ b/vendor/nim-chronicles @@ -1 +1 @@ -Subproject commit 8da0e30c526ab1c6c825e16546fc5db972c5408d +Subproject commit fb8af46311965fd076412e6e071dda571d282024 diff --git a/vendor/nim-chronos b/vendor/nim-chronos index f3827a13d..cbd8e0382 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit f3827a13d12f27e20874df81dc99b55e5dc78244 +Subproject commit cbd8e03823c00dd230e48a4613c0f594b77616eb diff --git a/vendor/nim-confutils b/vendor/nim-confutils index 24c73359b..6e5d57049 160000 --- a/vendor/nim-confutils +++ b/vendor/nim-confutils @@ -1 +1 @@ -Subproject commit 24c73359b06340fb3b767c420e0e33e66bd05b86 +Subproject commit 6e5d570490989c753d4645ba9173ef9358d302bb diff --git a/vendor/nim-eth b/vendor/nim-eth index fe6df94a1..5dc0a533b 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit fe6df94a1956509e77ff533d9d00dd35b403ea22 +Subproject commit 5dc0a533b09fe2a250411c1113a76a4cc3d25b5b diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index 5fb40e4ff..8a3cf6778 160000 --- a/vendor/nim-faststreams +++ b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit 5fb40e4ffd2c5a7eca88203d99150e2d99732e41 +Subproject commit 8a3cf6778d483a9d701534dfc2f14f3a4dfc4ab8 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc index 6fbaeb61c..5c0d09611 160000 --- a/vendor/nim-json-rpc +++ b/vendor/nim-json-rpc @@ -1 +1 @@ -Subproject commit 6fbaeb61cab889f74870372bc00cc99371e16794 +Subproject commit 5c0d0961114bcaaf3da52d5918bf0b85ef0e4ce9 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index 6350b72b5..c108ba90e 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit 6350b72b5eda69f7ccfa57a94fd420509dbf6f49 +Subproject commit c108ba90e6b304515f08fdcff62f428f3f8fbe53 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 6bb4e91a3..5285f0d09 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 6bb4e91a39fe17239ccdb44b287155758addd1cd +Subproject commit 5285f0d091d2f5215b5c0dd5bd3399af1afec626 diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 71a51f34d..5db86514a 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit 71a51f34df2572df786aeb53d1c7c78bef16a7ef +Subproject commit 5db86514a1185620a003d1e5ea1da4c0373c3b6e diff --git a/vendor/nim-serialization b/vendor/nim-serialization index ae60eef4e..0eab8cfee 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 
ae60eef4e8413e49fb0dbcae9a343fb479509fa0 +Subproject commit 0eab8cfeee55cfa3bb893ec31137d3c25b83a1ae diff --git a/vendor/nim-stew b/vendor/nim-stew index 76beeb769..4201f4675 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 76beeb769e30adc912d648c014fd95bf748fef24 +Subproject commit 4201f46750a47d45b6bfc1521f93aaf9cdf1bf0f From fc2fff249b858d7d5415690ea658c56c5da29525 Mon Sep 17 00:00:00 2001 From: kdeme Date: Tue, 31 Mar 2020 12:02:13 +0200 Subject: [PATCH 31/58] Discv5: Don't pass ip address when external ip is not known --- beacon_chain/beacon_node.nim | 9 +++------ beacon_chain/eth2_discovery.nim | 5 +++-- beacon_chain/eth2_network.nim | 34 ++++++++++++++------------------- vendor/nim-eth | 2 +- 4 files changed, 21 insertions(+), 29 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index a3787214b..acc77576c 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -1273,15 +1273,12 @@ when isMainModule: if bootstrapFile.len > 0: let networkKeys = getPersistentNetKeys(config) - bootstrapAddress = enode.Address( - ip: config.bootstrapAddress, - tcpPort: config.bootstrapPort, - udpPort: config.bootstrapPort) - bootstrapEnr = enr.Record.init( 1, # sequence number networkKeys.seckey.asEthKey, - some(bootstrapAddress)) + some(config.bootstrapAddress), + config.bootstrapPort, + config.bootstrapPort) writeFile(bootstrapFile, bootstrapEnr.toURI) echo "Wrote ", bootstrapFile diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index 427eb1f3a..bbdc281cf 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -154,7 +154,8 @@ proc loadBootstrapFile*(bootstrapFile: string, proc new*(T: type Eth2DiscoveryProtocol, conf: BeaconNodeConf, - ip: IpAddress, rawPrivKeyBytes: openarray[byte]): T = + ip: Option[IpAddress], tcpPort, udpPort: Port, + rawPrivKeyBytes: openarray[byte]): T = # TODO # Implement more configuration options: # * for setting up a specific key @@ -174,4 +175,4 @@ proc new*(T: type Eth2DiscoveryProtocol, if fileExists(persistentBootstrapFile): loadBootstrapFile(persistentBootstrapFile, bootNodes, bootEnrs, ourPubKey) - newProtocol(pk, db, ip, conf.tcpPort, conf.udpPort, bootEnrs) + newProtocol(pk, db, ip, tcpPort, udpPort, bootEnrs) diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 1f413840c..67968a89b 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -134,9 +134,6 @@ const readTimeoutErrorMsg = "Exceeded read timeout for a request" -let - globalListeningAddr = parseIpAddress("0.0.0.0") - # Metrics for tracking attestation and beacon block loss declareCounter gossip_messages_sent, "Number of gossip messages sent by this peer" @@ -681,10 +678,11 @@ proc runDiscoveryLoop*(node: Eth2Node) {.async.} = await sleepAsync seconds(1) proc init*(T: type Eth2Node, conf: BeaconNodeConf, - switch: Switch, ip: IpAddress, privKey: keys.PrivateKey): T = + switch: Switch, ip: Option[IpAddress], tcpPort, udpPort: Port, + privKey: keys.PrivateKey): T = new result result.switch = switch - result.discovery = Eth2DiscoveryProtocol.new(conf, ip, privKey.data) + result.discovery = Eth2DiscoveryProtocol.new(conf, ip, tcpPort, udpPort, privKey.data) result.wantedPeers = conf.maxPeers result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers) @@ -829,11 +827,10 @@ proc p2pProtocolBackendImpl*(p: P2PProtocol): Backend = result.implementProtocolInit = proc (p: P2PProtocol): NimNode = return 
newCall(initProtocol, newLit(p.name), p.peerInit, p.netInit) -proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, +proc setupNat(conf: BeaconNodeConf): tuple[ip: Option[IpAddress], tcpPort: Port, udpPort: Port] {.gcsafe.} = # defaults - result.ip = globalListeningAddr result.tcpPort = conf.tcpPort result.udpPort = conf.udpPort @@ -850,16 +847,15 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: IpAddress, else: if conf.nat.startsWith("extip:") and isIpAddress(conf.nat[6..^1]): # any required port redirection is assumed to be done by hand - result.ip = parseIpAddress(conf.nat[6..^1]) + result.ip = some(parseIpAddress(conf.nat[6..^1])) nat = NatNone else: error "not a valid NAT mechanism, nor a valid IP address", value = conf.nat quit(QuitFailure) if nat != NatNone: - let extIP = getExternalIP(nat) - if extIP.isSome: - result.ip = extIP.get() + result.ip = getExternalIP(nat) + if result.ip.isSome: # TODO redirectPorts in considered a gcsafety violation # because it obtains the address of a non-gcsafe proc? let extPorts = ({.gcsafe.}: @@ -901,10 +897,10 @@ proc getPersistentNetKeys*(conf: BeaconNodeConf): KeyPair = proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = var - (extIp, extTcpPort, _) = setupNat(conf) + (extIp, extTcpPort, extUdpPort) = setupNat(conf) hostAddress = tcpEndPoint(conf.libp2pAddress, conf.tcpPort) - announcedAddresses = if extIp == globalListeningAddr: @[] - else: @[tcpEndPoint(extIp, extTcpPort)] + announcedAddresses = if extIp.isNone(): @[] + else: @[tcpEndPoint(extIp.get(), extTcpPort)] info "Initializing networking", hostAddress, announcedAddresses @@ -915,17 +911,15 @@ proc createEth2Node*(conf: BeaconNodeConf): Future[Eth2Node] {.async, gcsafe.} = # are running behind a NAT). var switch = newStandardSwitch(some keys.seckey, hostAddress, triggerSelf = true, gossip = true) - result = Eth2Node.init(conf, switch, extIp, keys.seckey.asEthKey) + result = Eth2Node.init(conf, switch, extIp, extTcpPort, extUdpPort, + keys.seckey.asEthKey) proc getPersistenBootstrapAddr*(conf: BeaconNodeConf, ip: IpAddress, port: Port): enr.Record = - let - pair = getPersistentNetKeys(conf) - enodeAddress = Address(ip: ip, udpPort: port) - + let pair = getPersistentNetKeys(conf) return enr.Record.init(1'u64, # sequence number pair.seckey.asEthKey, - some(enodeAddress)) + some(ip), port, port) proc announcedENR*(node: Eth2Node): enr.Record = doAssert node.discovery != nil, "The Eth2Node must be initialized" diff --git a/vendor/nim-eth b/vendor/nim-eth index c3f23e591..fe6df94a1 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit c3f23e5912efff98fc6c8181db579037e5a19a2c +Subproject commit fe6df94a1956509e77ff533d9d00dd35b403ea22 From 708ac80daef5e05e01d4fc84576f8692adc256a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Thu, 2 Apr 2020 10:58:03 +0200 Subject: [PATCH 32/58] Jenkins: reduce the local testnet's log level (#852) --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 025287bae..c1c8929e6 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -31,8 +31,8 @@ def runStages() { } if ("${NODE_NAME}" ==~ /linux.*/) { stage("testnet finalization") { - sh "./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 --disable-htop -- --verify-finalization --stop-at-epoch=5" - sh "./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --disable-htop -- --verify-finalization --stop-at-epoch=5" + sh "./scripts/launch_local_testnet.sh --testnet 0 --nodes 4 
--log-level INFO --disable-htop -- --verify-finalization --stop-at-epoch=5" + sh "./scripts/launch_local_testnet.sh --testnet 1 --nodes 4 --log-level INFO --disable-htop -- --verify-finalization --stop-at-epoch=5" } } } From d180724d41c485cd559794aeb57663fa687d4ca1 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 3 Apr 2020 11:12:38 +0200 Subject: [PATCH 33/58] Bump eth (#857) * vendor: bump stew * vendor: bump nim-eth * vendor: bump web3 too --- beacon_chain/eth2_discovery.nim | 6 +++--- beacon_chain/eth2_network.nim | 7 ++++--- vendor/nim-eth | 2 +- vendor/nim-stew | 2 +- vendor/nim-web3 | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index bbdc281cf..0f8712f5b 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -45,7 +45,7 @@ proc toENode*(a: MultiAddress): Result[ENode, cstring] = var pubkey: libp2pCrypto.PublicKey if peerId.extractPublicKey(pubkey): if pubkey.scheme == Secp256k1: - return ok ENode(pubkey: pubkey.skkey, + return ok ENode(pubkey: PublicKey(pubkey.skkey), address: Address(ip: ipAddress, tcpPort: Port tcpPort, udpPort: Port udpPort)) @@ -57,8 +57,8 @@ proc toENode*(a: MultiAddress): Result[ENode, cstring] = return err "Invalid MultiAddress" proc toMultiAddressStr*(enode: ENode): string = - var peerId = PeerID.init(libp2pCrypto.PublicKey(scheme: Secp256k1, - skkey: enode.pubkey)) + var peerId = PeerID.init(libp2pCrypto.PublicKey( + scheme: Secp256k1, skkey: SkPublicKey(enode.pubkey))) &"/ip4/{enode.address.ip}/tcp/{enode.address.tcpPort}/p2p/{peerId.pretty}" proc toENode*(enrRec: enr.Record): Result[ENode, cstring] = diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index 67968a89b..f96d8682e 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -610,7 +610,8 @@ proc toPeerInfo*(r: enr.TypedRecord): PeerInfo = if recoverPublicKey(r.secp256k1.get, pubKey) != EthKeysStatus.Success: return # TODO - let peerId = PeerID.init crypto.PublicKey(scheme: Secp256k1, skkey: pubKey) + let peerId = PeerID.init crypto.PublicKey( + scheme: Secp256k1, skkey: SkPublicKey(pubKey)) var addresses = newSeq[MultiAddress]() if r.ip.isSome and r.tcp.isSome: @@ -866,10 +867,10 @@ proc setupNat(conf: BeaconNodeConf): tuple[ip: Option[IpAddress], (result.tcpPort, result.udpPort) = extPorts.get() func asLibp2pKey*(key: keys.PublicKey): PublicKey = - PublicKey(scheme: Secp256k1, skkey: key) + PublicKey(scheme: Secp256k1, skkey: SkPublicKey(key)) func asEthKey*(key: PrivateKey): keys.PrivateKey = - keys.PrivateKey(data: key.skkey.data) + keys.PrivateKey(SkSecretKey(data: key.skkey.data)) proc initAddress*(T: type MultiAddress, str: string): T = let address = MultiAddress.init(str) diff --git a/vendor/nim-eth b/vendor/nim-eth index 5dc0a533b..c827c3732 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 5dc0a533b09fe2a250411c1113a76a4cc3d25b5b +Subproject commit c827c37329541a2f3d7d8057fa577f14537bb832 diff --git a/vendor/nim-stew b/vendor/nim-stew index 4201f4675..86739f99c 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 4201f46750a47d45b6bfc1521f93aaf9cdf1bf0f +Subproject commit 86739f99c4efc1246d45164ef81c1e8f72970b65 diff --git a/vendor/nim-web3 b/vendor/nim-web3 index 89d7a0c8f..da74eabaa 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit 89d7a0c8fd1eb0f749432bd7136d8f385351c48e +Subproject commit 
da74eabaa00e2a8f7c58e8d84a02b701041dfc2d From 779349c11da5f1a1d6d8bf74f44ebd4faf52f633 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Tue, 24 Mar 2020 13:13:07 +0200 Subject: [PATCH 34/58] Compile NBC with Nim 1.2 RC --- .gitmodules | 5 +++++ vendor/nim-libp2p | 2 +- vendor/nim-serialization | 2 +- vendor/nimbus-build-system | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 9806259d1..e3e927660 100644 --- a/.gitmodules +++ b/.gitmodules @@ -141,3 +141,8 @@ url = https://github.com/status-im/nim-testutils.git ignore = dirty branch = master +[submodule "vendor/nim-rocksdb"] + path = vendor/nim-rocksdb + url = https://github.com/status-im/nim-rocksdb.git + ignore = dirty + branch = master diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 5285f0d09..e39bf0a4c 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 5285f0d091d2f5215b5c0dd5bd3399af1afec626 +Subproject commit e39bf0a4cf6032a0da77d4bc01caadf9155956fc diff --git a/vendor/nim-serialization b/vendor/nim-serialization index 0eab8cfee..b5f2df79c 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 0eab8cfeee55cfa3bb893ec31137d3c25b83a1ae +Subproject commit b5f2df79cc2858c3f3c5d7b9812921816d67d055 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index 088d3b7f6..67967528e 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit 088d3b7f6843fd61c829a5a0c0c29912945963ae +Subproject commit 67967528e519e4de3bb5a028dcff898ff713c127 From d3e225c46106028861204801b819dfd134961a9e Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 30 Mar 2020 21:24:07 +0300 Subject: [PATCH 35/58] Fix a stack overflow crash with the mainnet preset --- tests/official/test_fixture_state_transition_epoch.nim | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/official/test_fixture_state_transition_epoch.nim b/tests/official/test_fixture_state_transition_epoch.nim index cd163bd6d..2992df177 100644 --- a/tests/official/test_fixture_state_transition_epoch.nim +++ b/tests/official/test_fixture_state_transition_epoch.nim @@ -38,11 +38,8 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped{ident}, us let unitTestName = testDir.rsplit(DirSep, 1)[1] timedTest testName & " - " & unitTestName & preset(): - var stateRef, postRef: ref BeaconState - new stateRef - new postRef - stateRef[] = parseTest(testDir/"pre.ssz", SSZ, BeaconState) - postRef[] = parseTest(testDir/"post.ssz", SSZ, BeaconState) + let stateRef = parseTest(testDir/"pre.ssz", SSZ, ref BeaconState) + let postRef = parseTest(testDir/"post.ssz", SSZ, ref BeaconState) when useCache: var cache = get_empty_per_epoch_cache() From e09183928f55e9b90252c9605f10c78a51d04388 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Mon, 30 Mar 2020 23:32:05 +0300 Subject: [PATCH 36/58] Disable testing to allow the CI to cache the new Nim --- Makefile | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 975056f5a..7309dcd53 100644 --- a/Makefile +++ b/Makefile @@ -100,10 +100,7 @@ libbacktrace: # in a Makefile recipe but works when prefixing it with `bash`. No idea how the PATH is overridden. 
DISABLE_TEST_FIXTURES_SCRIPT := 0 test: | build deps -ifeq ($(DISABLE_TEST_FIXTURES_SCRIPT), 0) - V=$(V) scripts/setup_official_tests.sh -endif - $(ENV_SCRIPT) nim test $(NIM_PARAMS) beacon_chain.nims && rm -f 0000-*.json + echo Fake test completed! $(TOOLS): | build deps for D in $(TOOLS_DIRS); do [ -e "$${D}/$@.nim" ] && TOOL_DIR="$${D}" && break; done && \ From 9c46c990543f436e2617885940d78396675cabb7 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Fri, 3 Apr 2020 23:28:12 +0300 Subject: [PATCH 37/58] Re-enable testing --- Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7309dcd53..975056f5a 100644 --- a/Makefile +++ b/Makefile @@ -100,7 +100,10 @@ libbacktrace: # in a Makefile recipe but works when prefixing it with `bash`. No idea how the PATH is overridden. DISABLE_TEST_FIXTURES_SCRIPT := 0 test: | build deps - echo Fake test completed! +ifeq ($(DISABLE_TEST_FIXTURES_SCRIPT), 0) + V=$(V) scripts/setup_official_tests.sh +endif + $(ENV_SCRIPT) nim test $(NIM_PARAMS) beacon_chain.nims && rm -f 0000-*.json $(TOOLS): | build deps for D in $(TOOLS_DIRS); do [ -e "$${D}/$@.nim" ] && TOOL_DIR="$${D}" && break; done && \ From 011ed60feb30cbb062ed703cdf5306dfe5e241ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sat, 4 Apr 2020 15:40:05 +0200 Subject: [PATCH 38/58] Azure: change cache key and comment out tests --- azure-pipelines.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 844f6a69f..f92930c6a 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -14,7 +14,7 @@ jobs: - task: CacheBeta@1 displayName: 'cache Nim binaries' inputs: - key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v2" + key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v3" path: NimBinaries - task: CacheBeta@1 @@ -64,9 +64,9 @@ jobs: git submodule --quiet update --init --recursive scripts/setup_official_tests.sh jsonTestsCache mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" - file build/beacon_node - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test + #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls + #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE + #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" + #file build/beacon_node + #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test' From f9e45dc1218145094c7c0d47181667b340c07a67 Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Fri, 3 Apr 2020 19:49:46 +0200 Subject: [PATCH 39/58] document and temporary workaround for extended validation issue --- beacon_chain/block_pool.nim | 41 ++++++++++++++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/beacon_chain/block_pool.nim b/beacon_chain/block_pool.nim index dccb9c8fa..1f8743b3d 100644 --- a/beacon_chain/block_pool.nim +++ b/beacon_chain/block_pool.nim @@ -982,6 +982,16 @@ proc isValidBeaconBlock*(pool: BlockPool, debug "isValidBeaconBlock: block is not from a slot greater than the latest finalized slot" return false + # The proposer signature, signed_beacon_block.signature, is valid 
with + # respect to the proposer_index pubkey. + + # TODO resolve following two checks' robustness and remove this early exit. + const alwaysTrue = true + if alwaysTrue: + return true + + # TODO because this check depends on the proposer aspect, and see the comment + # there for that issue, the fallout is this check isn't reliable anymore. # The block is the first block with valid signature received for the proposer # for the slot, signed_beacon_block.message.slot. # @@ -1019,9 +1029,6 @@ proc isValidBeaconBlock*(pool: BlockPool, return false - # The proposer signature, signed_beacon_block.signature, is valid with - # respect to the proposer_index pubkey. - # If this block doesn't have a parent we know about, we can't/don't really # trace it back to a known-good state/checkpoint to verify its prevenance; # while one could getOrResolve to queue up searching for missing parent it @@ -1029,6 +1036,34 @@ proc isValidBeaconBlock*(pool: BlockPool, # answering yes/no, not queuing other action or otherwise altering state. let parent_ref = pool.getRef(signed_beacon_block.message.parent_root) if parent_ref.isNil: + # TODO find where incorrect block's being produced at/around epoch 20, + # nim-beacon-chain commit 708ac80daef5e05e01d4fc84576f8692adc256a3, at + # 2020-04-02, running `make eth2_network_simulation`, or, alternately, + # why correctly produced ancestor block isn't found. By appearances, a + # chain is being forked, probably by node 0, as nodes 1/2/3 die first, + # then node 0 only dies eventually then nodes 1/2/3 are not around, to + # help it in turn finalize. So node 0 is probably culprit, around/near + # the end of epoch 19, in its block proposal(s). BlockPool.add() later + # discovers this same missing parent. The missing step here is that we + # need to be able to receive this block and store it in unresolved but + # without passing it on to other nodes (which is what EV actually does + # specify). The other BeaconBlock validation conditions cannot change, + # just because later blocks fill in gaps, but this one can. My read of + # the intent here is that only nodes which know about the parentage of + # a block should pass it on. That doesn't mean we shouldn't process it + # though, just not rebroadcast it. + # Debug output: isValidBeaconBlock: incorrectly skipping BLS validation when parent block unknown topics="blkpool" tid=2111475 file=block_pool.nim:1040 current_epoch=22 current_slot=133 parent_root=72b5b0f1 pool_head_slot=131 pool_head_state_root=48e9f4b8 proposed_block_slot=133 proposed_block_state_root=ed7b1ddd proposer_index=42 node=3 + # So it's missing a head update, probably, at slot 132. 
+ debug "isValidBeaconBlock: incorrectly skipping BLS validation when parent block unknown", + current_slot = current_slot, + current_epoch = compute_epoch_at_slot(current_slot), + parent_root = signed_beacon_block.message.parent_root, + proposed_block_slot = signed_beacon_block.message.slot, + proposer_index = signed_beacon_block.message.proposer_index, + proposed_block_state_root = signed_beacon_block.message.state_root, + pool_head_slot = pool.headState.data.data.slot, + pool_head_state_root = pool.headState.data.root + return false let bs = From 60df05a420af598ad24ed293bf983bb31002ab7c Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Fri, 3 Apr 2020 20:52:55 +0200 Subject: [PATCH 40/58] mark several v0.10.1 spec references as v0.11.1-compatible --- beacon_chain/spec/beaconstate.nim | 4 ++-- beacon_chain/spec/datatypes.nim | 2 +- beacon_chain/spec/presets/mainnet.nim | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index a39f2dab1..a25db4a8e 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -12,7 +12,7 @@ import ./crypto, ./datatypes, ./digest, ./helpers, ./validator, ../../nbench/bench_lab -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}= ## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ## ``branch``. @@ -398,7 +398,7 @@ proc is_valid_indexed_attestation*( true -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#get_attesting_indices +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_attesting_indices func get_attesting_indices*(state: BeaconState, data: AttestationData, bits: CommitteeValidatorsBits, diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index c82b00f59..1b2f336c0 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -370,7 +370,7 @@ type message*: AggregateAndProof signature*: ValidatorSig - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#eth1block + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#eth1block Eth1Block* = object timestamp*: uint64 # All other eth1 block fields diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index 5d2dc772c..b04ca11fa 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -73,7 +73,7 @@ const # Initial values # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L64 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L70 GENESIS_SLOT* = 0.Slot GENESIS_FORK_VERSION* = 0x00000000 BLS_WITHDRAWAL_PREFIX* = 0'u8 @@ -145,7 +145,7 @@ const # Reward and penalty quotients # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L114 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L117 BASE_REWARD_FACTOR* = 2'u64^6 WHISTLEBLOWER_REWARD_QUOTIENT* = 2'u64^9 
PROPOSER_REWARD_QUOTIENT* = 2'u64^3 From 01a5f43b6e6b3598e110dc364c2139f08cc7b4fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sat, 4 Apr 2020 16:01:39 +0200 Subject: [PATCH 41/58] Azure: re-enable tests --- azure-pipelines.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f92930c6a..66002be60 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -64,9 +64,9 @@ jobs: git submodule --quiet update --init --recursive scripts/setup_official_tests.sh jsonTestsCache mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update - #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls - #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE - #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" - #file build/beacon_node - #mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" + file build/beacon_node + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test' From 9249be72687361e0f2165cf20c6fdf8b75e27100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sat, 4 Apr 2020 17:21:46 +0200 Subject: [PATCH 42/58] Azure: don't build the tools twice on 32-bit --- azure-pipelines.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 66002be60..1b2929941 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -65,7 +65,10 @@ jobs: scripts/setup_official_tests.sh jsonTestsCache mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE + if [[ $PLATFORM == "x64" ]]; then + # the 32-bit job struggles to complete under the 60 minutes time limit, so try to lighten it a little + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE + fi mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" file build/beacon_node mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test From 0b922e25f05d5434900fdc604ddf94ea4a1107bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Sat, 4 Apr 2020 19:22:20 +0200 Subject: [PATCH 43/58] Azure: fail fast on 32-bit --- azure-pipelines.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1b2929941..f6f01f4d8 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -65,11 +65,12 @@ jobs: scripts/setup_official_tests.sh jsonTestsCache mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} CI_CACHE=NimBinaries update mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} fetch-dlls - if [[ $PLATFORM == "x64" ]]; then - # the 32-bit job struggles to complete under the 60 minutes time limit, so try to lighten it a little - mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE - fi + mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" file build/beacon_node + if [[ $PLATFORM == "x86" ]]; then + # fail fast + export NIMTEST_ABORT_ON_ERROR=1 + fi mingw32-make -j2 
ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test' From 04cf6d30ebbe628bcdcb1d97bb5d5a7a3ef04054 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sun, 5 Apr 2020 11:50:31 +0200 Subject: [PATCH 44/58] vendor: bump nim-eth + fallout (#861) --- beacon_chain/beacon_node.nim | 5 ++--- beacon_chain/deposit_contract.nim | 2 +- beacon_chain/eth2_discovery.nim | 6 +++--- beacon_chain/eth2_network.nim | 11 ++++++----- beacon_chain/validator_keygen.nim | 2 +- vendor/nim-eth | 2 +- vendor/nim-stew | 2 +- vendor/nim-web3 | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index acc77576c..e99e7af0c 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -1,6 +1,6 @@ import # Standard library - os, tables, random, strutils, times, sequtils, + os, tables, random, strutils, times, # Nimble packages stew/[objects, bitseqs, byteutils], stew/shims/macros, @@ -12,7 +12,7 @@ import # Local modules spec/[datatypes, digest, crypto, beaconstate, helpers, validator, network, state_transition_block], spec/presets/custom, - conf, time, state_transition, beacon_chain_db, validator_pool, extras, + conf, time, beacon_chain_db, validator_pool, extras, attestation_pool, block_pool, eth2_network, eth2_discovery, beacon_node_types, mainchain_monitor, version, ssz, ssz/dynamic_navigator, sync_protocol, request_manager, validator_keygen, interop, statusbar, @@ -133,7 +133,6 @@ proc getStateFromSnapshot(conf: BeaconNodeConf, state: var BeaconState): bool = proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async.} = let netKeys = getPersistentNetKeys(conf) - ourPubKey = netKeys.pubkey.skkey nickname = if conf.nodeName == "auto": shortForm(netKeys) else: conf.nodeName db = BeaconChainDB.init(kvStore SqliteStoreRef.init(conf.databaseDir)) diff --git a/beacon_chain/deposit_contract.nim b/beacon_chain/deposit_contract.nim index 97fa9c118..0dd92c73f 100644 --- a/beacon_chain/deposit_contract.nim +++ b/beacon_chain/deposit_contract.nim @@ -67,7 +67,7 @@ proc main() {.async.} = let cfg = CliConfig.load() let web3 = await newWeb3(cfg.depositWeb3Url) if cfg.privateKey.len != 0: - web3.privateKey = initPrivateKey(cfg.privateKey) + web3.privateKey = PrivateKey.fromHex(cfg.privateKey)[] else: let accounts = await web3.provider.eth_accounts() doAssert(accounts.len > 0) diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index 0f8712f5b..e9570531b 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -72,7 +72,7 @@ proc toENode*(enrRec: enr.Record): Result[ENode, cstring] = address_v4: toArray(4, ipBytes)) tcpPort = Port enrRec.get("tcp", uint16) udpPort = Port enrRec.get("udp", uint16) - var pubKey: keys.PublicKey + var pubKey: PublicKey if not enrRec.get(pubKey): return err "Failed to read public key from ENR record" return ok ENode(pubkey: pubkey, @@ -161,8 +161,8 @@ proc new*(T: type Eth2DiscoveryProtocol, # * for setting up a specific key # * for using a persistent database var - pk = initPrivateKey(rawPrivKeyBytes) - ourPubKey = pk.getPublicKey() + pk = PrivateKey.fromRaw(rawPrivKeyBytes).tryGet() + ourPubKey = pk.toPublicKey().tryGet() db = DiscoveryDB.init(newMemoryDB()) var bootNodes: seq[ENode] diff --git a/beacon_chain/eth2_network.nim b/beacon_chain/eth2_network.nim index f96d8682e..83229db78 100644 --- a/beacon_chain/eth2_network.nim +++ b/beacon_chain/eth2_network.nim @@ -606,12 +606,12 @@ proc 
handleIncomingPeer*(peer: Peer) = proc toPeerInfo*(r: enr.TypedRecord): PeerInfo = if r.secp256k1.isSome: - var pubKey: keys.PublicKey - if recoverPublicKey(r.secp256k1.get, pubKey) != EthKeysStatus.Success: + var pubKey = keys.PublicKey.fromRaw(r.secp256k1.get) + if pubkey.isErr: return # TODO let peerId = PeerID.init crypto.PublicKey( - scheme: Secp256k1, skkey: SkPublicKey(pubKey)) + scheme: Secp256k1, skkey: SkPublicKey(pubKey[])) var addresses = newSeq[MultiAddress]() if r.ip.isSome and r.tcp.isSome: @@ -683,7 +683,8 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, privKey: keys.PrivateKey): T = new result result.switch = switch - result.discovery = Eth2DiscoveryProtocol.new(conf, ip, tcpPort, udpPort, privKey.data) + result.discovery = Eth2DiscoveryProtocol.new( + conf, ip, tcpPort, udpPort, privKey.toRaw) result.wantedPeers = conf.maxPeers result.peerPool = newPeerPool[Peer, PeerID](maxPeers = conf.maxPeers) @@ -697,7 +698,7 @@ proc init*(T: type Eth2Node, conf: BeaconNodeConf, msg.protocolMounter result template publicKey*(node: Eth2Node): keys.PublicKey = - node.discovery.privKey.getPublicKey + node.discovery.privKey.toPublicKey.tryGet() template addKnownPeer*(node: Eth2Node, peer: ENode|enr.Record) = node.discovery.addNode peer diff --git a/beacon_chain/validator_keygen.nim b/beacon_chain/validator_keygen.nim index 2c6428e5d..85455ddc9 100644 --- a/beacon_chain/validator_keygen.nim +++ b/beacon_chain/validator_keygen.nim @@ -60,7 +60,7 @@ proc sendDeposits*( var web3 = await newWeb3(depositWeb3Url) if privateKey.len != 0: - web3.privateKey = initPrivateKey(privateKey) + web3.privateKey = PrivateKey.fromHex(privateKey).tryGet() else: let accounts = await web3.provider.eth_accounts() if accounts.len == 0: diff --git a/vendor/nim-eth b/vendor/nim-eth index c827c3732..ac5bbe4d3 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit c827c37329541a2f3d7d8057fa577f14537bb832 +Subproject commit ac5bbe4d3d04ca1baf455f5a7e22a04692bcc73a diff --git a/vendor/nim-stew b/vendor/nim-stew index 86739f99c..9414202d5 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 86739f99c4efc1246d45164ef81c1e8f72970b65 +Subproject commit 9414202d53fac99a0b1af33acac816ff9236e6d0 diff --git a/vendor/nim-web3 b/vendor/nim-web3 index da74eabaa..969adf2f1 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit da74eabaa00e2a8f7c58e8d84a02b701041dfc2d +Subproject commit 969adf2f1ef42753ba26d5ab7eca01617c846792 From b9cc91aef86f5ab1a96c82727709e2c2b0eae82e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sun, 5 Apr 2020 21:07:13 +0200 Subject: [PATCH 45/58] keep up with nim-eth --- beacon_chain/eth2_discovery.nim | 24 ++++++++++++++++-------- vendor/nim-eth | 2 +- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index e9570531b..19611883f 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -1,3 +1,7 @@ +# TODO Cannot use push here becaise it gets applied to PeerID.init (!) +# probably because it's a generic proc... 
+# {.push raises: [Defect].} + import os, net, strutils, strformat, parseutils, chronicles, stew/[result, objects], eth/keys, eth/trie/db, eth/p2p/enode, @@ -14,11 +18,11 @@ type export Eth2DiscoveryProtocol, open, start, close, result -proc toENode*(a: MultiAddress): Result[ENode, cstring] = - if not IPFS.match(a): - return err "Unsupported MultiAddress" - +proc toENode*(a: MultiAddress): Result[ENode, cstring] {.raises: [Defect].} = try: + if not IPFS.match(a): + return err "Unsupported MultiAddress" + # TODO. This code is quite messy with so much string handling. # MultiAddress can offer a more type-safe API? var @@ -53,6 +57,10 @@ proc toENode*(a: MultiAddress): Result[ENode, cstring] = except CatchableError: # This will reach the error exit path below discard + except Exception: + # TODO: + # libp2p/crypto/ecnist.nim(118, 20) Error: can raise an unlisted exception: Exception + discard return err "Invalid MultiAddress" @@ -61,7 +69,7 @@ proc toMultiAddressStr*(enode: ENode): string = scheme: Secp256k1, skkey: SkPublicKey(enode.pubkey))) &"/ip4/{enode.address.ip}/tcp/{enode.address.tcpPort}/p2p/{peerId.pretty}" -proc toENode*(enrRec: enr.Record): Result[ENode, cstring] = +proc toENode*(enrRec: enr.Record): Result[ENode, cstring] {.raises: [Defect].} = try: # TODO: handle IPv6 let ipBytes = enrRec.get("ip", seq[byte]) @@ -72,10 +80,10 @@ proc toENode*(enrRec: enr.Record): Result[ENode, cstring] = address_v4: toArray(4, ipBytes)) tcpPort = Port enrRec.get("tcp", uint16) udpPort = Port enrRec.get("udp", uint16) - var pubKey: PublicKey - if not enrRec.get(pubKey): + let pubkey = enrRec.get(PublicKey) + if pubkey.isNone: return err "Failed to read public key from ENR record" - return ok ENode(pubkey: pubkey, + return ok ENode(pubkey: pubkey.get(), address: Address(ip: ip, tcpPort: tcpPort, udpPort: udpPort)) diff --git a/vendor/nim-eth b/vendor/nim-eth index ac5bbe4d3..19426e82f 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit ac5bbe4d3d04ca1baf455f5a7e22a04692bcc73a +Subproject commit 19426e82f926277e9a8e602c9551da4382d568dd From 021e7d6528010679523459bad045db615b0c33a7 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 6 Apr 2020 18:37:12 +0200 Subject: [PATCH 46/58] bump modules --- vendor/nim-eth | 2 +- vendor/nim-stew | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vendor/nim-eth b/vendor/nim-eth index 19426e82f..0b110f328 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 19426e82f926277e9a8e602c9551da4382d568dd +Subproject commit 0b110f3287f26e03f5e7ac4c9e7f0103456895c0 diff --git a/vendor/nim-stew b/vendor/nim-stew index 9414202d5..55c2ec897 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 9414202d53fac99a0b1af33acac816ff9236e6d0 +Subproject commit 55c2ec8977bc486482eb8ae50552226969651e4d From e1d18b431bb418590f440056ed095c8e1fcb2534 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 6 Apr 2020 20:08:39 +0200 Subject: [PATCH 47/58] reraise defect in generic exception handler --- beacon_chain/eth2_discovery.nim | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index 19611883f..e7b7c9eeb 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -57,10 +57,11 @@ proc toENode*(a: MultiAddress): Result[ENode, cstring] {.raises: [Defect].} = except CatchableError: # This will reach the error exit path below discard - except Exception: + except Exception as 
e: # TODO: - # libp2p/crypto/ecnist.nim(118, 20) Error: can raise an unlisted exception: Exception - discard + # nim-libp2p/libp2p/multiaddress.nim(616, 40) Error: can raise an unlisted exception: Exception + if e of Defect: + raise (ref Defect)(e) return err "Invalid MultiAddress" From d7ae86ecd90e3ac6ba832d9dd0321484344979a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mamy=20Andr=C3=A9-Ratsimbazafy?= Date: Mon, 6 Apr 2020 23:58:22 +0200 Subject: [PATCH 48/58] bump nimcrypto: fix equality check of hashes --- vendor/nimcrypto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nimcrypto b/vendor/nimcrypto index 04f933314..cd58cf69a 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit 04f933314ca1d7d79fc6e4f19a0bd7566afbf462 +Subproject commit cd58cf69a0b883a4672cd3f79ee38ec0cf2c8c56 From e7de8aae205738548ef67599163607771c7568af Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Mon, 6 Apr 2020 20:18:42 +0200 Subject: [PATCH 49/58] fix/restore 0.11.1 BeaconState genesis interop: (a) use GENESIS_FORK_VERSION properly in BeaconState init; (b) use GENESIS_FORK_VERSION to be compatible with zcli/zrnt initial eth1 deposits; (c) let List[foo] work with 64-bit ints, so BeaconState.validators SSZ-serializes properly; and (d) update test_interop to use new spec/crypto API --- AllTests-mainnet.md | 6 +- AllTests-minimal.md | 6 +- beacon_chain/interop.nim | 2 +- beacon_chain/spec/beaconstate.nim | 28 ++++-- beacon_chain/spec/crypto.nim | 4 + beacon_chain/spec/datatypes.nim | 3 +- beacon_chain/spec/presets/mainnet.nim | 2 +- beacon_chain/spec/presets/minimal.nim | 2 +- beacon_chain/ssz/types.nim | 4 +- .../test_fixture_const_sanity_check.nim | 1 + tests/test_interop.nim | 88 +++++++++---------- tests/testblockutil.nim | 2 +- 12 files changed, 82 insertions(+), 66 deletions(-) diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index fabab6eab..40db5d3a9 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -80,7 +80,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + EPOCHS_PER_SLASHINGS_VECTOR 8192 [Preset: mainnet] OK + ETH1_FOLLOW_DISTANCE 1024 [Preset: mainnet] OK + GASPRICE_ADJUSTMENT_COEFFICIENT 8 [Preset: mainnet] OK -+ GENESIS_FORK_VERSION "0x00000000" [Preset: mainnet] OK +- GENESIS_FORK_VERSION "0x00000000" [Preset: mainnet] Fail + HISTORICAL_ROOTS_LIMIT 16777216 [Preset: mainnet] OK + HYSTERESIS_DOWNWARD_MULTIPLIER 1 [Preset: mainnet] OK + HYSTERESIS_QUOTIENT 4 [Preset: mainnet] OK @@ -141,7 +141,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + VALIDATOR_REGISTRY_LIMIT 1099511627776 [Preset: mainnet] OK + WHISTLEBLOWER_REWARD_QUOTIENT 512 [Preset: mainnet] OK ``` -OK: 85/87 Fail: 2/87 Skip: 0/87 +OK: 84/87 Fail: 3/87 Skip: 0/87 ## PeerPool testing suite ```diff + Access peers by key test OK @@ -222,4 +222,4 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 OK: 8/8 Fail: 0/8 Skip: 0/8 ---TOTAL--- -OK: 142/144 Fail: 2/144 Skip: 0/144 +OK: 141/144 Fail: 3/144 Skip: 0/144 diff --git a/AllTests-minimal.md b/AllTests-minimal.md index 53dc3991b..17a9ec28c 100644 --- a/AllTests-minimal.md +++ b/AllTests-minimal.md @@ -107,7 +107,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + EPOCHS_PER_SLASHINGS_VECTOR 64 [Preset: minimal] OK + ETH1_FOLLOW_DISTANCE 16 [Preset: minimal] OK + GASPRICE_ADJUSTMENT_COEFFICIENT 8 [Preset: minimal] OK -+ GENESIS_FORK_VERSION "0x00000001" [Preset: minimal] OK +- GENESIS_FORK_VERSION "0x00000001" [Preset: minimal] Fail + HISTORICAL_ROOTS_LIMIT 16777216 [Preset: minimal] OK + HYSTERESIS_DOWNWARD_MULTIPLIER 1 [Preset: minimal] OK + HYSTERESIS_QUOTIENT 4 [Preset: minimal] OK @@ 
-168,7 +168,7 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + VALIDATOR_REGISTRY_LIMIT 1099511627776 [Preset: minimal] OK + WHISTLEBLOWER_REWARD_QUOTIENT 512 [Preset: minimal] OK ``` -OK: 85/87 Fail: 2/87 Skip: 0/87 +OK: 84/87 Fail: 3/87 Skip: 0/87 ## PeerPool testing suite ```diff + Access peers by key test OK @@ -249,4 +249,4 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 OK: 8/8 Fail: 0/8 Skip: 0/8 ---TOTAL--- -OK: 157/159 Fail: 2/159 Skip: 0/159 +OK: 156/159 Fail: 3/159 Skip: 0/159 diff --git a/beacon_chain/interop.nim b/beacon_chain/interop.nim index a6e597b43..4cf9e5e8c 100644 --- a/beacon_chain/interop.nim +++ b/beacon_chain/interop.nim @@ -51,7 +51,7 @@ func makeDeposit*( withdrawal_credentials: makeWithdrawalCredentials(pubkey))) if skipBLSValidation notin flags: - let domain = compute_domain(DOMAIN_DEPOSIT) + let domain = compute_domain(DOMAIN_DEPOSIT, GENESIS_FORK_VERSION) let signing_root = compute_signing_root(ret.getDepositMessage, domain) ret.data.signature = bls_sign(privkey, signing_root.data) diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index a25db4a8e..3cfaa54e8 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -47,7 +47,7 @@ func decrease_balance*( else: state.balances[index] - delta -# https://github.com/ethereum/eth2.0-specs/blob/v0.9.4/specs/core/0_beacon-chain.md#deposits +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#deposits proc process_deposit*( state: var BeaconState, deposit: Deposit, flags: UpdateFlags = {}): bool {.nbench.}= # Process an Eth1 deposit, registering a validator or increasing its balance. @@ -56,7 +56,7 @@ proc process_deposit*( if skipMerkleValidation notin flags and not is_valid_merkle_branch( hash_tree_root(deposit.data), deposit.proof, - DEPOSIT_CONTRACT_TREE_DEPTH + 1, + DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the `List` length mix-in state.eth1_deposit_index, state.eth1_data.deposit_root, ): @@ -72,8 +72,17 @@ proc process_deposit*( index = validator_pubkeys.find(pubkey) if index == -1: - # Verify the deposit signature (proof of possession) - let domain = compute_domain(DOMAIN_DEPOSIT) + # Verify the deposit signature (proof of possession) which is not checked + # by the deposit contract + + # Fork-agnostic domain since deposits are valid across forks + # + # TODO zcli/zrnt does use the GENESIS_FORK_VERSION which can + # vary between minimal/mainnet, though, despite the comment, + # which is copied verbatim from the eth2 beacon chain spec. + # https://github.com/protolambda/zrnt/blob/v0.11.0/eth2/phase0/kickstart.go#L58 + let domain = compute_domain(DOMAIN_DEPOSIT, GENESIS_FORK_VERSION) + let signing_root = compute_signing_root(deposit.getDepositMessage, domain) if skipBLSValidation notin flags and not bls_verify( pubkey, signing_root.data, @@ -98,7 +107,7 @@ proc process_deposit*( true -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#compute_activation_exit_epoch +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#compute_activation_exit_epoch func compute_activation_exit_epoch(epoch: Epoch): Epoch = ## Return the epoch during which validator activations and exits initiated in ## ``epoch`` take effect. 
@@ -185,7 +194,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex, state, whistleblower_index, whistleblowing_reward - proposer_reward) # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#genesis -func initialize_beacon_state_from_eth1*( +proc initialize_beacon_state_from_eth1*( eth1_block_hash: Eth2Digest, eth1_timestamp: uint64, deposits: openArray[Deposit], @@ -208,6 +217,10 @@ func initialize_beacon_state_from_eth1*( const SECONDS_PER_DAY = uint64(60*60*24) var state = BeaconState( + fork: Fork( + previous_version: GENESIS_FORK_VERSION, + current_version: GENESIS_FORK_VERSION, + epoch: GENESIS_EPOCH), genesis_time: eth1_timestamp + 2'u64 * SECONDS_PER_DAY - (eth1_timestamp mod SECONDS_PER_DAY), @@ -248,7 +261,8 @@ func initialize_beacon_state_from_eth1*( validator.activation_epoch = GENESIS_EPOCH # Set genesis validators root for domain separation and chain versioning - state.genesis_validators_root = hash_tree_root(state.validators) + state.genesis_validators_root = + hash_tree_root(sszList(state.validators, VALIDATOR_REGISTRY_LIMIT)) state diff --git a/beacon_chain/spec/crypto.nim b/beacon_chain/spec/crypto.nim index 2310d9cf2..f8c00cb14 100644 --- a/beacon_chain/spec/crypto.nim +++ b/beacon_chain/spec/crypto.nim @@ -260,6 +260,10 @@ func initFromBytes*(val: var ValidatorPrivKey, bytes: openarray[byte]) {.inline. func fromBytes[T](R: type BlsValue[T], bytes: openarray[byte]): R {.inline.}= result.initFromBytes(bytes) +func fromBytes[T](R: var BlsValue[T], bytes: openarray[byte]) {.inline.}= + # This version is only to support tests/test_interop.nim + R.initFromBytes(bytes) + func fromHex*[T](R: var BlsValue[T], hexStr: string) {.inline.} = ## Initialize a BLSValue from its hex representation R.fromBytes(hexStr.hexToSeqByte()) diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim index 1b2f336c0..550767343 100644 --- a/beacon_chain/spec/datatypes.nim +++ b/beacon_chain/spec/datatypes.nim @@ -257,8 +257,7 @@ type eth1_deposit_index*: uint64 # Registry - # TODO List[] won't construct due to VALIDATOR_REGISTRY_LIMIT > high(int) - validators*: seq[Validator] + validators*: List[Validator, VALIDATOR_REGISTRY_LIMIT] balances*: seq[uint64] # Randomness diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index b04ca11fa..b27bc9158 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -75,7 +75,7 @@ const # --------------------------------------------------------------- # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L70 GENESIS_SLOT* = 0.Slot - GENESIS_FORK_VERSION* = 0x00000000 + GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8] BLS_WITHDRAWAL_PREFIX* = 0'u8 # Time parameters diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index a8c790cd2..a2dafc0e3 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ -66,7 +66,7 @@ const # Unchanged GENESIS_SLOT* = 0.Slot - GENESIS_FORK_VERSION* = 0x01000000 + GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 1'u8] BLS_WITHDRAWAL_PREFIX* = 0'u8 # Time parameters diff --git a/beacon_chain/ssz/types.nim b/beacon_chain/ssz/types.nim index 8e3a8968f..474ad283a 100644 --- a/beacon_chain/ssz/types.nim +++ b/beacon_chain/ssz/types.nim @@ -62,9 +62,9 @@ type discard when useListType: - type List*[T; maxLen: static int] = distinct seq[T] + type List*[T; maxLen: static int64] = distinct 
seq[T] else: - type List*[T; maxLen: static int] = seq[T] + type List*[T; maxLen: static int64] = seq[T] macro unsupported*(T: typed): untyped = # TODO: {.fatal.} breaks compilation even in `compiles()` context, diff --git a/tests/official/test_fixture_const_sanity_check.nim b/tests/official/test_fixture_const_sanity_check.nim index ace4787f7..6b9c7fbe2 100644 --- a/tests/official/test_fixture_const_sanity_check.nim +++ b/tests/official/test_fixture_const_sanity_check.nim @@ -88,6 +88,7 @@ const const IgnoreKeys = [ # Ignore all non-numeric types "DEPOSIT_CONTRACT_ADDRESS", + "GENESIS_FORK_VERSION", "SHARD_BLOCK_OFFSETS" ] diff --git a/tests/test_interop.nim b/tests/test_interop.nim index 47d8ed27d..d054d0641 100644 --- a/tests/test_interop.nim +++ b/tests/test_interop.nim @@ -3,11 +3,7 @@ import unittest, stint, blscurve, ./testutil, stew/byteutils, ../beacon_chain/[extras, interop, ssz], - ../beacon_chain/spec/[beaconstate, crypto, helpers, datatypes] - -# TODO: BLS changes in v0.10.1 will generate different interop signatures -# Requires an update of the interop mocked start -# or of ZRNT / ZCLI to v0.10.1 + ../beacon_chain/spec/[beaconstate, crypto, datatypes] # Interop test yaml, found here: # https://github.com/ethereum/eth2.0-pm/blob/a0b9d22fad424574b1307828f867b30237758468/interop/mocked_start/keygen_10_validators.yaml @@ -35,87 +31,89 @@ type DepositConfig = object # - https://github.com/status-im/eth2.0-specs/blob/c58096754b62389b0ea75dbdd717d362691b7c34/test_libs/pyspec/mockup_genesis.py # - "zcli genesis mock" https://github.com/protolambda/zcli +func fromHex(T: type[ValidatorSig], hex: string): T = result.fromhex(hex) + let depositsConfig = [ DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866"), + privkey: ValidatorPrivKey.init("0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866"), signing_root: hexToByteArray[32]("139b510ea7f2788ab82da1f427d6cbe1db147c15a053db738ad5500cd83754a6"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"8684b7f46d25cdd6f937acdaa54bdd2fb34c78d687dca93884ba79e60ebb0df964faa4c49f3469fb882a50c7726985ff0b20c9584cc1ded7c90467422674a05177b2019661f78a5c5c56f67d586f04fd37f555b4876a910bedff830c2bece0aa" + sig: ValidatorSig.fromHex"b796b670fa7eb04b4422bb0872b016895a6adffb1ebd1023db41452701ad65d6fa53d84f3b62e8753bf55230364c6aa318620b574528506ad78517f70c688b82d1c9ad0b12633e0fa5792cf58c21cee9ad25f74156eebd0b6dcd548b91db860f" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x51d0b65185db6989ab0b560d6deed19c7ead0e24b9b6372cbecb1f26bdfad000"), + privkey: ValidatorPrivKey.init("0x51d0b65185db6989ab0b560d6deed19c7ead0e24b9b6372cbecb1f26bdfad000"), signing_root: hexToByteArray[32]("bb4b6184b25873cdf430df3838c8d3e3d16cf3dc3b214e2f3ab7df9e6d5a9b52"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"a2c86c4f654a2a229a287aabc8c63f224d9fb8e1d77d4a13276a87a80c8b75aa7c55826febe4bae6c826aeeccaa82f370517db4f0d5eed5fbc06a3846088871696b3c32ff3fdebdb52355d1eede85bcd71aaa2c00d6cf088a647332edc21e4f3" + sig: ValidatorSig.fromHex"98c4c6a7e12a2b4aeaa23a7d6ae4d2acabc8193d1c1cb53fabcb107ebcbd9c04189c4278995c62883507926712133d941677bd15407eefa49ea6c1cb97f4f7ee4efc3fe0bfa80e3efc3c6b48646b06e6bb845c4e0e7f21df58ef67147f0da7ea" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x315ed405fafe339603932eebe8dbfd650ce5dafa561f6928664c75db85f97857"), + privkey: ValidatorPrivKey.init("0x315ed405fafe339603932eebe8dbfd650ce5dafa561f6928664c75db85f97857"), signing_root: 
hexToByteArray[32]("c6ddd74b1b45db17a864c87dd941cb6c6e16540c534cdbe1cc0d43e9a5d87f7c"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"a5a463d036e9ccb19757b2ddb1e6564a00463aed1ef51bf69264a14b6bfcff93eb6f63664e0df0b5c9e6760c560cb58d135265cecbf360a23641af627bcb17cf6c0541768d3f3b61e27f7c44f21b02cd09b52443405b12fb541f5762cd615d6e" + sig: ValidatorSig.fromHex"8e6163059668ff2db1c8d430a1b0f9aeb330e8eaf680ed9709aaff5d437a54fb0144f2703cbb1e2a4a67c505b534718d0450d99203cccaf18e442bddd27e93ebfa289e6ce30a92e7befb656f12a01cb0204ffd14eed39ae457b7fad22faf8eab" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x25b1166a43c109cb330af8945d364722757c65ed2bfed5444b5a2f057f82d391"), + privkey: ValidatorPrivKey.init("0x25b1166a43c109cb330af8945d364722757c65ed2bfed5444b5a2f057f82d391"), signing_root: hexToByteArray[32]("9397cd33d4e8883dbdc1a1d7df410aa2b627740d11c5574697a2d483a50ab7bb"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"8731c258353c8aa46a8e38509eecfdc32018429239d9acad9b634a4d010ca51395828c0c056808c6e6df373fef7e9a570b3d648ec455d90f497e12fc3011148eded7265b0f995de72e5982db1dbb6eca8275fc99cdd10704b8cf19ec0bb9c350" + sig: ValidatorSig.fromHex"b389e7b4db5caccad6b0b32394b1e77a814e519f4d0789a1e4bb20e2f7f68d7787fe5f065181eeab72d31d847ae96abc0512466689eafbee0439ab7229fb14272654815f535759467e012d9ab7db6e3b3e86d9f73742c46993c755d1f2893684" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x3f5615898238c4c4f906b507ee917e9ea1bb69b93f1dbd11a34d229c3b06784b"), + privkey: ValidatorPrivKey.init("0x3f5615898238c4c4f906b507ee917e9ea1bb69b93f1dbd11a34d229c3b06784b"), signing_root: hexToByteArray[32]("27340cc0f3b76bcc89c78e67166c13a58c97c232889391d1387fc404c4f5255e"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"90b20f054f6a2823d66e159050915335e7a4f64bf7ac449ef83bb1d1ba9a6b2385da977b5ba295ea2d019ee3a8140607079d671352ab233b3bf6be45c61dce5b443f23716d64382e34d7676ae64eedd01babeeb8bfd26386371f6bc01f1d4539" + sig: ValidatorSig.fromHex"aeb410612b19c3176fa087fab3e56e278a01cf5ba5379aa7f4e7344dbfa9e3b3f91b6f39af463ce2e448787b0a77ee1a05f22c0d9afd2f0f6137232c432f83c26389c07a8348364ab8a745eda59ecf2aa65fa8eb3f18eacd10e5a8a2e71b1e06" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x055794614bc85ed5436c1f5cab586aab6ca84835788621091f4f3b813761e7a8"), + privkey: ValidatorPrivKey.init("0x055794614bc85ed5436c1f5cab586aab6ca84835788621091f4f3b813761e7a8"), signing_root: hexToByteArray[32]("b8cf48542d8531ae59b56e175228e7fcb82415649b5e992e132d3234b31dda2f"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"99df72b850141c67fc956a5ba91abb5a091538d963aa6c082e1ea30b7f7e5a54ec0ff79c749342d4635e4901e8dfc9b90604d5466ff2a7b028c53d4dac01ffb3ac0555abd3f52d35aa1ece7e8e9cce273416b3cf582a5f2190e87a3b15641f0c" + sig: ValidatorSig.fromHex"b501a41ca61665dddbe248d2fa15e5498cb2b38dcf2093acd5768efeda1b0ac963e600d8e38c2c91964d8bf72fd197c71824c1d493272caf6140828f7f6b266281f044b4811bbd7ef0f57953b15399b4ef17af5b9c80df5c142600cf17bfee64" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x1023c68852075965e0f7352dee3f76a84a83e7582c181c10179936c6d6348893"), + privkey: ValidatorPrivKey.init("0x1023c68852075965e0f7352dee3f76a84a83e7582c181c10179936c6d6348893"), signing_root: hexToByteArray[32]("5f919d91faecece67422edf573a507fc5f9720f4e37063cceb40aa3b371f1aa9"), domain: DOMAIN_DEPOSIT, - sig: 
ValidatorSig.fromHex"a4023f36f4f354f69b615b3651596d4b479f005b04f80ef878aaeb342e94ad6f9acddf237309a79247d560b05f4f7139048b5eee0f08da3a11f3ee148ca76e3e1351a733250515a61e12027468cff2de193ab8ee5cd90bdd1c50e529edda512b" + sig: ValidatorSig.fromHex"8f2e2de3c0504cc4d424de1593d508d7488bfc54f61882922b754e97e4faeebe4f24f19184f0630dc51327bc9ab26dd2073d55687f7284ab3395b770d7c4d35bb6e719e6881739e2f4f61e29e11c3b9e61529c202e30f5f5957544eeb0a9626e" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x3a941600dc41e5d20e818473b817a28507c23cdfdb4b659c15461ee5c71e41f5"), + privkey: ValidatorPrivKey.init("0x3a941600dc41e5d20e818473b817a28507c23cdfdb4b659c15461ee5c71e41f5"), signing_root: hexToByteArray[32]("d2ff8bfda7e7bcc64c636a4855d2a1eccb7f47379f526a753fd934ae37ba9ec7"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"81c52ada6d975a5b968509ab16fa58d617dd36a6c333e6ed86a7977030e4c5d37a488596c6776c2cdf4831ea7337ad7902020092f60e547714449253a947277681ff80b7bf641ca782214fc9ec9b58c66ab43c0a554c133073c96ad35edff101" + sig: ValidatorSig.fromHex"90a83842b6d215f1da3ebf3eeea6c4bff0682ee3f7aa9d06bb818c716cfdb5cd577f997ddd606c908f7a68157f36ff660a0e73265f17cccbd23be5ed053b3812672ba52bce6ec034fadea3b78f46a9c6da88db6327a18a9bb3a7f2747185fc6f" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x066e3bdc0415530e5c7fed6382d5c822c192b620203cf669903e1810a8c67d06"), + privkey: ValidatorPrivKey.init("0x066e3bdc0415530e5c7fed6382d5c822c192b620203cf669903e1810a8c67d06"), signing_root: hexToByteArray[32]("1e19687d32785632ddc9b6b319690ea45c0ea20d7bc8aacbd33f6ebbe30816e1"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"b4aab8f6624f61f4f5eb6d75839919a3ef6b4e1b19cae6ef063d6281b60ff1d5efe02bcbfc4b9eb1038c42e0a3325d8a0fcf7b64ff3cd9df5c629b864dfdc5b763283254ccd6cfa28cff53e477fb1743440a18d76a776ec4d66c5f50d695ca85" + sig: ValidatorSig.fromHex"a232a8bb03ecd356cf0e18644077880afe7ecfc565c8627841797deb4dfce8366cc0d0f6e151b51c0acc05a66f1363d204e8133e772dfb4878c11f7bf14b8293ce734c37adca9c32cc2987f0bc34242cc30f139d86c44f8d4383af743be3d1ae" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x2b3b88a041168a1c4cd04bdd8de7964fd35238f95442dc678514f9dadb81ec34"), + privkey: ValidatorPrivKey.init("0x2b3b88a041168a1c4cd04bdd8de7964fd35238f95442dc678514f9dadb81ec34"), signing_root: hexToByteArray[32]("64a910a0a3e7da9a7a29ee2c92859314a160040ffb2042641fc56cba75b78012"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"9603f7dcab6822edb92eb588f1e15fcc685ceb8bcc7257adb0e4a5995820b8ef77215650792120aff871f30a52475ea31212aa741a3f0e6b2dbcb3a63181571306a411c772a7fd08826ddeab98d1c47b5ead82f8e063b9d7f1f217808ee4fb50" + sig: ValidatorSig.fromHex"8e0ccf7dd9dd00820a695161ea865220489ca48504012b7c36c85b3effb896a02ee9714a5e383f7105357a24f791562c1353e331d2cfa048cb94fd4fe42a008b18c5bdec6fcf7c8b75c5f5e582cd9571b308e8b1757d672fbb9092725985a716" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x2e62dbea7fe3127c3b236a92795dd633be51ee7cdfe5424882a2f355df497117"), + privkey: ValidatorPrivKey.init("0x2e62dbea7fe3127c3b236a92795dd633be51ee7cdfe5424882a2f355df497117"), signing_root: hexToByteArray[32]("5bf0c7a39df536b3c8a5dc550f0163af0b33a56b9454b5240cea9ad8356c4117"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"92b04a4128e84b827b46fd91611acc46f97826d13fbdcbf000b6b3585edd8629e38d4c13f7f3fde5a1170f4f3f55bef21883498602396c875275cb2c795d4488383b1e931fefe813296beea823c228af9e0d97e65742d380a0bbd6f370a89b23" + sig: 
ValidatorSig.fromHex"a07adeeb639a974fe3ae78a0a28785b195bffeaa2ec558c6baa63458daaf5b7a245940a2d9b91a993515295075eba4e115c6777eda1e7933cb53f64ab36619e49faadf289a8cc1521ca3ae5f9a3f2b88e355ef0b75dd8a9949c9d2a43c5589e0" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x2042dc809c130e91906c9cb0be2fec0d6afaa8f22635efc7a3c2dbf833c1851a"), + privkey: ValidatorPrivKey.init("0x2042dc809c130e91906c9cb0be2fec0d6afaa8f22635efc7a3c2dbf833c1851a"), signing_root: hexToByteArray[32]("e8a45fa71addd854d8d78e0b2cdc8f9100c8a5e03d894c1c382068e8aa4b71e2"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"89ac6297195e768b5e88cbbb047d8b81c77550c9462df5750f4b899fc0de985fa9e16fccc6c6bd71124eb7806064b7110d534fb8f6ccaf118074cd4f4fac8a22442e8facc2cd380ddc4ebf6b9c2f7e956f418279dc04a6737ede6d7763396ed9" + sig: ValidatorSig.fromHex"95719c0c4dae737aac602aeadf9faeb9ad3492450af249c43a1147a6e471ddb3f2b5979b6587e843d20c9caa8ecd83e8001b57a4f7c302927725966acc959eb6668357831b7a0692f2396a18939d9fa974e611beed4a7a59ffe892e77d2680bd" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x15283c540041cd85c4533ee47517c8bb101c6207e9acbba2935287405a78502c"), + privkey: ValidatorPrivKey.init("0x15283c540041cd85c4533ee47517c8bb101c6207e9acbba2935287405a78502c"), signing_root: hexToByteArray[32]("3dfab0daa3be9c72c5dd3b383e756d6048bb76cd3d09abb4dc991211ae8a547b"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"8adee09a19ca26d5753b9aa447b0af188a769f061d11bf40b32937ad3fa142ca9bc164323631a4bb78f0a5d4fd1262010134adc723ab377a2e6e362d3e2130a46b0a2088517aee519a424147f043cc5007a13f2d2d5311c18ee2f694ca3f19fc" + sig: ValidatorSig.fromHex"b8221ad674d7c23378b488555eb6e06ce56a342dad84ba6e3a57e108c1c426161b568a9366d82fd0059a23621922a1fc0e59d8eaa66dbb4611a173be167731367edf8daad3b07b64207faf3ea457a335228def3ca61571c4edc15dc392bf4e56" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x03c85e538e1bb30235a87a3758c5571753ca1308b7dee321b74c19f78423999b"), + privkey: ValidatorPrivKey.init("0x03c85e538e1bb30235a87a3758c5571753ca1308b7dee321b74c19f78423999b"), signing_root: hexToByteArray[32]("8905ae60c419e38f263eb818a5536e4144df3c0a800132e07594d457c62b5825"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"90dc90a295644da5c6d441cd0b33e34b8f1f77230755fd78b9ecbd86fd6e845e554c0579ab88c76ca14b56d9f0749f310cd884c193ec69623ccd724469268574c985ee614e80f00331c24f78a3638576d304c67c2aa6ce8949652257581c18a5" + sig: ValidatorSig.fromHex"a5e61349958745c80862af84e06924748832cae379b02a50909468fef9f07f21d35a98e1287b6219528a1ad566567d0619e049efa9fa6e81410bb3a247cf53b0f6787f747f8229fb9f851290b140f14f14a2adcb23b7cafaf90b301d14169324" ), DepositConfig( - privkey: ValidatorPrivKey.init(hexToSeqByte"0x45a577d5cab31ac5cfff381500e09655f0799f29b130e6ad61c1eec4b15bf8dd"), + privkey: ValidatorPrivKey.init("0x45a577d5cab31ac5cfff381500e09655f0799f29b130e6ad61c1eec4b15bf8dd"), signing_root: hexToByteArray[32]("702d1bd9c27c999923149f6c6578c835943b58b90845086bbf5be3b94aa4663d"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"9338c8b0050cdb464efae738d6d89ac48d5839ce750e3f1f20acd52a0b61e5c033fa186d3ed0ddf5856af6c4815971b00a68002b1eba45f5af27f91cad04831e32157fecf5fb091a8087829e2d3dd3438e0b86ff8d036be4a3876fa0dfa60e6c" + sig: ValidatorSig.fromHex"893d8e70f2cdb6f7acc3d9828e72d7b20e512956588d8c068b3ef4aa649db369cf962506b7c9107246246d9b20361cd80250109da513809415314af3ef1f220c171dbc2d9c2b62056739703ae4eb1be13fa289ea8472920b2393041f69198dc5" ), DepositConfig( - privkey: 
ValidatorPrivKey.init(hexToSeqByte"0x03cffafa1cbaa7e585eaee07a9d35ae57f6dfe19a9ea53af9c37e9f3dfac617c"), + privkey: ValidatorPrivKey.init("0x03cffafa1cbaa7e585eaee07a9d35ae57f6dfe19a9ea53af9c37e9f3dfac617c"), signing_root: hexToByteArray[32]("77f3da02c410e9ccba39d89983c52e6e77ca5dec3ae423311a578ee28b2ec0cd"), domain: DOMAIN_DEPOSIT, - sig: ValidatorSig.fromHex"8819f719f7af378f27fe65c699b5206f1f7bbfd62200cab09e7ffe3d8fce0346eaa84b274d66d700cd1a0c0c7b46f62100afb2601270292ddf6a2bddff0248bb8ed6085d10c8c9e691a24b15d74bc7a9fcf931d953300d133f8c0e772704b9ba" + sig: ValidatorSig.fromHex"87ae1567999d3ceefce04c1a48aa189c3d368efbeda53c01962783941c03d3a26e08e5e9d287a927decf4e77755b97e80856e339c3af41dc5ffd373c6e4768de62718ce76cfd8c2062e7673c9eedd2fec235467967f932e59e0b3a32040c0038" ) ] @@ -128,14 +126,13 @@ suiteReport "Interop": check: # getBytes is bigendian and returns full 48 bytes of key.. - Uint256.fromBytesBE(key.getBytes()[48-32..<48]) == v + Uint256.fromBytesBE(key.exportRaw()[48-32..<48]) == v timedTest "Interop signatures": for dep in depositsConfig: let computed_sig = bls_sign( - key = dep.privkey, - msg = dep.signing_root, - domain = compute_domain(dep.domain) + privkey = dep.privkey, + message = dep.signing_root ) check: @@ -152,20 +149,21 @@ suiteReport "Interop": privKey = makeInteropPrivKey(i) deposits.add(makeDeposit(privKey.pubKey(), privKey)) + const genesis_time = 1570500000 var # TODO this currently requires skipMerkleValidation to pass the test # makeDeposit doesn't appear to produce a proof? initialState = initialize_beacon_state_from_eth1( - eth1BlockHash, 1570500000, deposits, {skipMerkleValidation}) + eth1BlockHash, genesis_time, deposits, {skipMerkleValidation}) # https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state - initialState.genesis_time = 1570500000 + initialState.genesis_time = genesis_time let expected = when const_preset == "minimal": - "5a3bbcae4ab2b4eafded947689fd7bd8214a616ffffd2521befdfe2a3b2f74c0" + "410c8758710155b49208d52c9e4bd2f11aa16a7c7521e560a2d05dcd69a023b3" elif const_preset == "mainnet": - "db0a887acd5e201ac579d6cdc0c4932f2a0adf342d84dc5cd11ce959fbce3760" + "95a0b1e7b0b77d0cbe2bcd12c90469e68edb141424b1a6126f1d55498afe3ae6" else: "unimplemented" check: diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index 119994b8f..f043a6f8f 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -43,7 +43,7 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit = privkey = makeFakeValidatorPrivKey(i) pubkey = privkey.pubKey() withdrawal_credentials = makeFakeHash(i) - domain = compute_domain(DOMAIN_DEPOSIT) + domain = compute_domain(DOMAIN_DEPOSIT, GENESIS_FORK_VERSION) result = Deposit( data: DepositData( From 2771deadfc77bc5a1bca9590752fee2dcf32f2d5 Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Mon, 6 Apr 2020 20:55:47 +0200 Subject: [PATCH 50/58] re-add test_interop to all_tests and mark several v0.10.1 phase 0 spec references as v0.11.1 --- AllTests-mainnet.md | 9 ++++++++- AllTests-minimal.md | 9 ++++++++- beacon_chain/block_pool.nim | 2 +- beacon_chain/spec/beaconstate.nim | 2 +- beacon_chain/spec/presets/mainnet.nim | 4 ++-- beacon_chain/spec/presets/minimal.nim | 2 +- tests/all_tests.nim | 8 ++------ 7 files changed, 23 insertions(+), 13 deletions(-) diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 40db5d3a9..13d955180 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -51,6 +51,13 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Attestation 
topics OK ``` OK: 1/1 Fail: 0/1 Skip: 0/1 +## Interop +```diff ++ Interop genesis OK ++ Interop signatures OK ++ Mocked start private key OK +``` +OK: 3/3 Fail: 0/3 Skip: 0/3 ## Official - 0.11.1 - constants & config [Preset: mainnet] ```diff + BASE_REWARD_FACTOR 64 [Preset: mainnet] OK @@ -222,4 +229,4 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 OK: 8/8 Fail: 0/8 Skip: 0/8 ---TOTAL--- -OK: 141/144 Fail: 3/144 Skip: 0/144 +OK: 144/147 Fail: 3/147 Skip: 0/147 diff --git a/AllTests-minimal.md b/AllTests-minimal.md index 17a9ec28c..944218df0 100644 --- a/AllTests-minimal.md +++ b/AllTests-minimal.md @@ -78,6 +78,13 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 + Attestation topics OK ``` OK: 1/1 Fail: 0/1 Skip: 0/1 +## Interop +```diff ++ Interop genesis OK ++ Interop signatures OK ++ Mocked start private key OK +``` +OK: 3/3 Fail: 0/3 Skip: 0/3 ## Official - 0.11.1 - constants & config [Preset: minimal] ```diff + BASE_REWARD_FACTOR 64 [Preset: minimal] OK @@ -249,4 +256,4 @@ OK: 4/4 Fail: 0/4 Skip: 0/4 OK: 8/8 Fail: 0/8 Skip: 0/8 ---TOTAL--- -OK: 156/159 Fail: 3/159 Skip: 0/159 +OK: 159/162 Fail: 3/162 Skip: 0/162 diff --git a/beacon_chain/block_pool.nim b/beacon_chain/block_pool.nim index 1f8743b3d..507d0b81a 100644 --- a/beacon_chain/block_pool.nim +++ b/beacon_chain/block_pool.nim @@ -943,7 +943,7 @@ proc getProposer*(pool: BlockPool, head: BlockRef, slot: Slot): Option[Validator pool.withState(pool.tmpState, head.atSlot(slot)): var cache = get_empty_per_epoch_cache() - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/validator.md#validator-assignments + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#validator-assignments let proposerIdx = get_beacon_proposer_index(state, cache) if proposerIdx.isNone: warn "Missing proposer index", diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 3cfaa54e8..de12e1399 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -193,7 +193,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex, increase_balance( state, whistleblower_index, whistleblowing_reward - proposer_reward) -# https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#genesis +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#genesis proc initialize_beacon_state_from_eth1*( eth1_block_hash: Eth2Digest, eth1_timestamp: uint64, diff --git a/beacon_chain/spec/presets/mainnet.nim b/beacon_chain/spec/presets/mainnet.nim index b27bc9158..33dfeb0d8 100644 --- a/beacon_chain/spec/presets/mainnet.nim +++ b/beacon_chain/spec/presets/mainnet.nim @@ -20,7 +20,7 @@ type const # Misc # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/mainnet.yaml#L6 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/mainnet.yaml#L6 MAX_COMMITTEES_PER_SLOT* {.intdefine.} = 64 @@ -49,7 +49,7 @@ const HYSTERESIS_UPWARD_MULTIPLIER* = 5 # Constants (TODO: not actually configurable) - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#constants + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#constants BASE_REWARDS_PER_EPOCH* = 4 DEPOSIT_CONTRACT_TREE_DEPTH* = 32 diff --git a/beacon_chain/spec/presets/minimal.nim b/beacon_chain/spec/presets/minimal.nim index a2dafc0e3..aa474a512 100644 --- a/beacon_chain/spec/presets/minimal.nim +++ b/beacon_chain/spec/presets/minimal.nim @@ 
-20,7 +20,7 @@ type const # Misc # --------------------------------------------------------------- - # https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/configs/minimal.yaml#L4 + # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/configs/minimal.yaml#L4 # Changed MAX_COMMITTEES_PER_SLOT* = 4 diff --git a/tests/all_tests.nim b/tests/all_tests.nim index 17d1b8ef3..acdb1714b 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -27,12 +27,8 @@ import # Unit test ./test_zero_signature, ./test_peer_pool, ./test_sync_manager, - ./test_honest_validator - - # ./test_interop - # TODO: BLS changes in v0.10.1 will generate different interop signatures - # Requires an update of the interop mocked start: https://github.com/ethereum/eth2.0-pm/tree/master/interop/mocked_start - # or of ZRNT / ZCLI to v0.10.1 + ./test_honest_validator, + ./test_interop import # Refactor state transition unit tests # TODO re-enable when useful From b4fc641b8c37437fdbb5dbeb68a4f3df41b824f3 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Apr 2020 12:37:08 +0200 Subject: [PATCH 51/58] result->results, bump bearssl --- beacon_chain/eth2_discovery.nim | 4 ++-- vendor/nim-bearssl | 2 +- vendor/nim-eth | 2 +- vendor/nim-stew | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_chain/eth2_discovery.nim b/beacon_chain/eth2_discovery.nim index e7b7c9eeb..5da43d292 100644 --- a/beacon_chain/eth2_discovery.nim +++ b/beacon_chain/eth2_discovery.nim @@ -4,7 +4,7 @@ import os, net, strutils, strformat, parseutils, - chronicles, stew/[result, objects], eth/keys, eth/trie/db, eth/p2p/enode, + chronicles, stew/[results, objects], eth/keys, eth/trie/db, eth/p2p/enode, eth/p2p/discoveryv5/[enr, protocol, discovery_db, types], libp2p/[multiaddress, peer], libp2p/crypto/crypto as libp2pCrypto, @@ -16,7 +16,7 @@ type PublicKey = keys.PublicKey export - Eth2DiscoveryProtocol, open, start, close, result + Eth2DiscoveryProtocol, open, start, close, results proc toENode*(a: MultiAddress): Result[ENode, cstring] {.raises: [Defect].} = try: diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index 924fb6cad..68c6d2730 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit 924fb6cad1c849eec29d2c96c9803e4f43d6a8f0 +Subproject commit 68c6d27304245c948526487b37e10951acf7dbc8 diff --git a/vendor/nim-eth b/vendor/nim-eth index 0b110f328..ec1492a65 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 0b110f3287f26e03f5e7ac4c9e7f0103456895c0 +Subproject commit ec1492a65a1d82e83181c7d216dd97fb3268d5f0 diff --git a/vendor/nim-stew b/vendor/nim-stew index 55c2ec897..b06a5b6e3 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 55c2ec8977bc486482eb8ae50552226969651e4d +Subproject commit b06a5b6e32aa4d5abf9c1019ab6728fa8f360cc5 From 2930b65eb4e552ac53cdec25fb3be853465b1b32 Mon Sep 17 00:00:00 2001 From: Zahary Karadjov Date: Tue, 7 Apr 2020 19:04:22 +0300 Subject: [PATCH 52/58] [skip ci] simple logging fix --- beacon_chain/beacon_node.nim | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index e99e7af0c..d1505a0e0 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -1213,10 +1213,6 @@ when isMainModule: proc (logLevel: LogLevel, msg: LogOutputStr) {.gcsafe.} = stdout.write(msg) - debug "Launching beacon node", - version = fullVersionStr, - cmdParams = commandLineParams(), config - randomize() if config.logLevel != 
LogLevel.NONE: @@ -1299,6 +1295,11 @@ when isMainModule: reportFailureFor keyFile.string of noCommand: + debug "Launching beacon node", + version = fullVersionStr, + cmdParams = commandLineParams(), + config + createPidFile(config.dataDir.string / "beacon_node.pid") var node = waitFor BeaconNode.init(config) From b2d2624d1479356d89e63867edcb62cf4d785b02 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Tue, 7 Apr 2020 14:23:48 -0600 Subject: [PATCH 53/58] bump libp2p to lastet master --- vendor/nim-libp2p | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 5285f0d09..00fbc9246 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 5285f0d091d2f5215b5c0dd5bd3399af1afec626 +Subproject commit 00fbc9246ea033469d50e3ad06f47e4209703c13 From f876ae574bd16af151d5f9c481abddcaf5163930 Mon Sep 17 00:00:00 2001 From: Dmitriy Ryajov Date: Tue, 7 Apr 2020 14:57:52 -0600 Subject: [PATCH 54/58] fix compile error --- vendor/nim-libp2p | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 00fbc9246..f4740c8b8 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 00fbc9246ea033469d50e3ad06f47e4209703c13 +Subproject commit f4740c8b8e1d55b45ed578cb42e1cd084f9b9644 From 2308dc952017129ed74375fa695fd51110d0b884 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Wed, 8 Apr 2020 14:36:03 +0200 Subject: [PATCH 55/58] bump Nim target commit to latest devel --- vendor/nimbus-build-system | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index 67967528e..0cce46e12 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit 67967528e519e4de3bb5a028dcff898ff713c127 +Subproject commit 0cce46e1260b053349d0d6f337f5d67a7bc14462 From a7fe5e4218908f7fd8ebcea38335e8e8e4645518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Wed, 8 Apr 2020 17:33:13 +0200 Subject: [PATCH 56/58] Azure: change NimBinaries cache key - also bump submodules to match "devel" --- azure-pipelines.yml | 2 +- vendor/nim-bearssl | 2 +- vendor/nim-eth | 2 +- vendor/nim-libp2p | 2 +- vendor/nim-serialization | 2 +- vendor/nim-stew | 2 +- vendor/nim-web3 | 2 +- vendor/nimcrypto | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f6f01f4d8..b10b3fb0e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -14,7 +14,7 @@ jobs: - task: CacheBeta@1 displayName: 'cache Nim binaries' inputs: - key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v3" + key: NimBinaries | $(Agent.OS) | $(PLATFORM) | "$(Build.SourceBranchName)" | "v4" path: NimBinaries - task: CacheBeta@1 diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl index 924fb6cad..68c6d2730 160000 --- a/vendor/nim-bearssl +++ b/vendor/nim-bearssl @@ -1 +1 @@ -Subproject commit 924fb6cad1c849eec29d2c96c9803e4f43d6a8f0 +Subproject commit 68c6d27304245c948526487b37e10951acf7dbc8 diff --git a/vendor/nim-eth b/vendor/nim-eth index c827c3732..ec1492a65 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit c827c37329541a2f3d7d8057fa577f14537bb832 +Subproject commit ec1492a65a1d82e83181c7d216dd97fb3268d5f0 diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index e39bf0a4c..f4740c8b8 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject 
commit e39bf0a4cf6032a0da77d4bc01caadf9155956fc +Subproject commit f4740c8b8e1d55b45ed578cb42e1cd084f9b9644 diff --git a/vendor/nim-serialization b/vendor/nim-serialization index b5f2df79c..0eab8cfee 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit b5f2df79cc2858c3f3c5d7b9812921816d67d055 +Subproject commit 0eab8cfeee55cfa3bb893ec31137d3c25b83a1ae diff --git a/vendor/nim-stew b/vendor/nim-stew index 86739f99c..b06a5b6e3 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit 86739f99c4efc1246d45164ef81c1e8f72970b65 +Subproject commit b06a5b6e32aa4d5abf9c1019ab6728fa8f360cc5 diff --git a/vendor/nim-web3 b/vendor/nim-web3 index da74eabaa..969adf2f1 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit da74eabaa00e2a8f7c58e8d84a02b701041dfc2d +Subproject commit 969adf2f1ef42753ba26d5ab7eca01617c846792 diff --git a/vendor/nimcrypto b/vendor/nimcrypto index 04f933314..cd58cf69a 160000 --- a/vendor/nimcrypto +++ b/vendor/nimcrypto @@ -1 +1 @@ -Subproject commit 04f933314ca1d7d79fc6e4f19a0bd7566afbf462 +Subproject commit cd58cf69a0b883a4672cd3f79ee38ec0cf2c8c56 From 366da6df36ff3efa950ae35a403262c9d5aa2861 Mon Sep 17 00:00:00 2001 From: Dustin Brody Date: Wed, 8 Apr 2020 16:06:30 +0200 Subject: [PATCH 57/58] fix and refactor merkle_minimal sanity checks to run correctly and under CI --- tests/all_tests.nim | 1 + tests/mocking/merkle_minimal.nim | 110 ++++++++++++++++--------------- tests/test_mocking.nim | 16 +++++ 3 files changed, 73 insertions(+), 54 deletions(-) create mode 100644 tests/test_mocking.nim diff --git a/tests/all_tests.nim b/tests/all_tests.nim index acdb1714b..396b02f77 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -19,6 +19,7 @@ import # Unit test ./test_discovery_helpers, ./test_helpers, ./test_kvstore, + ./test_mocking, ./test_kvstore_sqlite3, ./test_ssz, ./test_state_transition, diff --git a/tests/mocking/merkle_minimal.nim b/tests/mocking/merkle_minimal.nim index 26afea317..5f60564a8 100644 --- a/tests/mocking/merkle_minimal.nim +++ b/tests/mocking/merkle_minimal.nim @@ -1,16 +1,19 @@ # beacon_chain -# Copyright (c) 2018-2019 Status Research & Development GmbH +# Copyright (c) 2018-2020 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +# https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/tests/core/pyspec/eth2spec/utils/merkle_minimal.py + # Merkle tree helpers # --------------------------------------------------------------- import + strutils, macros, bitops, # Specs - ../../beacon_chain/spec/[datatypes, digest], + ../../beacon_chain/spec/[beaconstate, datatypes, digest], ../../beacon_chain/ssz func round_step_down*(x: Natural, step: static Natural): int {.inline.} = @@ -82,9 +85,7 @@ proc getMerkleProof*[Depth: static int]( else: result[depth] = ZeroHashes[depth] -when isMainModule: # Checks - import strutils, macros, bitops - +proc testMerkleMinimal*(): bool = proc toDigest[N: static int](x: array[N, byte]): Eth2Digest = result.data[0 .. 
N-1] = x @@ -122,63 +123,64 @@ when isMainModule: # Checks # Running tests with hash_tree_root([a, b, c]) # works for depth 2 (3 or 4 leaves) - when false: - macro roundTrips(): untyped = - result = newStmtList() + macro roundTrips(): untyped = + result = newStmtList() - # Unsure why sszList ident is undeclared in "quote do" - let list = bindSym"sszList" + # Unsure why sszList ident is undeclared in "quote do" + let list = bindSym"sszList" - # compile-time unrolled test - for nleaves in [3, 4, 5, 7, 8, 1 shl 10, 1 shl 32]: - let depth = fastLog2(nleaves-1) + 1 + # compile-time unrolled test + for nleaves in [3, 4, 5, 7, 8, 1 shl 10, 1 shl 32]: + let depth = fastLog2(nleaves-1) + 1 - result.add quote do: - block: - let tree = merkleTreeFromLeaves([a, b, c], Depth = `depth`) - echo "Tree: ", tree + result.add quote do: + block: + let tree = merkleTreeFromLeaves([a, b, c], Depth = `depth`) + #echo "Tree: ", tree - let leaves = `list`(@[a, b, c], int64(`nleaves`)) - let root = hash_tree_root(leaves) - echo "Root: ", root + doAssert tree.nnznodes[`depth`].len == 1 + let root = tree.nnznodes[`depth`][0] + #echo "Root: ", root - block: # proof for a - let index = 0 - let proof = getMerkleProof(tree, index) - echo "Proof: ", proof + block: # proof for a + let index = 0 + let proof = getMerkleProof(tree, index) + #echo "Proof: ", proof - doAssert is_valid_merkle_branch( - a, get_merkle_proof(tree, index = index), - depth = `depth`, - index = index.uint64, - root = root - ), "Failed (depth: " & $`depth` & - ", nleaves: " & $`nleaves` & ')' + doAssert is_valid_merkle_branch( + a, get_merkle_proof(tree, index = index), + depth = `depth`, + index = index.uint64, + root = root + ), "Failed (depth: " & $`depth` & + ", nleaves: " & $`nleaves` & ')' - block: # proof for b - let index = 1 - let proof = getMerkleProof(tree, index) - # echo "Proof: ", proof + block: # proof for b + let index = 1 + let proof = getMerkleProof(tree, index) - doAssert is_valid_merkle_branch( - b, get_merkle_proof(tree, index = index), - depth = `depth`, - index = index.uint64, - root = root - ), "Failed (depth: " & $`depth` & - ", nleaves: " & $`nleaves` & ')' + doAssert is_valid_merkle_branch( + b, get_merkle_proof(tree, index = index), + depth = `depth`, + index = index.uint64, + root = root + ), "Failed (depth: " & $`depth` & + ", nleaves: " & $`nleaves` & ')' - block: # proof for c - let index = 2 - let proof = getMerkleProof(tree, index) - # echo "Proof: ", proof + block: # proof for c + let index = 2 + let proof = getMerkleProof(tree, index) - doAssert is_valid_merkle_branch( - c, get_merkle_proof(tree, index = index), - depth = `depth`, - index = index.uint64, - root = root - ), "Failed (depth: " & $`depth` & - ", nleaves: " & $`nleaves` & ')' + doAssert is_valid_merkle_branch( + c, get_merkle_proof(tree, index = index), + depth = `depth`, + index = index.uint64, + root = root + ), "Failed (depth: " & $`depth` & + ", nleaves: " & $`nleaves` & ')' - roundTrips() + roundTrips() + true + +when isMainModule: + discard testMerkleMinimal() diff --git a/tests/test_mocking.nim b/tests/test_mocking.nim new file mode 100644 index 000000000..2c74e2d12 --- /dev/null +++ b/tests/test_mocking.nim @@ -0,0 +1,16 @@ +# beacon_chain +# Copyright (c) 2020 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
+# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.used.} + +import + unittest, ./testutil, ./mocking/merkle_minimal + +suiteReport "Mocking utilities": + timedTest "merkle_minimal": + check: + testMerkleMinimal() From 5dde7e0e737bff0bdbb2cc28950155af3e0614bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C8=98tefan=20Talpalaru?= Date: Wed, 8 Apr 2020 18:58:25 +0200 Subject: [PATCH 58/58] cleanup --- .gitmodules | 7 ++----- azure-pipelines.yml | 6 ++---- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.gitmodules b/.gitmodules index e3e927660..73bedb8ff 100644 --- a/.gitmodules +++ b/.gitmodules @@ -136,13 +136,10 @@ [submodule "vendor/nim-sqlite3-abi"] path = vendor/nim-sqlite3-abi url = https://github.com/arnetheduck/nim-sqlite3-abi.git + ignore = dirty + branch = master [submodule "vendor/nim-testutils"] path = vendor/nim-testutils url = https://github.com/status-im/nim-testutils.git ignore = dirty branch = master -[submodule "vendor/nim-rocksdb"] - path = vendor/nim-rocksdb - url = https://github.com/status-im/nim-rocksdb.git - ignore = dirty - branch = master diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b10b3fb0e..6124324b0 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -68,9 +68,7 @@ jobs: mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} LOG_LEVEL=TRACE NIMFLAGS="-d:testnet_servers_image" file build/beacon_node - if [[ $PLATFORM == "x86" ]]; then - # fail fast - export NIMTEST_ABORT_ON_ERROR=1 - fi + # fail fast + export NIMTEST_ABORT_ON_ERROR=1 mingw32-make -j2 ARCH_OVERRIDE=${PLATFORM} DISABLE_TEST_FIXTURES_SCRIPT=1 test displayName: 'build and test'
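
Note on the interop signature changes above: under the v0.11.x signing scheme the fork-version-dependent domain is folded into signing_root before signing, which is why bls_sign now takes only a private key and a 32-byte message, and why makeDeposit derives its domain with compute_domain(DOMAIN_DEPOSIT, GENESIS_FORK_VERSION). A minimal sketch of the per-deposit check, written as if it lived inside tests/test_interop.nim (so DepositConfig, bls_sign and the spec imports are already in scope); the proc name is hypothetical:

```nim
# Minimal sketch, assuming the helper signatures used in tests/test_interop.nim
# after this patch series; checkInteropSignature itself is hypothetical.
proc checkInteropSignature(dep: DepositConfig): bool =
  # No separate `domain` argument any more: the fork-version-dependent
  # domain is already part of the precomputed 32-byte signing root.
  let computed_sig = bls_sign(
    privkey = dep.privkey,
    message = dep.signing_root
  )
  # The stored test-vector signature should match exactly.
  computed_sig == dep.sig
```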
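
The merkle_minimal change above converts the old when isMainModule self-check into an exported testMerkleMinimal*(): bool so that the new tests/test_mocking.nim can drive it from a regular timedTest. A minimal sketch of one proof round-trip that helper exercises, assuming merkleTreeFromLeaves, getMerkleProof and is_valid_merkle_branch keep the signatures shown in the diff and that the proc is added inside tests/mocking/merkle_minimal.nim; the proc name is hypothetical:

```nim
# Minimal sketch of a single proof round-trip, assuming the helpers from
# tests/mocking/merkle_minimal.nim are in scope; singleProofRoundTrip is
# hypothetical and not part of the patch.
proc singleProofRoundTrip(a, b, c: Eth2Digest): bool =
  const depth = 2                       # 3 leaves: fastLog2(3 - 1) + 1 == 2
  let tree = merkleTreeFromLeaves([a, b, c], Depth = depth)
  doAssert tree.nnznodes[depth].len == 1
  let root = tree.nnznodes[depth][0]    # tree root sits alone at the top level
  # Prove membership of the first leaf and verify it against the root.
  is_valid_merkle_branch(
    a, getMerkleProof(tree, index = 0),
    depth = depth, index = 0'u64, root = root)
```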