Bump nim-web3 and others
Bump nim-json-rpc and nimbus-eth2 too. Reason: both nim-json-rpc and nim-web3 have migrated from the stdlib std/json module to nim-json-serialization.
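The recurring pattern in this diff is the replacement of std/json `%`/`fromJson` converters with nim-json-serialization `writeValue`/`readValue` procs bound to nim-json-rpc's `JrpcConv` flavor, or with `useDefaultSerializationIn` when the default object layout is enough. A minimal sketch of that pattern follows; the `Example`/`HexBytes` types are hypothetical and the exact imports may differ per module:

```nim
import
  std/strutils,
  json_rpc/jsonmarshal   # brings in the JrpcConv flavor and the json_serialization reader/writer API

type
  Example = object
    enrSeq: uint64
    data: string
  HexBytes = distinct string

# Simple case: derive the default object (de)serialization for the JrpcConv flavor.
Example.useDefaultSerializationIn JrpcConv

# Custom case: hand-written writer/reader replacing the old `%` / `fromJson` helpers.
proc writeValue*(w: var JsonWriter[JrpcConv], v: HexBytes) {.gcsafe, raises: [IOError].} =
  # serialize as a plain JSON string
  w.writeValue(string v)

proc readValue*(r: var JsonReader[JrpcConv], val: var HexBytes)
    {.gcsafe, raises: [IOError, JsonReaderError].} =
  let s = r.parseString()
  if not s.startsWith("0x"):
    # report malformed input through the reader instead of raising ValueError
    r.raiseUnexpectedValue("HexBytes: missing 0x prefix")
  val = HexBytes(s)

# Whole values are encoded/decoded through the flavor instead of std/json:
let json = JrpcConv.encode(Example(enrSeq: 1, data: "0x00"))
let back = JrpcConv.decode(json, Example)
```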
parent 964b355dc8
commit 3e21281d12
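On the client side of the same migration (visible in the test changes below), `RpcClient.call` now yields the raw JSON response as a `JsonString` rather than a parsed `JsonNode`, so callers decode it explicitly with `JrpcConv.decode`. A hedged sketch, with a hypothetical `NodeInfoExample` result type; the chronos import is assumed since nim-json-rpc's client API is async:

```nim
import
  std/json,
  chronos,
  json_rpc/[rpcclient, jsonmarshal]

type
  NodeInfoExample = object
    nodeId: string
    enr: string

NodeInfoExample.useDefaultSerializationIn JrpcConv

proc getNodeInfo(client: RpcClient): Future[NodeInfoExample] {.async.} =
  # `call` hands back the raw JSON text (a JsonString), not a JsonNode,
  # so the caller picks the target type and decodes via the JrpcConv flavor.
  let jsonBytes = await client.call("discv5_nodeInfo", %[])
  return JrpcConv.decode(jsonBytes.string, NodeInfoExample)
```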
@@ -1,12 +1,12 @@
# Nimbus
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
std/os,
std/[os, json],
json_rpc/rpcclient,
json_rpc/errors, # TODO: should be exported in json_rpc/clients/httpclient
./rpc_types, rpc_discovery_api # for the PongResponse

@@ -1,3 +1,10 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

## Portal State Network json-rpc calls
proc portal_stateNodeInfo(): NodeInfo
proc portal_stateRoutingTableInfo(): RoutingTableInfo

@@ -6,8 +13,7 @@ proc portal_stateAddEnrs(enrs: seq[Record]): bool
proc portal_stateGetEnr(nodeId: NodeId): Record
proc portal_stateDeleteEnr(nodeId: NodeId): bool
proc portal_stateLookupEnr(nodeId: NodeId): Record
proc portal_statePing(enr: Record): tuple[
enrSeq: uint64, customPayload: string]
proc portal_statePing(enr: Record): PingResult
proc portal_stateFindNodes(enr: Record): seq[Record]
proc portal_stateFindContent(enr: Record, contentKey: string): JsonNode
proc portal_stateOffer(

@@ -26,8 +32,7 @@ proc portal_historyAddEnrs(enrs: seq[Record]): bool
proc portal_historyGetEnr(nodeId: NodeId): Record
proc portal_historyDeleteEnr(nodeId: NodeId): bool
proc portal_historyLookupEnr(nodeId: NodeId): Record
proc portal_historyPing(enr: Record): tuple[
enrSeq: uint64, customPayload: string]
proc portal_historyPing(enr: Record): PingResult
proc portal_historyFindNodes(enr: Record): seq[Record]
proc portal_historyFindContent(enr: Record, contentKey: string): JsonNode
proc portal_historyOffer(

@@ -46,8 +51,7 @@ proc portal_beaconAddEnrs(enrs: seq[Record]): bool
proc portal_beaconGetEnr(nodeId: NodeId): Record
proc portal_beaconDeleteEnr(nodeId: NodeId): bool
proc portal_beaconLookupEnr(nodeId: NodeId): Record
proc portal_beaconPing(enr: Record): tuple[
enrSeq: uint64, customPayload: string]
proc portal_beaconPing(enr: Record): PingResult
proc portal_beaconFindNodes(enr: Record): seq[Record]
proc portal_beaconFindContent(enr: Record, contentKey: string): JsonNode
proc portal_beaconOffer(

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -21,6 +21,8 @@ type
recipientIP: string
recipientPort: uint16

PongResponse.useDefaultSerializationIn JrpcConv

proc installDiscoveryApiHandlers*(rpcServer: RpcServer|RpcProxy,
d: discv5_protocol.Protocol) =
## Discovery v5 JSON-RPC API such as defined here:

@@ -30,7 +32,7 @@ proc installDiscoveryApiHandlers*(rpcServer: RpcServer|RpcProxy,
return d.routingTable.getNodeInfo()

rpcServer.rpc("discv5_updateNodeInfo") do(
kvPairs: seq[tuple[key: string, value: string]]) -> NodeInfo:
kvPairs: seq[(string, string)]) -> NodeInfo:
# TODO: Not according to spec, as spec only allows socket address.
# portal-specs PR has been created with suggested change as is here.
let enrFields = kvPairs.map(

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -8,13 +8,17 @@
{.push raises: [].}

import
std/sequtils,
json_rpc/[rpcproxy, rpcserver], stew/byteutils,
std/[sequtils, json],
json_rpc/[rpcproxy, rpcserver],
json_serialization/std/tables,
stew/byteutils,
eth/p2p/discoveryv5/nodes_verification,
../network/wire/portal_protocol,
./rpc_types

export rpcserver
export
rpcserver,
tables

# Portal Network JSON-RPC impelentation as per specification:
# https://github.com/ethereum/portal-network-specs/tree/master/jsonrpc

@@ -24,6 +28,11 @@ type
content: string
utpTransfer: bool

ContentInfo.useDefaultSerializationIn JrpcConv
TraceContentLookupResult.useDefaultSerializationIn JrpcConv
TraceObject.useDefaultSerializationIn JrpcConv
NodeMetadata.useDefaultSerializationIn JrpcConv
TraceResponse.useDefaultSerializationIn JrpcConv

# Note:
# Using a string for the network parameter will give an error in the rpc macro:

@@ -87,7 +96,7 @@ proc installPortalApiHandlers*(
raise newException(ValueError, "Record not found in DHT lookup.")

rpcServer.rpc("portal_" & network & "Ping") do(
enr: Record) -> tuple[enrSeq: uint64, dataRadius: UInt256]:
enr: Record) -> PingResult:
let
node = toNodeWithAddress(enr)
pong = await p.ping(node)

@@ -119,7 +128,7 @@ proc installPortalApiHandlers*(
return nodes.get().map(proc(n: Node): Record = n.record)

rpcServer.rpc("portal_" & network & "FindContent") do(
enr: Record, contentKey: string) -> JsonNode:
enr: Record, contentKey: string) -> JsonString:
let
node = toNodeWithAddress(enr)
foundContentResult = await p.findContent(

@@ -131,14 +140,15 @@ proc installPortalApiHandlers*(
let foundContent = foundContentResult.get()
case foundContent.kind:
of Content:
return %ContentInfo(
let res = ContentInfo(
content: foundContent.content.to0xHex(),
utpTransfer: foundContent.utpTransfer
)
return JrpcConv.encode(res).JsonString
of Nodes:
var rpcRes = newJObject()
rpcRes["enrs"] = %foundContent.nodes.map(proc(n: Node): Record = n.record)
return rpcRes
let enrs = foundContent.nodes.map(proc(n: Node): Record = n.record)
let jsonEnrs = JrpcConv.encode(enrs)
return ("{\"enrs\":" & jsonEnrs & "}").JsonString

rpcServer.rpc("portal_" & network & "Offer") do(
enr: Record, contentKey: string, contentValue: string) -> string:

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -8,6 +8,7 @@
{.push raises: [].}

import
stint,
json_rpc/jsonmarshal,
stew/[results, byteutils],
eth/p2p/discoveryv5/[routing_table, enr, node]

@@ -23,6 +24,12 @@ type
localNodeId*: NodeId
buckets*: seq[seq[NodeId]]

PingResult* = tuple[enrSeq: uint64, dataRadius: UInt256]

NodeInfo.useDefaultSerializationIn JrpcConv
RoutingTableInfo.useDefaultSerializationIn JrpcConv
(string,string).useDefaultSerializationIn JrpcConv

func getNodeInfo*(r: RoutingTable): NodeInfo =
NodeInfo(enr: r.localNode.record, nodeId: r.localNode.id)

@@ -50,49 +57,61 @@ func toNodeWithAddress*(enr: Record): Node {.raises: [ValueError].} =
else:
node

func `%`*(value: Record): JsonNode =
newJString(value.toURI())
proc writeValue*(w: var JsonWriter[JrpcConv], v: Record)
{.gcsafe, raises: [IOError].} =
w.writeValue(v.toURI())

func fromJson*(n: JsonNode, argName: string, result: var Record)
{.raises: [ValueError].} =
n.kind.expect(JString, argName)
if not fromURI(result, n.getStr()):
raise newException(ValueError, "Invalid ENR")
proc readValue*(r: var JsonReader[JrpcConv], val: var Record)
{.gcsafe, raises: [IOError, JsonReaderError].} =
if not fromURI(val, r.parseString()):
r.raiseUnexpectedValue("Invalid ENR")

func `%`*(value: NodeId): JsonNode =
%("0x" & value.toHex())
proc writeValue*(w: var JsonWriter[JrpcConv], v: NodeId)
{.gcsafe, raises: [IOError].} =
w.writeValue("0x" & v.toHex())

func `%`*(value: Opt[NodeId]): JsonNode =
if value.isSome():
%("0x" & value.get().toHex())
proc writeValue*(w: var JsonWriter[JrpcConv], v: Opt[NodeId])
{.gcsafe, raises: [IOError].} =
if v.isSome():
w.writeValue("0x" & v.get().toHex())
else:
%("0x")
w.writeValue("0x")

func `%`*(value: Opt[seq[byte]]): JsonNode =
if value.isSome():
%(value.get().to0xHex())
proc readValue*(r: var JsonReader[JrpcConv], val: var NodeId)
{.gcsafe, raises: [IOError, JsonReaderError].} =
try:
val = NodeId.fromHex(r.parseString())
except ValueError as exc:
r.raiseUnexpectedValue("NodeId parser error: " & exc.msg)

proc writeValue*(w: var JsonWriter[JrpcConv], v: Opt[seq[byte]])
{.gcsafe, raises: [IOError].} =
if v.isSome():
w.writeValue(v.get().to0xHex())
else:
%("0x")
w.writeValue("0x")

func fromJson*(n: JsonNode, argName: string, result: var NodeId)
{.raises: [ValueError].} =
n.kind.expect(JString, argName)
proc readValue*(r: var JsonReader[JrpcConv], val: var seq[byte])
{.gcsafe, raises: [IOError, JsonReaderError].} =
try:
val = hexToSeqByte(r.parseString())
except ValueError as exc:
r.raiseUnexpectedValue("seq[byte] parser error: " & exc.msg)

# TODO: fromHex (and thus parse) call seems to let pass several invalid
# UInt256.
result = UInt256.fromHex(n.getStr())
proc writeValue*(w: var JsonWriter[JrpcConv], v: PingResult)
{.gcsafe, raises: [IOError].} =
w.beginRecord()
w.writeField("enrSeq", v.enrSeq)
w.writeField("dataRadius", "0x" & v.dataRadius.toHex)
w.endRecord()

# TODO: This one should go to nim-json-rpc but before we can do that we will
# have to update the vendor module to the current latest.
func fromJson*(n: JsonNode, argName: string, result: var uint16)
{.raises: [ValueError].} =
n.kind.expect(JInt, argName)
let asInt = n.getBiggestInt()
if asInt < 0:
raise newException(
ValueError, "JSON-RPC input is an unexpected negative value")
if asInt > BiggestInt(uint16.high()):
raise newException(
ValueError, "JSON-RPC input is too large for uint32")

result = uint16(asInt)
proc readValue*(r: var JsonReader[JrpcConv], val: var PingResult)
{.gcsafe, raises: [IOError, SerializationError].} =
try:
for field in r.readObjectFields():
case field:
of "enrSeq": val.enrSeq = r.parseInt(uint64)
of "dataRadius": val.dataRadius = UInt256.fromHex(r.parseString())
else: discard
except ValueError as exc:
r.raiseUnexpectedValue("PingResult parser error: " & exc.msg)

@@ -1,5 +1,5 @@
# Nimbus - Portal Network
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -48,7 +48,8 @@ procSuite "Discovery RPC":

asyncTest "Get local node info":
let tc = await setupTest(rng)
let resp = await tc.client.call("discv5_nodeInfo", %[])
let jsonBytes = await tc.client.call("discv5_nodeInfo", %[])
let resp = JrpcConv.decode(jsonBytes.string, JsonNode)

check:
resp.contains("nodeId")

@@ -669,7 +669,9 @@ proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
waitFor (RpcHttpClient(web3Client.get())).connect(config.web3Url.get().web3Url)

info "Listening to incoming network requests"
network.initBeaconSync(cfg, forkDigests, genesisBlockRoot, getBeaconTime)
network.registerProtocol(
PeerSync, PeerSync.NetworkState.init(
cfg, forkDigests, genesisBlockRoot, getBeaconTime))
network.addValidator(
getBeaconBlocksTopic(forkDigests.phase0),
proc (signedBlock: phase0.SignedBeaconBlock): errors.ValidationResult =

@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -8,37 +8,39 @@

import
std/hashes,
std/[hashes, json],
json_rpc/jsonmarshal,
stew/[byteutils, endians2],
eth/p2p/discoveryv5/node,
eth/utp/[utp_discv5_protocol, utp_router]

export jsonmarshal
export jsonmarshal, json

type SKey* = object
id*: uint16
nodeId*: NodeId

proc `%`*(value: SKey): JsonNode =
let hex = value.nodeId.toBytesBE().toHex()
let numId = value.id.toBytesBE().toHex()
proc writeValue*(w: var JsonWriter[JrpcConv], v: SKey)
{.gcsafe, raises: [IOError].} =
let hex = v.nodeId.toBytesBE().toHex()
let numId = v.id.toBytesBE().toHex()
let finalStr = hex & numId
newJString(finalStr)
w.writeValue(finalStr)

proc fromJson*(n: JsonNode, argName: string, result: var SKey)
{.raises: [ValueError].} =
n.kind.expect(JString, argName)
let str = n.getStr()
let strLen = len(str)
if (strLen >= 64):
proc readValue*(r: var JsonReader[JrpcConv], val: var SKey)
{.gcsafe, raises: [IOError, JsonReaderError].} =
let str = r.parseString()
if str.len < 64:
r.raiseUnexpectedValue("SKey: too short string")

try:
let nodeIdStr = str.substr(0, 63)
let connIdStr = str.substr(64)
let nodeId = NodeId.fromHex(nodeIdStr)
let connId = uint16.fromBytesBE(connIdStr.hexToSeqByte())
result = SKey(nodeId: nodeId, id: connId)
else:
raise newException(ValueError, "Too short string")
val = SKey(nodeId: nodeId, id: connId)
except ValueError as exc:
r.raiseUnexpectedValue("Skey parser error: " & exc.msg)

proc hash*(x: SKey): Hash =
var h = 0

@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -17,7 +17,8 @@ import
eth/p2p/discoveryv5/enr,
eth/utp/[utp_discv5_protocol, utp_router],
eth/keys,
../../rpc/rpc_discovery_api
../../rpc/rpc_discovery_api,
./utp_rpc_types

const
defaultListenAddress* = (static parseIpAddress("127.0.0.1"))

@@ -43,49 +44,14 @@ type AppConf* = object
desc: "RPC listening address"
name: "rpc-listen-address" .}: IpAddress

proc `%`*(value: enr.Record): JsonNode =
newJString(value.toURI())
proc writeValue*(w: var JsonWriter[JrpcConv], v: Record)
{.gcsafe, raises: [IOError].} =
w.writeValue(v.toURI())

proc fromJson*(n: JsonNode, argName: string, result: var Record)
{.raises: [ValueError].} =
n.kind.expect(JString, argName)
echo "ENr looks " & n.getStr()

if not fromURI(result, n.getStr()):
raise newException(ValueError, "Invalid ENR")

type SKey = object
id: uint16
nodeId: NodeId

proc `%`*(value: SKey): JsonNode =
let hex = value.nodeId.toBytesBE().toHex()
let numId = value.id.toBytesBE().toHex()
let finalStr = hex & numId
newJString(finalStr)

proc fromJson*(n: JsonNode, argName: string, result: var SKey)
{.raises: [ValueError].} =
n.kind.expect(JString, argName)
let str = n.getStr()
let strLen = len(str)
if (strLen >= 64):
let nodeIdStr = str.substr(0, 63)
let connIdStr = str.substr(64)
let nodeId = NodeId.fromHex(nodeIdStr)
let connId = uint16.fromBytesBE(connIdStr.hexToSeqByte())
result = SKey(nodeId: nodeId, id: connId)
else:
raise newException(ValueError, "Too short string")

proc hash(x: SKey): Hash =
var h = 0
h = h !& x.id.hash
h = h !& x.nodeId.hash
!$h

func toSKey(k: UtpSocketKey[NodeAddress]): SKey =
SKey(id: k.rcvId, nodeId: k.remoteAddress.nodeId)
proc readValue*(r: var JsonReader[JrpcConv], val: var Record)
{.gcsafe, raises: [IOError, JsonReaderError].} =
if not fromURI(val, r.parseString()):
r.raiseUnexpectedValue("Invalid ENR")

proc installUtpHandlers(
srv: RpcHttpServer,

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -14,6 +14,7 @@ import
chronicles,
nimcrypto/[hmac],
web3/engine_api_types,
web3/conversions,
json_rpc/[rpcclient],
./types

@@ -23,6 +24,9 @@ const
maxTimeDriftSeconds = 60'i64
defaultProtectedHeader = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9"

createRpcSigsFromNim(RpcClient):
proc engine_exchangeTransitionConfigurationV1(transitionConfiguration: TransitionConfigurationV1): TransitionConfigurationV1

proc base64urlEncode(x: auto): string =
base64.encode(x, safe = true).replace("=", "")

@@ -62,7 +66,7 @@ template genAuthTest(procName: untyped, timeDriftSeconds: int64, customAuthSecre
let client = getClient(env, token)

try:
discard waitFor client.call("engine_exchangeTransitionConfigurationV1", %[%tConf])
discard waitFor client.engine_exchangeTransitionConfigurationV1(tConf)
testCond authOk:
error "Authentication was supposed to fail authentication but passed"
except CatchableError:

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -208,19 +208,19 @@ proc verifyBeaconRootStorage*(client: RpcClient, payload: ExecutionPayload): boo
error "verifyBeaconRootStorage", msg=r.error
return false

if r.get != payload.timestamp.uint64.u256:
if r.get.u256 != payload.timestamp.uint64.u256:
error "verifyBeaconRootStorage storage 1",
expect=payload.timestamp.uint64.u256,
get=r.get
get=r.get.u256
return false

# Verify the beacon root key
r = client.storageAt(precompileAddress, beaconRootKey, blockNumber)
let parentBeaconBlockRoot = timestampToBeaconRoot(payload.timestamp)
if parentBeaconBlockRoot != beaconRoot(r.get):
if parentBeaconBlockRoot != r.get:
error "verifyBeaconRootStorage storage 2",
expect=parentBeaconBlockRoot.toHex,
get=beaconRoot(r.get).toHex
get=r.get.toHex
return false

return true

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -522,12 +522,12 @@ proc txByHash*(client: RpcClient, txHash: Hash256): Result[RPCTx, string] =
return err("failed to get transaction: " & txHash.data.toHex)
return ok(res.toRPCTx)

proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256): Result[UInt256, string] =
proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256): Result[FixedBytes[32], string] =
wrapTry:
let res = waitFor client.eth_getStorageAt(w3Addr(address), slot, blockId("latest"))
return ok(res)

proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256, number: common.BlockNumber): Result[UInt256, string] =
proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256, number: common.BlockNumber): Result[FixedBytes[32], string] =
wrapTry:
let res = waitFor client.eth_getStorageAt(w3Addr(address), slot, blockId(number.truncate(uint64)))
return ok(res)

@@ -562,19 +562,30 @@ proc verifyPoWProgress*(client: RpcClient, lastBlockHash: Hash256): Future[Resul

return err("verify PoW Progress timeout")

type
TraceOpts = object
disableStorage: bool
disableMemory: bool
disableState: bool
disableStateDiff: bool

TraceOpts.useDefaultSerializationIn JrpcConv

createRpcSigsFromNim(RpcClient):
proc debug_traceTransaction(hash: TxHash, opts: TraceOpts): JsonNode

proc debugPrevRandaoTransaction*(client: RpcClient, tx: Transaction, expectedPrevRandao: Hash256): Result[void, string] =
wrapTry:
let hash = w3Hash tx.rlpHash
# we only interested in stack, disable all other elems
let opts = %* {
"disableStorage": true,
"disableMemory": true,
"disableState": true,
"disableStateDiff": true
}
let opts = TraceOpts(
disableStorage: true,
disableMemory: true,
disableState: true,
disableStateDiff: true
)

let res = waitFor client.call("debug_traceTransaction", %[%hash, opts])
let res = waitFor client.debug_traceTransaction(hash, opts)
let structLogs = res["structLogs"]

var prevRandaoFound = false

@@ -607,8 +618,8 @@ template expectBalanceEqual*(res: Result[UInt256, string], account: EthAddress,
return err("invalid wd balance at $1, expect $2, get $3" % [
account.toHex, $expectedBalance, $res.get])

template expectStorageEqual*(res: Result[UInt256, string], account: EthAddress,
expectedValue: UInt256): auto =
template expectStorageEqual*(res: Result[FixedBytes[32], string], account: EthAddress,
expectedValue: FixedBytes[32]): auto =
if res.isErr:
return err(res.error)
if res.get != expectedValue:

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -25,6 +25,6 @@ proc txInPayload*(payload: ExecutionPayload, txHash: common.Hash256): bool =
proc checkPrevRandaoValue*(client: RpcClient, expectedPrevRandao: common.Hash256, blockNumber: uint64): bool =
let storageKey = blockNumber.u256
let r = client.storageAt(prevRandaoContractAddr, storageKey)
let expected = UInt256.fromBytesBE(expectedPrevRandao.data)
let expected = FixedBytes[32](expectedPrevRandao.data)
r.expectStorageEqual(expected)
return true

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -86,9 +86,6 @@ func timestampToBeaconRoot*(timestamp: Quantity): FixedBytes[32] =
let h = keccakHash(timestamp.uint64.toBytesBE)
FixedBytes[32](h.data)

func beaconRoot*(x: UInt256): FixedBytes[32] =
FixedBytes[32](x.toByteArrayBE)

proc randomBytes*(_: type common.Hash256): common.Hash256 =
doAssert randomBytes(result.data) == 32

@@ -248,7 +245,7 @@ template expectHash*(res: untyped, hash: common.Hash256) =
testCond s.blockHash == hash:
error "Unexpected expectHash", expect=hash.short, get=s.blockHash.short

template expectStorageEqual*(res: untyped, expectedValue: UInt256) =
template expectStorageEqual*(res: untyped, expectedValue: FixedBytes[32]) =
testCond res.isOk:
error "expectStorageEqual", msg=res.error
testCond res.get == expectedValue:

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -162,12 +162,12 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]

if latestPayloadNumber.truncate(int) >= ws.forkHeight:
# Shanghai
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256) # WARM_STORAGE_READ_COST
p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber) # tx succeeded
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256.w3FixedBytes) # WARM_STORAGE_READ_COST
p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber.w3FixedBytes) # tx succeeded
else:
# Pre-Shanghai
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 2600.u256) # COLD_ACCOUNT_ACCESS_COST
p.expectStorageEqual(PUSH0_ADDRESS, 0.u256) # tx must've failed
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 2600.u256.w3FixedBytes) # COLD_ACCOUNT_ACCESS_COST
p.expectStorageEqual(PUSH0_ADDRESS, 0.u256.w3FixedBytes) # tx must've failed

ok()

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -14,7 +14,8 @@ import
json_rpc/[rpcclient],
stew/[byteutils, results],
../engine_client,
../../../nimbus/utils/utils
../../../nimbus/utils/utils,
../../../nimbus/beacon/web3_eth_conv

type
Withdrawals* = ref object

@@ -93,7 +94,7 @@ proc verifyWithdrawals*(wh: WDHistory, blockNumber: uint64, rpcBlock: Option[UIn
client.storageAt(account, 0.u256, rpcBlock.get)
else:
client.storageAt(account, 0.u256)
s.expectStorageEqual(account, 0.u256)
s.expectStorageEqual(account, 0.u256.w3FixedBytes)
ok()

# Create a new copy of the withdrawals history

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -101,7 +101,7 @@ proc validatePostState(node: JsonNode, t: TestEnv): bool =
echo sRes.error
return false

if val != sRes.value:
if val.w3FixedBytes != sRes.value:
echo "storage recieved from account 0x",
account.toHex,
" at slot 0x",

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -86,6 +86,9 @@ func u64*(x: Option[Web3Quantity]): Option[uint64] =
func u256*(x: Web3Quantity): UInt256 =
u256(x.uint64)

func u256*(x: FixedBytes[32]): UInt256 =
UInt256.fromBytesBE(x.bytes)

func ethTime*(x: Web3Quantity): common.EthTime =
common.EthTime(x)

@@ -103,7 +106,7 @@ func ethHashes*(list: openArray[Web3Hash]): seq[common.Hash256] =
func ethHashes*(list: Option[seq[Web3Hash]]): Option[seq[common.Hash256]] =
if list.isNone: none(seq[common.Hash256])
else: some ethHashes(list.get)

func ethAddr*(x: Web3Address): common.EthAddress =
EthAddress x

@@ -218,6 +221,9 @@ func w3Qty*(x: Option[uint64]): Option[Web3Quantity] =
func w3Qty*(x: uint64): Web3Quantity =
Web3Quantity(x)

func w3FixedBytes*(x: UInt256): FixedBytes[32] =
FixedBytes[32](x.toBytesBE)

func w3ExtraData*(x: common.Blob): Web3ExtraData =
Web3ExtraData x

@@ -25,17 +25,20 @@ import
../../../sync/protocol,
../../../db/[core_db, distinct_tries, incomplete_db, storage_types],
../data_sources,
../../../beacon/web3_eth_conv
../../../beacon/web3_eth_conv,
web3/conversions,
web3

when defined(legacy_eth66_enabled):
import
../../../sync/protocol/eth66 as proto_eth66
from ../../../sync/protocol/eth66 import getNodeData

from web3 import Web3, BlockHash, BlockObject, FixedBytes, Address, ProofResponse, StorageProof, newWeb3, fromJson, fromHex, eth_getBlockByHash, eth_getBlockByNumber, eth_getCode, eth_getProof, blockId, `%`

export AsyncOperationFactory, AsyncDataSource

type
BlockHeader = eth_types.BlockHeader

var durationSpentDoingFetches*: times.Duration
var fetchCounter*: int

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -29,6 +29,9 @@ type
ip : string # address string
ports : NodePorts

NodePorts.useDefaultSerializationIn JrpcConv
NodeInfo.useDefaultSerializationIn JrpcConv

proc setupCommonRpc*(node: EthereumNode, conf: NimbusConf, server: RpcServer) =
server.rpc("web3_clientVersion") do() -> string:
result = conf.agentString

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -9,7 +9,7 @@

import
std/json,
json_rpc/rpcserver,
json_rpc/rpcserver,
./rpc_utils,
./rpc_types,
../tracer, ../vm_types,

@@ -27,6 +27,8 @@ type
disableState: Option[bool]
disableStateDiff: Option[bool]

TraceOptions.useDefaultSerializationIn JrpcConv

proc isTrue(x: Option[bool]): bool =
result = x.isSome and x.get() == true

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -17,7 +17,6 @@ export rpc_types

type
BlockHeader = eth_types.BlockHeader
Hash256 = eth_types.Hash256

{.push raises: [].}

@@ -28,13 +27,6 @@ proc topicToDigest(t: seq[eth_types.Topic]): seq[Web3Topic] =
resSeq.add(ht)
return resSeq

func ethTopics(topics: openArray[Option[seq[Web3Hash]]]): seq[Option[seq[Hash256]]] =
for x in topics:
if x.isSome:
result.add some(ethHashes(x.get))
else:
result.add none(seq[Hash256])

proc deriveLogs*(header: BlockHeader, transactions: seq[Transaction], receipts: seq[Receipt]): seq[FilterLog] =
## Derive log fields, does not deal with pending log, only the logs with
## full data set

@@ -67,37 +59,51 @@ proc deriveLogs*(header: BlockHeader, transactions: seq[Transaction], receipts:

return resLogs

func participateInFilter(x: AddressOrList): bool =
if x.kind == slkNull:
return false
if x.kind == slkList:
if x.list.len == 0:
return false
true

proc bloomFilter*(
bloom: eth_types.BloomFilter,
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): bool =
addresses: AddressOrList,
topics: seq[TopicOrList]): bool =

let bloomFilter = bFilter.BloomFilter(value: StUint[2048].fromBytesBE(bloom))

if len(addresses) > 0:
if addresses.participateInFilter():
var addrIncluded: bool = false
for address in addresses:
if bloomFilter.contains(address):
addrIncluded = true
break
if addresses.kind == slkSingle:
addrIncluded = bloomFilter.contains(addresses.single.bytes)
elif addresses.kind == slkList:
for address in addresses.list:
if bloomFilter.contains(address.bytes):
addrIncluded = true
break
if not addrIncluded:
return false

for sub in topics:

if sub.isNone():
if sub.kind == slkNull:
# catch all wildcard
continue

let subTops = sub.unsafeGet()
var topicIncluded = len(subTops) == 0
for topic in subTops:
# This is is quite not obvious, but passing topic as MDigest256 fails, as
# it does not use internal keccak256 hashing. To achieve desired semantics,
# we need use digest bare bytes so that they will be properly kec256 hashes
if bloomFilter.contains(topic.data):
var topicIncluded = false
if sub.kind == slkSingle:
if bloomFilter.contains(sub.single.bytes):
topicIncluded = true
break
else:
topicIncluded = sub.list.len == 0
for topic in sub.list:
# This is is quite not obvious, but passing topic as MDigest256 fails, as
# it does not use internal keccak256 hashing. To achieve desired semantics,
# we need use digest bare bytes so that they will be properly kec256 hashes
if bloomFilter.contains(topic.bytes):
topicIncluded = true
break

if not topicIncluded:
return false

@@ -106,34 +112,29 @@ proc bloomFilter*(

proc headerBloomFilter*(
header: BlockHeader,
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): bool =
addresses: AddressOrList,
topics: seq[TopicOrList]): bool =
return bloomFilter(header.bloom, addresses, topics)

proc headerBloomFilter*(
header: BlockHeader,
addresses: seq[Web3Address],
topics: seq[Option[seq[Web3Hash]]]): bool =
headerBloomFilter(header, addresses.ethAddrs, topics.ethTopics)

proc matchTopics(log: FilterLog, topics: seq[Option[seq[Hash256]]]): bool =
proc matchTopics(log: FilterLog, topics: seq[TopicOrList]): bool =
for i, sub in topics:

if sub.isNone():
if sub.kind == slkNull:
# null subtopic i.e it matches all possible move to nex
continue

let subTops = sub.unsafeGet()

# treat empty as wildcard, although caller should rather use none kind of
# option to indicate that. If nim would have NonEmptySeq type that would be
# use case for it.
var match = len(subTops) == 0

for topic in subTops:
if log.topics[i].ethHash == topic:
match = true
break
var match = false
if sub.kind == slkSingle:
match = log.topics[i] == sub.single
else:
# treat empty as wildcard, although caller should rather use none kind of
# option to indicate that. If nim would have NonEmptySeq type that would be
# use case for it.
match = sub.list.len == 0
for topic in sub.list:
if log.topics[i] == topic:
match = true
break

if not match:
return false

@@ -142,13 +143,18 @@ proc matchTopics(log: FilterLog, topics: seq[Option[seq[Hash256]]]): bool =

proc filterLogs*(
logs: openArray[FilterLog],
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): seq[FilterLog] =
addresses: AddressOrList,
topics: seq[TopicOrList]): seq[FilterLog] =

var filteredLogs: seq[FilterLog] = newSeq[FilterLog]()

for log in logs:
if len(addresses) > 0 and (not addresses.contains(log.address.ethAddr)):
if addresses.kind == slkSingle and (addresses.single != log.address):
continue

if addresses.kind == slkList and
addresses.list.len > 0 and
(not addresses.list.contains(log.address)):
continue

if len(topics) > len(log.topics):

@@ -160,9 +166,3 @@ proc filterLogs*(
filteredLogs.add(log)

return filteredLogs

proc filterLogs*(
logs: openArray[FilterLog],
addresses: seq[Web3Address],
topics: seq[Option[seq[Web3Hash]]]): seq[FilterLog] =
filterLogs(logs, addresses.ethAddrs, topics.ethTopics)

@@ -59,7 +59,7 @@ proc setupEthRpc*(
server.rpc("eth_chainId") do() -> Web3Quantity:
return w3Qty(distinctBase(com.chainId))

server.rpc("eth_syncing") do() -> JsonNode:
server.rpc("eth_syncing") do() -> SyncingStatus:
## Returns SyncObject or false when not syncing.
# TODO: make sure we are not syncing
# when we reach the recent block

@@ -70,9 +70,9 @@ proc setupEthRpc*(
currentBlock : w3Qty com.syncCurrent,
highestBlock : w3Qty com.syncHighest
)
result = %sync
result = SyncingStatus(syncing: true, syncObject: sync)
else:
result = newJBool(false)
result = SyncingStatus(syncing: false)

server.rpc("eth_coinbase") do() -> Web3Address:
## Returns the current coinbase address.

@@ -114,7 +114,7 @@ proc setupEthRpc*(
address = data.ethAddr
result = accDB.getBalance(address)

server.rpc("eth_getStorageAt") do(data: Web3Address, slot: UInt256, quantityTag: BlockTag) -> UInt256:
server.rpc("eth_getStorageAt") do(data: Web3Address, slot: UInt256, quantityTag: BlockTag) -> FixedBytes[32]:
## Returns the value from a storage position at a given address.
##
## data: address of the storage.

@@ -124,7 +124,7 @@ proc setupEthRpc*(
let
accDB = stateDBFromTag(quantityTag)
address = data.ethAddr
result = accDB.getStorage(address, slot)[0]
result = accDB.getStorage(address, slot)[0].w3FixedBytes

server.rpc("eth_getTransactionCount") do(data: Web3Address, quantityTag: BlockTag) -> Web3Quantity:
## Returns the number of transactions sent from an address.

@@ -279,7 +279,7 @@ proc setupEthRpc*(
res = rpcCallEvm(callData, header, com)
result = res.output

server.rpc("eth_estimateGas") do(call: EthCall, quantityTag: BlockTag) -> Web3Quantity:
server.rpc("eth_estimateGas") do(call: EthCall) -> Web3Quantity:
## Generates and returns an estimate of how much gas is necessary to allow the transaction to complete.
## The transaction will not be added to the blockchain. Note that the estimate may be significantly more than
## the amount of gas actually used by the transaction, for a variety of reasons including EVM mechanics and node performance.

@@ -288,7 +288,8 @@ proc setupEthRpc*(
## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter.
## Returns the amount of gas used.
let
header = chainDB.headerFromTag(quantityTag)
# TODO: use latest spec EthCall
header = chainDB.headerFromTag(blockId("latest"))
callData = callData(call)
# TODO: DEFAULT_RPC_GAS_CAP should configurable
gasUsed = rpcEstimateGas(callData, header, com, DEFAULT_RPC_GAS_CAP)

@@ -492,7 +493,7 @@ proc setupEthRpc*(
# would operate on this enum instead of raw strings. This change would need
# to be done on every endpoint to be consistent.
let fromHeader = chainDB.headerFromTag(filterOptions.fromBlock)
let toHeader = chainDB.headerFromTag(filterOptions.fromBlock)
let toHeader = chainDB.headerFromTag(filterOptions.toBlock)

# Note: if fromHeader.blockNumber > toHeader.blockNumber, no logs will be
# returned. This is consistent with, what other ethereum clients return

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Copyright (c) 2019-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -65,7 +65,7 @@ proc captureAccount(n: JsonNode, db: LedgerRef, address: EthAddress, name: strin
let codeHash = db.getCodeHash(address)
let storageRoot = db.getStorageRoot(address)

jaccount["nonce"] = %(nonce.Web3Quantity)
jaccount["nonce"] = %(conversions.`$`(nonce.Web3Quantity))
jaccount["balance"] = %("0x" & balance.toHex)

let code = db.getCode(address)

@@ -126,7 +126,9 @@ proc run(config: VerifiedProxyConf) {.raises: [CatchableError].} =
verifiedProxy.installEthApiHandlers()

info "Listening to incoming network requests"
network.initBeaconSync(cfg, forkDigests, genesisBlockRoot, getBeaconTime)
network.registerProtocol(
PeerSync, PeerSync.NetworkState.init(
cfg, forkDigests, genesisBlockRoot, getBeaconTime))
network.addValidator(
getBeaconBlocksTopic(forkDigests.phase0),
proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -36,12 +36,14 @@ proc request*(
params: JsonNode,
client: Option[RpcClient] = none[RpcClient]()): JsonNode =
if client.isSome():
result = waitFor client.unsafeGet().call(methodName, params)
let res = waitFor client.unsafeGet().call(methodName, params)
result = JrpcConv.decode(res.string, JsonNode)
else:
var client = newRpcHttpClient()
#client.httpMethod(MethodPost)
waitFor client.connect("127.0.0.1", Port(8545), false)
result = waitFor client.call(methodName, params)
let res = waitFor client.call(methodName, params)
result = JrpcConv.decode(res.string, JsonNode)
waitFor client.close()

proc requestBlockBody(

@@ -1,4 +1,4 @@
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -8,10 +8,9 @@

import
std/[options, strutils, typetraits],
std/[options, typetraits],
unittest2,
eth/[common/eth_types],
nimcrypto/hash,
stew/byteutils,
../nimbus/rpc/filters,
../nimbus/beacon/web3_eth_conv,

@@ -36,52 +35,52 @@ proc filtersMain*() =
log.logIndex.unsafeGet() == w3Qty(i.uint64)

test "Filter with empty parameters should return all logs":
let addrs = newSeq[EthAddress]()
let filtered = filterLogs(allLogs, addrs, @[])
let addrs = newSeq[Address]()
let filtered = filterLogs(allLogs, AddressOrList(kind: slkList, list: addrs), @[])
check:
len(filtered) == len(allLogs)

test "Filter and BloomFilter for one address with one valid log":
let address = hexToByteArray[20]("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, @[address], @[])
let address = Address.fromHex("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, AddressOrList(kind: slkList, list: @[address]), @[])

check:
headerBloomFilter(blockHeader4514995, @[address], @[])
headerBloomFilter(blockHeader4514995, AddressOrList(kind: slkList, list: @[address]), @[])
len(filteredLogs) == 1
filteredLogs[0].address == w3Addr address
filteredLogs[0].address == address

test "Filter and BloomFilter for one address with multiple valid logs":
let address = hexToByteArray[20]("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let filteredLogs = filterLogs(allLogs, @[address], @[])
let address = Address.fromHex("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let filteredLogs = filterLogs(allLogs, AddressOrList(kind: slkSingle, single: address), @[])

check:
headerBloomFilter(blockHeader4514995, @[address], @[])
headerBloomFilter(blockHeader4514995, AddressOrList(kind: slkList, list: @[address]), @[])
len(filteredLogs) == 2

for log in filteredLogs:
check:
log.address == w3Addr address
log.address == address

test "Filter and BloomFilter for multiple address with multiple valid logs":
let address = hexToByteArray[20]("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let address1 = hexToByteArray[20]("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, @[address, address1], @[])
let address = Address.fromHex("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let address1 = Address.fromHex("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, AddressOrList(kind: slkList, list: @[address, address1]), @[])

check:
headerBloomFilter(blockHeader4514995, @[address, address1], @[])
headerBloomFilter(blockHeader4514995, AddressOrList(kind: slkList, list: @[address, address1]), @[])
len(filteredLogs) == 3

test "Filter topics, too many filters":
let filteredLogs =
filterLogs(
allLogs,
@[],
AddressOrList(kind: slkList, list: @[]),
@[
none[seq[Web3Hash]](),
none[seq[Web3Hash]](),
none[seq[Web3Hash]](),
none[seq[Web3Hash]](),
none[seq[Web3Hash]]()
TopicOrList(kind: slkNull),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkNull)
]
)

@@ -94,8 +93,8 @@ proc filtersMain*() =
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic])]
AddressOrList(kind: slkList, list: @[]),
@[TopicOrList(kind: slkList, list: @[topic])]
)

check:

@@ -113,8 +112,8 @@ proc filtersMain*() =
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic]), some(@[topic1])]
AddressOrList(kind: slkList, list: @[]),
@[TopicOrList(kind: slkList, list: @[topic]), TopicOrList(kind: slkList, list: @[topic1])]
)

check:

@@ -133,8 +132,12 @@ proc filtersMain*() =
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic]), none[seq[Web3Hash]](), some(@[topic1])]
AddressOrList(kind: slkList, list: @[]),
@[
TopicOrList(kind: slkList, list: @[topic]),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkList, list: @[topic1])
]
)

check:

@@ -152,9 +155,9 @@ proc filtersMain*() =
let filteredLogs =
filterLogs(
allLogs,
@[],
AddressOrList(kind: slkList, list: @[]),
@[
some(@[topic, topic1])
TopicOrList(kind: slkList, list: @[topic, topic1])
]
)

@@ -175,10 +178,10 @@ proc filtersMain*() =
let filteredLogs =
filterLogs(
allLogs,
@[],
AddressOrList(kind: slkNull),
@[
some(@[topic, topic1]),
some(@[topic2, topic3])
TopicOrList(kind: slkList, list: @[topic, topic1]),
TopicOrList(kind: slkList, list: @[topic2, topic3])
]
)

@@ -193,7 +196,7 @@ proc filtersMain*() =
# general propety based tests
test "Specific address query should provide results only with given address":
for log in allLogs:
let filtered = filterLogs(allLogs, @[log.address], @[])
let filtered = filterLogs(allLogs, AddressOrList(kind: slkSingle, single: log.address), @[])

check:
len(filtered) > 0

@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -8,10 +8,10 @@
# those terms.

import
std/[json, os, strutils, typetraits],
std/[json, os, typetraits],
unittest2,
json_rpc/[rpcserver, rpcclient],
web3/[engine_api_types],
web3/[engine_api_types, conversions],
../nimbus/sync/protocol,
../nimbus/rpc,
../nimbus/common,

@@ -26,53 +26,38 @@ const
stepsFile = baseDir / "steps.json"

type
Step = ref object
StepObj = object
name: string
meth: string
`method`: string
params: JSonNode
expect: JsonNode
error : bool
expect: JsonString
error : JsonString

Steps = ref object
list: seq[Step]
Step = ref StepObj
Steps = seq[Step]

proc parseStep(s: Step, node: JsonNode) =
for k, v in node:
case k
of "name": s.name = v.getStr()
of "method": s.meth = v.getStr()
of "params": s.params = v
of "expect": s.expect = v
of "error": s.error = true
else:
doAssert(false, "unknown key: " & k)

proc parseSteps(node: JsonNode): Steps =
let ss = Steps(list: @[])
for n in node:
let s = Step()
s.parseStep(n)
ss.list.add s
ss
StepObj.useDefaultSerializationIn JrpcConv

proc forkChoiceUpdate(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
let arg = step.params[1]
if arg.kind == JNull:
step.params.elems.setLen(1)

let res = waitFor client.call(step.meth, step.params)
check toLowerAscii($res) == toLowerAscii($step.expect)
let jsonBytes = waitFor client.call(step.`method`, step.params)
let resA = JrpcConv.decode(jsonBytes.string, ForkchoiceUpdatedResponse)
let resB = JrpcConv.decode(step.expect.string, ForkchoiceUpdatedResponse)
check resA == resB

proc getPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
try:
let res = waitFor client.call(step.meth, step.params)
check toLowerAscii($res) == toLowerAscii($step.expect)
let jsonBytes = waitFor client.call(step.`method`, step.params)
let resA = JrpcConv.decode(jsonBytes.string, ExecutionPayloadV1)
let resB = JrpcConv.decode(step.expect.string, ExecutionPayloadV1)
check resA == resB
except CatchableError:
check step.error == true
check step.error.string.len > 0

proc newPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
let res = waitFor client.call(step.meth, step.params)
check toLowerAscii($res) == toLowerAscii($step.expect)
let jsonBytes = waitFor client.call(step.`method`, step.params)
let resA = JrpcConv.decode(jsonBytes.string, PayloadStatusV1)
let resB = JrpcConv.decode(step.expect.string, PayloadStatusV1)
check resA == resB

proc runTest(steps: Steps) =
let

@@ -107,9 +92,9 @@ proc runTest(steps: Steps) =
waitFor client.connect("127.0.0.1", conf.rpcPort)

suite "Engine API tests":
for i, step in steps.list:
for i, step in steps:
test $i & " " & step.name:
case step.meth
case step.`method`
of "engine_forkchoiceUpdatedV1":
forkChoiceUpdate(step, client, testStatusIMPL)
of "engine_getPayloadV1":

@@ -117,7 +102,7 @@ proc runTest(steps: Steps) =
of "engine_newPayloadV1":
newPayload(step, client, testStatusIMPL)
else:
doAssert(false, "unknown method: " & step.meth)
doAssert(false, "unknown method: " & step.`method`)

waitFor client.close()
waitFor sealingEngine.stop()

@@ -125,8 +110,7 @@ proc runTest(steps: Steps) =
waitFor rpcServer.closeWait()

proc testEngineAPI() =
let node = parseJSON(readFile(stepsFile))
let steps = parseSteps(node)
let steps = JrpcConv.loadFile(stepsFile, Steps)
runTest(steps)

proc toId(x: int): PayloadId =

@@ -289,14 +289,13 @@ proc rpcMain*() =

test "eth_syncing":
let res = await client.eth_syncing()
if res.kind == JBool:
if res.syncing == false:
let syncing = ethNode.peerPool.connectedNodes.len > 0
check res.getBool() == syncing
check syncing == false
else:
check res.kind == JObject
check com.syncStart == UInt256.fromHex(res["startingBlock"].getStr())
check com.syncCurrent == UInt256.fromHex(res["currentBlock"].getStr())
check com.syncHighest == UInt256.fromHex(res["highestBlock"].getStr())
check com.syncStart == res.syncObject.startingBlock.uint64.u256
check com.syncCurrent == res.syncObject.currentBlock.uint64.u256
check com.syncHighest == res.syncObject.highestBlock.uint64.u256

test "eth_coinbase":
let res = await client.eth_coinbase()

@@ -337,7 +336,7 @@ proc rpcMain*() =

test "eth_getStorageAt":
let res = await client.eth_getStorageAt(w3Addr("0xfff33a3bd36abdbd412707b8e310d6011454a7ae"), 0.u256, blockId(0'u64))
check 0.u256 == res
check w3Hash() == res

test "eth_getTransactionCount":
let res = await client.eth_getTransactionCount(w3Addr("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(0'u64))

@@ -369,7 +368,7 @@ proc rpcMain*() =
let msg = "hello world"
let msgBytes = @(msg.toOpenArrayByte(0, msg.len-1))

expect ValueError:
expect JsonRpcError:
discard await client.eth_sign(w3Addr(ks2), msgBytes)

let res = await client.eth_sign(w3Addr(signer), msgBytes)

@@ -404,7 +403,7 @@ proc rpcMain*() =

test "eth_call":
var ec = EthCall(
source: w3Addr(signer).some,
`from`: w3Addr(signer).some,
to: w3Addr(ks2).some,
gas: w3Qty(100000'u).some,
gasPrice: none(Quantity),

@@ -416,14 +415,14 @@ proc rpcMain*() =

test "eth_estimateGas":
var ec = EthCall(
source: w3Addr(signer).some,
`from`: w3Addr(signer).some,
to: w3Addr(ks3).some,
gas: w3Qty(42000'u).some,
gasPrice: w3Qty(100'u).some,
value: some 100.u256
)

let res = await client.eth_estimateGas(ec, "latest")
let res = await client.eth_estimateGas(ec)
check res == w3Qty(21000'u64)

test "eth_getBlockByHash":

@@ -535,7 +534,7 @@ proc rpcMain*() =
l.logIndex.unsafeGet() == w3Qty(i.uint64)
inc i

test "eth_getLogs by blockhash, filter logs at specific postions":
test "eth_getLogs by blockhash, filter logs at specific positions":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash

@@ -544,7 +543,11 @@ proc rpcMain*() =

let filterOptions = FilterOptions(
blockHash: some(w3Hash testHash),
topics: @[some(@[topic]), none[seq[Web3Hash]](), some(@[topic1])]
topics: @[
TopicOrList(kind: slkList, list: @[topic]),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkList, list: @[topic1])
]
)

let logs = await client.eth_getLogs(filterOptions)

@@ -568,8 +571,8 @@ proc rpcMain*() =
let filterOptions = FilterOptions(
blockHash: some(w3Hash testHash),
topics: @[
some(@[topic, topic1]),
some(@[topic2, topic3])
TopicOrList(kind: slkList, list: @[topic, topic1]),
TopicOrList(kind: slkList, list: @[topic2, topic3])
]
)

@@ -1 +1 @@
Subproject commit a8731e91bc336d930ac66f985d3b88ed7cf2a7d7
Subproject commit f90e946b6a92432cbfe7abb59f9c05af17bfddde

@@ -1 +1 @@
Subproject commit be1bb307dc40afb329e559eda601b74f9ab476d5
Subproject commit 4ab592bddb0d0c7c27d5f19a303f2e4a262b6f95

@@ -1 +1 @@
Subproject commit c815e71af028f5610cd52d678dd3cbdda9bfece1
Subproject commit 5404178a4004c4bc13c75853a02b1a74f2ca302c