Bump nim-eth/web3/kzg4844/nimbus-eth2 and related fixes (#2392)
Bump nim-eth, which requires a nimbus-eth2 bump, which in turn requires bumps of web3 and kzg4844, plus the related fixes for all of those bumps.
Parent: 1a96b4a97c
Commit: 4fd2ecddec
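Most of the mechanical changes below come from the bumped nim-eth, whose discv5 `newProtocol` API now takes `Opt` from the results package instead of `Option` from std/options. A minimal, self-contained sketch of the new pattern, assuming only the results package (the proc and values here are illustrative, not from the diff):

import results

proc describePort(p: Opt[int]): string =
  # Opt.isSome/get mirror the Option API, so call sites mostly just gain
  # the `Opt.` prefix on construction.
  if p.isSome():
    "port " & $p.get()
  else:
    "no port"

echo describePort(Opt.some(9000)) # -> "port 9000"
echo describePort(Opt.none(int))  # -> "no port"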
@@ -137,7 +137,7 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
     d = newProtocol(
       netkey,
       extIp,
-      none(Port),
+      Opt.none(Port),
       extUdpPort,
       # Note: The addition of default clientInfo to the ENR is a temporary
       # measure to easily identify & debug the clients used in the testnet.

@@ -147,9 +147,9 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
       previousRecord =
         # TODO: discv5/enr code still uses Option, to be changed.
         if previousEnr.isSome():
-          some(previousEnr.get())
+          Opt.some(previousEnr.get())
         else:
-          none(enr.Record)
+          Opt.none(enr.Record)
       ,
       bindIp = bindIp,
       bindPort = udpPort,

@@ -24,16 +24,16 @@ proc initDiscoveryNode*(
     address: Address,
     bootstrapRecords: openArray[Record] = [],
     localEnrFields: openArray[(string, seq[byte])] = [],
-    previousRecord = none[enr.Record](),
+    previousRecord = Opt.none(enr.Record),
 ): discv5_protocol.Protocol {.raises: [CatchableError].} =
   # set bucketIpLimit to allow bucket split
   let config = DiscoveryConfig.init(1000, 24, 5)

   result = newProtocol(
     privKey,
-    some(address.ip),
-    some(address.port),
-    some(address.port),
+    Opt.some(address.ip),
+    Opt.some(address.port),
+    Opt.some(address.port),
     bindPort = address.port,
     bootstrapRecords = bootstrapRecords,
     localEnrFields = localEnrFields,

@@ -264,7 +264,7 @@ proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
     portalRpcClient = newRpcHttpClient()

     optimisticHandler = proc(
-        signedBlock: ForkedMsgTrustedSignedBeaconBlock
+        signedBlock: ForkedSignedBeaconBlock
     ): Future[void] {.async: (raises: [CancelledError]).} =
       # TODO: Should not be gossiping optimistic blocks, but instead store them
       # in a cache and only gossip them after they are confirmed due to an LC

@@ -512,7 +512,6 @@ proc run(config: BeaconBridgeConf) {.raises: [CatchableError].} =
     withForkyHeader(optimisticHeader):
       when lcDataFork > LightClientDataFork.None:
         info "New LC optimistic header", optimistic_header = shortLog(forkyHeader)
-        optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)

   lightClient.onFinalizedHeader = onFinalizedHeader
   lightClient.onOptimisticHeader = onOptimisticHeader

@@ -231,7 +231,7 @@ proc run(config: PortalCliConf) =
   let d = newProtocol(
     config.networkKey,
     extIp,
-    none(Port),
+    Opt.none(Port),
     extUdpPort,
     bootstrapRecords = bootstrapRecords,
     bindIp = bindIp,

@@ -12,7 +12,7 @@ import
   chronicles,
   confutils,
   confutils/std/net as confNet,
-  stew/[byteutils, endians2],
+  stew/byteutils,
   json_rpc/servers/httpserver,
   eth/p2p/discoveryv5/protocol,
   eth/p2p/discoveryv5/enr,

@@ -54,7 +54,7 @@ proc installUtpHandlers(
   d: protocol.Protocol,
   s: UtpDiscv5Protocol,
   t: ref Table[SKey, UtpSocket[NodeAddress]],
-) {.raises: [CatchableError].} =
+) {.raises: [].} =
   srv.rpc("utp_connect") do(r: enr.Record) -> SKey:
     let nodeRes = newNode(r)

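The tightened pragma above relies on Nim's checked exception-effect tracking: with {.raises: [].} the compiler rejects any call whose CatchableError is not handled inside the proc. A tiny illustrative sketch (the proc and values are invented for illustration, not from the diff):

import std/strutils

proc mustNotRaise(s: string): int {.raises: [].} =
  # parseInt can raise ValueError; handling it locally is what lets the
  # empty raises list compile.
  try:
    parseInt(s)
  except ValueError:
    0

echo mustNotRaise("42") # -> 42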
@@ -148,9 +148,9 @@ when isMainModule:

   let d = newProtocol(
     key,
-    some(discAddress),
-    none(Port),
-    some(conf.udpPort),
+    Opt.some(discAddress),
+    Opt.none(Port),
+    Opt.some(conf.udpPort),
     bootstrapRecords = @[],
     bindIp = discAddress,
     bindPort = conf.udpPort,

@@ -83,9 +83,9 @@ template validatePayload(apiVersion, version, payload) =
         "excessBlobGas is expected from execution payload")

   if apiVersion >= Version.V4 or version >= Version.V4:
-    if payload.depositReceipts.isNone:
+    if payload.depositRequests.isNone:
       raise invalidParams("newPayload" & $apiVersion &
-        "depositReceipts is expected from execution payload")
+        "depositRequests is expected from execution payload")
     if payload.exits.isNone:
       raise invalidParams("newPayload" & $apiVersion &
         "exits is expected from execution payload")

@@ -10,6 +10,7 @@

 import
   std/[os, strutils],
+  stew/arrayops,
   nimcrypto/sha2,
   kzg4844/kzg_ex as kzg,
   results,

@@ -39,8 +40,8 @@ const


 # kzgToVersionedHash implements kzg_to_versioned_hash from EIP-4844
-proc kzgToVersionedHash*(kzg: kzg.KZGCommitment): VersionedHash =
-  result = sha256.digest(kzg.bytes)
+proc kzgToVersionedHash*(kzg: kzg.KzgCommitment): VersionedHash =
+  result = sha256.digest(kzg)
   result.data[0] = VERSIONED_HASH_VERSION_KZG

 # pointEvaluation implements point_evaluation_precompile from EIP-4844

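The kzgToVersionedHash change above is needed because the bumped kzg4844 exposes the commitment as a plain byte array rather than an object with a bytes field. As a self-contained illustration of kzg_to_versioned_hash itself (sha-256 of the commitment with the first byte set to the KZG version tag), assuming only nimcrypto and using a stand-in 48-byte type instead of the library's KzgCommitment:

import nimcrypto/sha2

const VERSIONED_HASH_VERSION_KZG = byte 0x01

type DummyCommitment = array[48, byte]  # stand-in for kzg.KzgCommitment

proc toVersionedHash(commitment: DummyCommitment): MDigest[256] =
  # sha256 of the raw commitment bytes, then tag the first byte with the
  # KZG version, as EIP-4844 specifies.
  result = sha256.digest(commitment)
  result.data[0] = VERSIONED_HASH_VERSION_KZG

echo toVersionedHash(default(DummyCommitment))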
@@ -54,20 +55,14 @@ proc pointEvaluation*(input: openArray[byte]): Result[void, string] =
   if input.len != PrecompileInputLength:
     return err("invalid input length")

-  var
-    versionedHash: KzgBytes32
-    z: KzgBytes32
-    y: KzgBytes32
-    commitment: KzgBytes48
-    kzgProof: KzgBytes48
+  let
+    versionedHash = KzgBytes32.initCopyFrom(input.toOpenArray(0, 31))
+    z = KzgBytes32.initCopyFrom(input.toOpenArray(32, 63))
+    y = KzgBytes32.initCopyFrom(input.toOpenArray(64, 95))
+    commitment = KzgBytes48.initCopyFrom(input.toOpenArray(96, 143))
+    kzgProof = KzgBytes48.initCopyFrom(input.toOpenArray(144, 191))

-  versionedHash.bytes[0..<32] = input[0..<32]
-  z.bytes[0..<32] = input[32..<64]
-  y.bytes[0..<32] = input[64..<96]
-  commitment.bytes[0..<48] = input[96..<144]
-  kzgProof.bytes[0..<48] = input[144..<192]
-
-  if kzgToVersionedHash(commitment).data != versionedHash.bytes:
+  if kzgToVersionedHash(commitment).data != versionedHash:
     return err("versionedHash should equal to kzgToVersionedHash(commitment)")

   # Verify KZG proof

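The rewritten pointEvaluation above leans on stew/arrayops' initCopyFrom (hence the new import earlier in this file) to build fixed-size arrays straight from slices of the input, replacing the var-then-assign-ranges pattern. A minimal sketch of that pattern, with an illustrative 32-byte alias standing in for the library's KzgBytes32:

import stew/arrayops

type Bytes32 = array[32, byte]  # stand-in for KzgBytes32

let input = block:              # 64 bytes of example input, values 0..63
  var s = newSeq[byte](64)
  for i in 0 ..< s.len: s[i] = byte(i)
  s

# Copy the first and second 32-byte slices into fixed-size arrays.
let
  first = Bytes32.initCopyFrom(input.toOpenArray(0, 31))
  second = Bytes32.initCopyFrom(input.toOpenArray(32, 63))

doAssert first[31] == 31 and second[0] == 32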
@@ -183,14 +178,13 @@ proc validateBlobTransactionWrapper*(tx: PooledTransaction):
   if not goodFormatted:
     return err("tx wrapper is ill formatted")

-  let commitments = tx.networkPayload.commitments.mapIt(
-    kzg.KzgCommitment(bytes: it))
+  let commitments = tx.networkPayload.commitments

   # Verify that commitments match the blobs by checking the KZG proof
   let res = kzg.verifyBlobKzgProofBatch(
-    tx.networkPayload.blobs.mapIt(kzg.KzgBlob(bytes: it)),
+    tx.networkPayload.blobs,
     commitments,
-    tx.networkPayload.proofs.mapIt(kzg.KzgProof(bytes: it)))
+    tx.networkPayload.proofs)

   if res.isErr:
     return err(res.error)

@@ -46,7 +46,6 @@ func getConfiguredChainId(networkMetadata: Eth2NetworkMetadata): Quantity =
     chainId =
       case net
       of mainnet: 1.Quantity
-      of goerli: 5.Quantity
       of sepolia: 11155111.Quantity
       of holesky: 17000.Quantity
   return chainId

@@ -130,7 +129,7 @@ proc run*(
     verifiedProxy = VerifiedRpcProxy.new(rpcProxy, blockCache, chainId)

     optimisticHandler = proc(
-        signedBlock: ForkedMsgTrustedSignedBeaconBlock
+        signedBlock: ForkedSignedBeaconBlock
     ): Future[void] {.async: (raises: [CancelledError]).} =
       notice "New LC optimistic block",
         opt = signedBlock.toBlockId(), wallSlot = getBeaconTime().slotOrZero

@@ -214,7 +213,6 @@ proc run*(
     withForkyHeader(optimisticHeader):
       when lcDataFork > LightClientDataFork.None:
         info "New LC optimistic header", optimistic_header = shortLog(forkyHeader)
-        optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
         if headerCallback != nil:
           try:
             headerCallback(cstring(Json.encode(forkyHeader)), 1)

@@ -1 +1 @@
-Subproject commit f169068df6c11a2aeba27584c60e354e19c42e94
+Subproject commit 26212c881b464ed64cac20442fb45144d3ecd3b3

@@ -1 +1 @@
-Subproject commit 2f5cee7bea0d62e2b502ff668f752bda7f3eb0c4
+Subproject commit f12616d0675d9f6346141ca95f0840ab227eb213

@@ -1 +1 @@
-Subproject commit b705f816439f0068ece8c234336bc7093222d00f
+Subproject commit fc226d4511199aa57a3fcd5cc44695c6b365a6bf

@@ -1 +1 @@
-Subproject commit c5326619a4d094db6f9e36c18992e0fa62fdc3d1
+Subproject commit 7a8c1d818a835312cc661e941154556ace0d70b4