Bump nim-kzg4844 and nimbus-eth2 for gcc-14 compatibility (#2357)

Authored by andri lim on 2024-06-14 21:41:59 +07:00, committed by GitHub
parent 68f462e3e4
commit 4c458190e9
7 changed files with 46 additions and 39 deletions
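
Every code change in this diff follows from one API shift in the bumped nim-kzg4844: blobs, commitments and proofs are no longer bare byte arrays but object types (KzgBlob, KzgCommitment, KzgProof, plus the KzgBytes32/KzgBytes48 aliases) that carry their raw data in a bytes field. The sketch below illustrates the wrap/unwrap pattern the hunks apply; DemoCommitment is a locally defined stand-in with the same shape, not the real library type.

import std/sequtils

type
  # Stand-in mirroring the wrapper shape used by the bumped nim-kzg4844:
  # a 48-byte KZG commitment/proof wrapped in an object with a `bytes` field.
  DemoCommitment = object
    bytes: array[48, byte]

# Raw byte arrays (e.g. NetworkPayload fields) are wrapped on the way into
# the kzg API ...
proc wrapAll(raw: seq[array[48, byte]]): seq[DemoCommitment] =
  raw.mapIt(DemoCommitment(bytes: it))

# ... and unwrapped with `.bytes` wherever plain byte arrays are still expected.
proc unwrapAll(wrapped: seq[DemoCommitment]): seq[array[48, byte]] =
  wrapped.mapIt(it.bytes)

This is also why std/sequtils.mapIt is newly imported in two of the files below: the conversions are done element-wise at each call site rather than by changing the surrounding data structures.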


@@ -89,7 +89,7 @@ func verifyBlob*(blobId: BlobID, blob: kzg.KzgBlob): bool =
          # This chunk is greater than the modulus, but we can't reduce it in this byte position, so we will try in the next byte position
          expectedFieldElem[blobByteIdx] = BLS_MODULUS[i]
-    if not equalMem(blob[chunkIdx*32].unsafeaddr, expectedFieldElem[0].addr, 32):
+    if not equalMem(blob.bytes[chunkIdx*32].unsafeaddr, expectedFieldElem[0].addr, 32):
      return false
    # Hash the current hash
@@ -109,24 +109,24 @@ proc fillBlob(blobId: BlobID): KzgBlob =
   var currentHashed = sha256.digest(blobIdBytes)
   for chunkIdx in 0..<FIELD_ELEMENTS_PER_BLOB:
-    copyMem(result[chunkIdx*32].addr, currentHashed.data[0].addr, 32)
+    copyMem(result.bytes[chunkIdx*32].addr, currentHashed.data[0].addr, 32)
     # Check that no 32 bytes chunks are greater than the BLS modulus
     for i in 0..<32:
       #blobByteIdx = ((chunkIdx + 1) * 32) - i - 1
       let blobByteIdx = (chunkIdx * 32) + i
-      if result[blobByteIdx] < BLS_MODULUS[i]:
+      if result.bytes[blobByteIdx] < BLS_MODULUS[i]:
         # go to next chunk
         break
-      elif result[blobByteIdx] >= BLS_MODULUS[i]:
+      elif result.bytes[blobByteIdx] >= BLS_MODULUS[i]:
         if BLS_MODULUS[i] > 0:
           # This chunk is greater than the modulus, and we can reduce it in this byte position
-          result[blobByteIdx] = BLS_MODULUS[i] - 1
+          result.bytes[blobByteIdx] = BLS_MODULUS[i] - 1
           # go to next chunk
           break
         else:
           # This chunk is greater than the modulus, but we can't reduce it in this byte position, so we will try in the next byte position
-          result[blobByteIdx] = BLS_MODULUS[i]
+          result.bytes[blobByteIdx] = BLS_MODULUS[i]
     # Hash the current hash
     currentHashed = sha256.digest(currentHashed.data)
@@ -140,7 +140,7 @@ proc generateBlob(blobid: BlobID): BlobCommitment =
 proc getVersionedHash*(blobid: BlobID, commitmentVersion: byte): Hash256 =
   let res = blobid.generateBlob()
-  result = sha256.digest(res.commitment)
+  result = sha256.digest(res.commitment.bytes)
   result.data[0] = commitmentVersion

 proc blobDataGenerator*(startBlobId: BlobID, blobCount: int): BlobTxWrapData =


@@ -186,9 +186,9 @@ proc getBlobDataInPayload*(pool: TestBlobTxPool, payload: ExecutionPayload): Res
     for i in 0..<blobTx.tx.versionedHashes.len:
       blobData.data.add BlobWrapData(
         versionedHash: blobTx.tx.versionedHashes[i],
-        commitment   : np.commitments[i],
-        blob         : np.blobs[i],
-        proof        : np.proofs[i],
+        commitment   : kzg.KzgCommitment(bytes: np.commitments[i]),
+        blob         : kzg.KzgBlob(bytes: np.blobs[i]),
+        proof        : kzg.KzgProof(bytes: np.proofs[i]),
       )
     blobData.txs.add blobTx.tx


@@ -161,15 +161,15 @@ proc verifyBlobBundle(step: NewPayloads,
       let bundleBlob = blobBundle.blobs[i].bytes
       let bundleProof = blobBundle.proofs[i].bytes

-      if bundleCommitment != blobData.commitment:
+      if bundleCommitment != blobData.commitment.bytes:
         error "KZG mismatch at index of the bundle", index=i
         return false
-      if bundleBlob != blobData.blob:
+      if bundleBlob != blobData.blob.bytes:
         error "blob mismatch at index of the bundle", index=i
         return false
-      if bundleProof != blobData.proof:
+      if bundleProof != blobData.proof.bytes:
         error "proof mismatch at index of the bundle", index=i
         return false


@@ -20,6 +20,8 @@ import
   ../../../nimbus/common,
   ../../../nimbus/utils/utils

+from std/sequtils import mapIt
+
 type
   BaseTx* = object of RootObj
     recipient* : Opt[EthAddress]
@@ -193,9 +195,9 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction =
         versionedHashes: system.move(blobData.hashes),
       ),
       networkPayload: NetworkPayload(
-        blobs: system.move(blobData.blobs),
-        commitments: system.move(blobData.commitments),
-        proofs: system.move(blobData.proofs),
+        blobs: blobData.blobs.mapIt(it.bytes),
+        commitments: blobData.commitments.mapIt(it.bytes),
+        proofs: blobData.proofs.mapIt(it.bytes),
       )
     )
   else:
@@ -337,9 +339,9 @@ proc makeTx*(params: MakeTxParams, tc: BlobTx): PooledTransaction =
   PooledTransaction(
     tx: signTransaction(unsignedTx, params.key, params.chainId, eip155 = true),
     networkPayload: NetworkPayload(
-      blobs      : data.blobs,
-      commitments: data.commitments,
-      proofs     : data.proofs,
+      blobs      : data.blobs.mapIt(it.bytes),
+      commitments: data.commitments.mapIt(it.bytes),
+      proofs     : data.proofs.mapIt(it.bytes),
     ),
   )


@@ -17,12 +17,12 @@ import
   ../constants,
   ../common/common

+from std/sequtils import mapIt
+
 {.push raises: [].}

 type
-  Bytes32 = array[32, byte]
   Bytes64 = array[64, byte]
-  Bytes48 = array[48, byte]

 const
   BLS_MODULUS_STR = "52435875175126190479447740508185965837690552500527637822603658699938581184513"
@@ -40,7 +40,7 @@ const
 # kzgToVersionedHash implements kzg_to_versioned_hash from EIP-4844
 proc kzgToVersionedHash*(kzg: kzg.KZGCommitment): VersionedHash =
-  result = sha256.digest(kzg)
+  result = sha256.digest(kzg.bytes)
   result.data[0] = VERSIONED_HASH_VERSION_KZG

 # pointEvaluation implements point_evaluation_precompile from EIP-4844
@@ -55,19 +55,19 @@ proc pointEvaluation*(input: openArray[byte]): Result[void, string] =
     return err("invalid input length")

   var
-    versionedHash: Bytes32
-    z: Bytes32
-    y: Bytes32
-    commitment: Bytes48
-    kzgProof: Bytes48
+    versionedHash: KzgBytes32
+    z: KzgBytes32
+    y: KzgBytes32
+    commitment: KzgBytes48
+    kzgProof: KzgBytes48

-  versionedHash[0..<32] = input[0..<32]
-  z[0..<32] = input[32..<64]
-  y[0..<32] = input[64..<96]
-  commitment[0..<48] = input[96..<144]
-  kzgProof[0..<48] = input[144..<192]
+  versionedHash.bytes[0..<32] = input[0..<32]
+  z.bytes[0..<32] = input[32..<64]
+  y.bytes[0..<32] = input[64..<96]
+  commitment.bytes[0..<48] = input[96..<144]
+  kzgProof.bytes[0..<48] = input[144..<192]

-  if kzgToVersionedHash(commitment).data != versionedHash:
+  if kzgToVersionedHash(commitment).data != versionedHash.bytes:
     return err("versionedHash should equal to kzgToVersionedHash(commitment)")

   # Verify KZG proof
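
For reference, the fixed 192-byte input that pointEvaluation slices above is the EIP-4844 point-evaluation precompile layout; the offsets in this hunk correspond to the fields below (the constant name is illustrative, not from the source).

# EIP-4844 point-evaluation precompile input, 192 bytes total:
#   input[  0 ..<  32]  versioned hash of the commitment
#   input[ 32 ..<  64]  z, the evaluation point (32-byte field element)
#   input[ 64 ..<  96]  y, the claimed evaluation (32-byte field element)
#   input[ 96 ..< 144]  KZG commitment (48 bytes)
#   input[144 ..< 192]  KZG proof (48 bytes)
const pointEvaluationInputLength = 32 + 32 + 32 + 48 + 48  # = 192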
@@ -183,9 +183,15 @@ proc validateBlobTransactionWrapper*(tx: PooledTransaction):
   if not goodFormatted:
     return err("tx wrapper is ill formatted")

+  let commitments = tx.networkPayload.commitments.mapIt(
+    kzg.KzgCommitment(bytes: it))
+
   # Verify that commitments match the blobs by checking the KZG proof
-  let res = kzg.verifyBlobKzgProofBatch(tx.networkPayload.blobs,
-    tx.networkPayload.commitments, tx.networkPayload.proofs)
+  let res = kzg.verifyBlobKzgProofBatch(
+    tx.networkPayload.blobs.mapIt(kzg.KzgBlob(bytes: it)),
+    commitments,
+    tx.networkPayload.proofs.mapIt(kzg.KzgProof(bytes: it)))
   if res.isErr:
     return err(res.error)

@@ -199,8 +205,7 @@ proc validateBlobTransactionWrapper*(tx: PooledTransaction):
     if tx.tx.versionedHashes[i].data[0] != VERSIONED_HASH_VERSION_KZG:
       return err("wrong kzg version in versioned hash at index " & $i)
-    if tx.tx.versionedHashes[i] !=
-      kzgToVersionedHash(tx.networkPayload.commitments[i]):
+    if tx.tx.versionedHashes[i] != kzgToVersionedHash(commitments[i]):
       return err("tx versioned hash not match commitments at index " & $i)
   ok()

vendor/nim-kzg4844 (vendored submodule)

@@ -1 +1 @@
-Subproject commit f12616d0675d9f6346141ca95f0840ab227eb213
+Subproject commit 2f5cee7bea0d62e2b502ff668f752bda7f3eb0c4

vendor/nimbus-eth2 (vendored submodule)

@@ -1 +1 @@
-Subproject commit fb0494e7399b62d7120a8c0b06a854a9a52b8eec
+Subproject commit c5326619a4d094db6f9e36c18992e0fa62fdc3d1