Remove unused files because of rebase conflict (#1748)

andri lim 2023-09-13 11:42:39 +07:00 committed by GitHub
parent 56215ed83f
commit 73622459b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 0 additions and 507 deletions

@@ -1,240 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[typetraits, times, strutils],
eth/[rlp, common],
json_rpc/errors,
eth/[trie, rlp, common, common/eth_types, trie/db],
stew/[results, byteutils, endians2],
../../constants,
../../db/core_db,
../../utils/utils,
../../rpc/execution_types,
./mergetypes
type Hash256 = eth_types.Hash256
proc computePayloadId*(headBlockHash: Hash256, params: SomePayloadAttributes): PayloadID =
var dest: Hash256
var ctx: sha256
ctx.init()
ctx.update(headBlockHash.data)
ctx.update(toBytesBE distinctBase params.timestamp)
ctx.update(distinctBase params.prevRandao)
ctx.update(distinctBase params.suggestedFeeRecipient)
# FIXME-Adam: Do we need to include the withdrawals in this calculation?
# https://github.com/ethereum/go-ethereum/pull/25838#discussion_r1024340383
# "The execution api specs define that this ID can be completely random. It
# used to be derived from payload attributes in the past, but maybe it's
# time to use a randomized ID to not break it with any changes to the
# attributes?"
ctx.finish dest.data
ctx.clear()
(distinctBase result)[0..7] = dest.data[0..7]
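# Illustrative sketch (not part of the original module): the last line above
# takes the first 8 bytes of the 32-byte digest as the payload ID. The
# stand-in digest below makes that prefix extraction concrete using only
# stdlib arrays.
when isMainModule:
  var digest: array[32, byte]
  for i in 0 ..< 32:
    digest[i] = byte(i)                # stand-in for the sha256 output
  var payloadId: array[8, byte]
  payloadId[0 .. 7] = digest[0 .. 7]   # same slice as in computePayloadId
  doAssert payloadId == [byte 0, 1, 2, 3, 4, 5, 6, 7]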
proc append*(w: var RlpWriter, q: Quantity) =
w.append(uint64(q))
proc append*(w: var RlpWriter, a: Address) =
w.append(distinctBase(a))
template unsafeQuantityToInt64(q: Quantity): int64 =
int64 q
template asEthHash*(hash: engine_api_types.BlockHash): Hash256 =
Hash256(data: distinctBase(hash))
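# Illustrative sketch (not part of the original module; assumes Quantity is a
# distinct uint64 as in nim-web3): the helpers above merely unwrap the
# distinct wrapper types before serialising.
when isMainModule:
  doAssert distinctBase(Quantity(7)) == 7'u64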
proc calcRootHashRlp*(items: openArray[seq[byte]]): Hash256 =
var tr = newCoreDbRef(LegacyDbMemory).mptPrune
for i, t in items:
tr.put(rlp.encode(i), t)
return tr.rootHash()
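# Illustrative sketch (not part of the original module): calcRootHashRlp keys
# each item by the RLP encoding of its list index, the scheme used for the
# canonical transactions trie. Small nonzero indices encode to a single byte
# (index 0 encodes to 0x80).
when isMainModule:
  doAssert rlp.encode(1'u64) == @[0x01'u8]
  doAssert rlp.encode(2'u64) == @[0x02'u8]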
proc toWithdrawal*(w: WithdrawalV1): Withdrawal =
Withdrawal(
index: uint64(w.index),
validatorIndex: uint64(w.validatorIndex),
address: distinctBase(w.address),
amount: uint64(w.amount) # AARDVARK: is this wei or gwei or what?
)
proc toWithdrawalV1*(w: Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validatorIndex),
address: Address(w.address),
amount: Quantity(w.amount) # AARDVARK: is this wei or gwei or what?
)
proc maybeWithdrawalsRoot(payload: SomeExecutionPayload): Option[Hash256] =
when payload is ExecutionPayloadV1:
none(Hash256)
else:
var wds = newSeqOfCap[Withdrawal](payload.withdrawals.len)
for wd in payload.withdrawals:
wds.add toWithdrawal(wd)
some(utils.calcWithdrawalsRoot(wds))
proc toWithdrawals(withdrawals: openArray[WithdrawalV1]): seq[Withdrawal] =
result = newSeqOfCap[Withdrawal](withdrawals.len)
for wd in withdrawals:
result.add toWithdrawal(wd)
proc maybeBlobGasUsed(payload: SomeExecutionPayload): Option[uint64] =
when payload is ExecutionPayloadV3:
some(payload.blobGasUsed.uint64)
else:
none(uint64)
proc maybeExcessBlobGas(payload: SomeExecutionPayload): Option[uint64] =
when payload is ExecutionPayloadV3:
some(payload.excessBlobGas.uint64)
else:
none(uint64)
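# Illustrative sketch (not part of the original module): the maybe* helpers
# above resolve the Option at compile time via `when payload is ...`; the
# same pattern with a plain generic (Option/some/none are already in scope
# via the imports at the top of the file):
when isMainModule:
  proc maybeLenSketch[T](x: T): Option[int] =
    when T is string:
      some(x.len)
    else:
      none(int)

  doAssert maybeLenSketch("abc") == some(3)
  doAssert maybeLenSketch(42) == none(int)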
proc toBlockHeader*(payload: SomeExecutionPayload): EthBlockHeader =
let transactions = seq[seq[byte]](payload.transactions)
let txRoot = calcRootHashRlp(transactions)
EthBlockHeader(
parentHash : payload.parentHash.asEthHash,
ommersHash : EMPTY_UNCLE_HASH,
coinbase : EthAddress payload.feeRecipient,
stateRoot : payload.stateRoot.asEthHash,
txRoot : txRoot,
receiptRoot : payload.receiptsRoot.asEthHash,
bloom : distinctBase(payload.logsBloom),
difficulty : default(DifficultyInt),
blockNumber : payload.blockNumber.distinctBase.u256,
gasLimit : payload.gasLimit.unsafeQuantityToInt64,
gasUsed : payload.gasUsed.unsafeQuantityToInt64,
timestamp : fromUnix payload.timestamp.unsafeQuantityToInt64,
extraData : bytes payload.extraData,
mixDigest : payload.prevRandao.asEthHash, # EIP-4399 redefine `mixDigest` -> `prevRandao`
nonce : default(BlockNonce),
fee : some payload.baseFeePerGas,
withdrawalsRoot: payload.maybeWithdrawalsRoot, # EIP-4895
blobGasUsed : payload.maybeBlobGasUsed, # EIP-4844
excessBlobGas : payload.maybeExcessBlobGas, # EIP-4844
)
proc toTypedTransaction*(tx: Transaction): TypedTransaction =
TypedTransaction(rlp.encode(tx))
proc toBlockBody*(payload: SomeExecutionPayload): BlockBody =
result.transactions.setLen(payload.transactions.len)
for i, tx in payload.transactions:
result.transactions[i] = rlp.decode(distinctBase tx, Transaction)
when payload is ExecutionPayloadV2:
result.withdrawals = some(payload.withdrawals.toWithdrawals)
when payload is ExecutionPayloadV3:
result.withdrawals = some(payload.withdrawals.toWithdrawals)
proc `$`*(x: BlockHash): string =
toHex(x)
template toValidHash*(x: Hash256): Option[BlockHash] =
some(BlockHash(x.data))
proc validateBlockHash*(header: EthBlockHeader, gotHash: Hash256): Result[void, PayloadStatusV1] =
let wantHash = header.blockHash
if wantHash != gotHash:
let status = PayloadStatusV1(
# This used to say invalid_block_hash, but see here:
# https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_newpayloadv2
# "INVALID_BLOCK_HASH status value is supplanted by INVALID."
status: PayloadExecutionStatus.invalid,
validationError: some("blockhash mismatch, want $1, got $2" % [$wantHash, $gotHash])
)
return err(status)
return ok()
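# Illustrative sketch (hypothetical usage, not from the original module): in
# the newPayload flow the header is rebuilt from the payload and checked
# against the advertised hash; a mismatching pair yields an `invalid` status.
when isMainModule:
  let rc = validateBlockHash(EthBlockHeader(), Hash256())  # hashes differ
  doAssert rc.isErr
  doAssert rc.error.status == PayloadExecutionStatus.invalid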
proc simpleFCU*(status: PayloadExecutionStatus): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus: PayloadStatusV1(status: status))
proc simpleFCU*(status: PayloadExecutionStatus, msg: string): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: status,
validationError: some(msg)
)
)
proc invalidFCU*(hash: Hash256 = Hash256()): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus:
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(hash)
)
)
proc validFCU*(id: Option[PayloadID], validHash: Hash256): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
),
payloadId: id
)
proc invalidStatus*(validHash: Hash256, msg: string): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash),
validationError: some(msg)
)
proc invalidStatus*(validHash: Hash256 = Hash256()): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(validHash: Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted
)
proc validStatus*(validHash: Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
)
proc invalidParams*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidParams,
msg: msg
)
proc unknownPayload*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnknownPayload,
msg: msg
)
proc invalidAttr*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidPayloadAttributes,
msg: msg
)
proc unsupportedFork*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnsupportedFork,
msg: msg
)
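# Illustrative sketch (hypothetical usage, not from the original module;
# assumes InvalidRequest from json_rpc/errors is a catchable exception type):
# the constructors above are meant to be raised from engine API handlers so
# the JSON-RPC layer reports the matching engine error code.
when isMainModule:
  try:
    raise invalidParams("missing payload attributes")
  except InvalidRequest as e:
    doAssert e.code == engineApiInvalidParams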

@@ -1,267 +0,0 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Aristo (aka Patricia) DB transcoder test
import
std/sequtils,
eth/common,
stew/[byteutils, endians2],
unittest2,
../../nimbus/db/kvstore_rocksdb,
../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_init, aristo_transcode, aristo_vid],
"."/[test_aristo_cache, test_helpers]
type
TesterDesc = object
prng: uint32 ## random state
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
state = state * 1103515245 + 12345
let val = (state shr 16) and 32767 # mod 2^15
(val shr 8).byte # Extract second byte
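# Illustrative sketch (not part of the original test): one LCG step with seed
# 42, restated inline so the expected byte can be checked without the
# TesterDesc plumbing.
when isMainModule:
  var state = 42'u32
  state = state * 1103515245'u32 + 12345'u32  # POSIX.1-2001 LCG step
  let val = (state shr 16) and 32767          # keep 15 bits, as above
  doAssert (val shr 8).byte == 74'u8          # deterministic first output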
proc rand[W: SomeInteger|VertexID](ap: var TesterDesc; T: type W): T =
var a: array[sizeof T,byte]
for n in 0 ..< sizeof T:
a[n] = ap.prng.posixPrngRand().byte
when sizeof(T) == 1:
  let w = uint8.fromBytesBE(a).T
elif sizeof(T) == 2:
  let w = uint16.fromBytesBE(a).T
elif sizeof(T) == 4:
  let w = uint32.fromBytesBE(a).T
else:
  let w = uint64.fromBytesBE(a).T
when T is SomeUnsignedInt:
# That way, `fromBytesBE()` can be applied to `uint`
result = w
else:
# That way the result is independent of endianness
(addr result).copyMem(unsafeAddr w, sizeof w)
proc vidRand(td: var TesterDesc; bits = 19): VertexID =
if bits < 64:
let
mask = (1u64 shl max(1,bits)) - 1
rval = td.rand uint64
(rval and mask).VertexID
else:
td.rand VertexID
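# Illustrative sketch (not part of the original test): with the default
# bits = 19 the mask above keeps only the low 19 bits, so every generated
# VertexID stays below 2^19.
when isMainModule:
  const bits = 19
  let mask = (1'u64 shl bits) - 1
  doAssert mask == 0x7FFFF'u64
  doAssert (0xDEADBEEF'u64 and mask) < (1'u64 shl bits)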
proc init(T: type TesterDesc; seed: int): TesterDesc =
result.prng = (seed and 0x7fffffff).uint32
# -----
proc getOrEmpty(rc: Result[Blob,AristoError]; noisy = true): Blob =
if rc.isOk:
return rc.value
noisy.say "***", "error=", rc.error
proc `+`(a: VertexID, b: int): VertexID =
(a.uint64 + b.uint64).VertexID
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_transcodeAccounts*(
noisy = true;
rocky: RocksStoreRef;
stopAfter = high(int);
) =
## Transcoder tests on accounts database
var
adb = newAristoDbRef BackendNone
count = -1
for (n, key, value) in rocky.walkAllDb():
if stopAfter < n:
break
count = n
# RLP <-> NIM object mapping
let node0 = value.decode(NodeRef)
block:
let blob0 = rlp.encode node0
if value != blob0:
check value.len == blob0.len
check value == blob0
noisy.say "***", "count=", count, " value=", value.rlpFromBytes.inspect
noisy.say "***", "count=", count, " blob0=", blob0.rlpFromBytes.inspect
# Provide the DbRecord with dummy links and an expanded payload. Registering
# the node as a vertex and re-converting it does the job.
var node = node0.updated(VertexID(1), adb)
if node.error != AristoError(0):
check node.error == AristoError(0)
else:
case node.vType:
of aristo_desc.Leaf:
let account = node.lData.rawBlob.decode(Account)
node.key[0] = account.storageRoot.to(HashKey)
node.lData = PayloadRef(
pType: AccountData,
account: AristoAccount(
nonce: account.nonce,
balance: account.balance,
codeHash: account.codehash,
storageID: adb.vidAttach HashLabel(
root: VertexID(1),
key: account.storageRoot.to(HashKey))))
of aristo_desc.Extension:
# key <-> vtx correspondence
check node.key[0] == node0.key[0]
check node.eVid != VertexID(0)
of aristo_desc.Branch:
for n in 0..15:
# key[n] <-> vtx[n] correspondence
check node.key[n] == node0.key[n]
if node.key[n].isValid != node.bVid[n].isValid:
check node.key[n].isValid == node.bVid[n].isValid
echo ">>> node=", node.pp
# This Nim object must match the same RLP-encoded byte stream
block:
var blob1 = rlp.encode node
if value != blob1:
check value.len == blob1.len
check value == blob1
noisy.say "***", "count=", count, " value=", value.rlpFromBytes.inspect
noisy.say "***", "count=", count, " blob1=", blob1.rlpFromBytes.inspect
# NIM object <-> DbRecord mapping
let dbr = node.blobify.getOrEmpty(noisy)
var node1 = dbr.deblobify(VertexRef).asNode(adb)
if node1.error != AristoError(0):
check node1.error == AristoError(0)
block:
if node != node1:
check node == node1
noisy.say "***", "count=", count, " node=", node.pp(adb)
noisy.say "***", "count=", count, " node1=", node1.pp(adb)
# Serialise back with expanded `AccountData` type payload (if any)
let dbr1 = node1.blobify.getOrEmpty(noisy)
block:
if dbr != dbr1:
check dbr == dbr1
noisy.say "***", "count=", count, " dbr=", dbr.toHex
noisy.say "***", "count=", count, " dbr1=", dbr1.toHex
# Serialise back as is
let dbr2 = dbr.deblobify(VertexRef).asNode(adb).blobify.getOrEmpty(noisy)
block:
if dbr != dbr2:
check dbr == dbr2
noisy.say "***", "count=", count, " dbr=", dbr.toHex
noisy.say "***", "count=", count, " dbr2=", dbr2.toHex
noisy.say "***", "records visited: ", count + 1
proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
## Transcode VID lists held in `AristoDb` descriptor
var td = TesterDesc.init seed
let db = newAristoDbRef BackendNone
# Add some random numbers
block:
let first = td.vidRand()
db.vidDispose first
var
expectedVids = 1
count = 1
# Feed in some numbers, some used and some discarded
while expectedVids < 5 or count < 5 + expectedVids:
count.inc
let vid = td.vidRand()
expectedVids += (vid < first).ord
db.vidDispose vid
check db.top.vGen.len == expectedVids
noisy.say "***", "vids=", db.top.vGen.len, " discarded=", count-expectedVids
# Serialise/deserialise
block:
let dbBlob = db.top.vGen.blobify
# Deserialise
let
db1 = newAristoDbRef BackendNone
rc = dbBlob.deblobify seq[VertexID]
if rc.isErr:
check rc.error == AristoError(0)
else:
db1.top.vGen = rc.value
check db.top.vGen == db1.top.vGen
# Make sure that recycled numbers are fetched first
let topVid = db.top.vGen[^1]
while 1 < db.top.vGen.len:
let w = db.vidFetch()
check w < topVid
check db.top.vGen.len == 1 and db.top.vGen[0] == topVid
# Get some consecutive vertex IDs
for n in 0 .. 5:
let w = db.vidFetch()
check w == topVid + n
check db.top.vGen.len == 1
# Repeat last test after clearing the cache
db.top.vGen.setLen(0)
for n in 0 .. 5:
let w = db.vidFetch()
check w == VertexID(2) + n # VertexID(1) is default root ID
check db.top.vGen.len == 1
# Recycling and re-org tests
db.top.vGen = @[8, 7, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 4, 5, 7].mapIt(VertexID(it))
db.top.vGen = @[8, 7, 6, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3].mapIt(VertexID(it))
db.top.vGen = @[5, 4, 3, 7].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5, 4, 3, 7].mapIt(VertexID(it))
db.top.vGen = @[5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5].mapIt(VertexID(it))
db.top.vGen = @[3, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 5].mapIt(VertexID(it))
db.top.vGen = @[4, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[4].mapIt(VertexID(it))
db.top.vGen.setLen(0)
db.vidReorg()
check db.top.vGen.len == 0
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------