Add getLogs implementation (#1143)

* Add eth_getLogs json-rpc endpoint in Fluffy and Nimbus
KonradStaniec 2022-06-29 17:44:08 +02:00 committed by GitHub
parent 7f0bc71b65
commit fdb048ed21
16 changed files with 825 additions and 50 deletions
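For context, a minimal client-side sketch of how the new endpoint is called, modelled on the test-suite usage further down. The import paths, the RpcHttpClient setup and the block hash are assumptions and not part of this diff:

import
  std/options,
  chronos,                       # async/await; nim-json-rpc is chronos based
  json_rpc/rpcclient,            # RpcHttpClient; connection setup not shown
  eth/common/eth_types,          # Hash256
  ../nimbus/rpc/rpc_types        # FilterOptions, FilterLog (path assumed)

# The typed proc eth_getLogs(FilterOptions): seq[FilterLog] is generated from
# the client signature files touched by this commit (e.g. rpcclient/eth_api.nim
# in the tests); this proc only sketches the call.
proc printLogCount(client: RpcHttpClient, blockHash: Hash256) {.async.} =
  # EIP-234 style query: a single block hash, no address or topic filters.
  let filterOptions = FilterOptions(blockHash: some(blockHash))
  let logs = await client.eth_getLogs(filterOptions)
  echo "eth_getLogs returned ", logs.len, " logs"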

@ -182,17 +182,11 @@ proc getBlockHeader*(
return maybeHeader
proc getBlock*(
h: HistoryNetwork, chainId: uint16, hash: BlockHash):
Future[Option[Block]] {.async.} =
let maybeHeader = await h.getBlockHeader(chainId, hash)
if maybeHeader.isNone():
# We do not have the header for the given hash, so we would not be able to
# validate that a received body really belongs to it
return none(Block)
let header = maybeHeader.unsafeGet()
proc getBlockBody*(
h: HistoryNetwork,
chainId: uint16,
hash: BlockHash,
header: BlockHeader): Future[Option[BlockBody]] {.async.} =
let (keyEncoded, contentId) = getEncodedKeyForContent(blockBody, chainId, hash)
@ -200,20 +194,20 @@ proc getBlock*(
if maybeBodyFromDb.isSome():
info "Fetched block body from database", hash
return some[Block]((header, maybeBodyFromDb.unsafeGet()))
return some[BlockBody](maybeBodyFromDb.unsafeGet())
let maybeBodyContent = await h.portalProtocol.contentLookup(keyEncoded, contentId)
if maybeBodyContent.isNone():
warn "Failed fetching block body from the network", hash
return none(Block)
return none(BlockBody)
let bodyContent = maybeBodyContent.unsafeGet()
let maybeBody = validateBodyBytes(bodyContent.content, header.txRoot, header.ommersHash)
if maybeBody.isNone():
return none(Block)
return none(BlockBody)
info "Fetched block body from the network", hash
@ -228,7 +222,28 @@ proc getBlock*(
h.portalProtocol.storeContent(contentId, bodyContent.content)
return some[Block]((header, blockBody))
return some(blockBody)
proc getBlock*(
h: HistoryNetwork, chainId: uint16, hash: BlockHash):
Future[Option[Block]] {.async.} =
let maybeHeader = await h.getBlockHeader(chainId, hash)
if maybeHeader.isNone():
# We do not have the header for the given hash, so we would not be able to
# validate that a received body really belongs to it
return none(Block)
let header = maybeHeader.unsafeGet()
let maybeBody = await h.getBlockBody(chainId, hash, header)
if maybeBody.isNone():
return none(Block)
let body = maybeBody.unsafeGet()
return some[Block]((header, body))
proc validateExpectedReceipts(
receipts: seq[Receipt],
@ -270,9 +285,9 @@ proc validateReceiptsBytes*(
proc getReceipts*(
h: HistoryNetwork,
chainId: uint16,
hash: BlockHash,
header: BlockHeader,
chainId: uint16): Future[Option[seq[Receipt]]] {.async.} =
header: BlockHeader): Future[Option[seq[Receipt]]] {.async.} =
# The header does not have any receipts; return early and do not save empty
# bytes into the database
if header.receiptRoot == BLANK_ROOT_HASH:

@ -1 +1,2 @@
proc eth_getBlockByHash(data: EthHashStr, fullTransactions: bool): Option[BlockObject]
proc eth_getLogs(filterOptions: FilterOptions): seq[FilterLog]

@ -12,7 +12,7 @@ import
json_rpc/[rpcproxy, rpcserver], nimcrypto/[hash, keccak], stew/byteutils,
web3/conversions, # sigh, for FixedBytes marshalling
eth/[common/eth_types, rlp],
../../nimbus/rpc/[rpc_types, hexstrings],
../../nimbus/rpc/[rpc_types, hexstrings, filters],
../../nimbus/transaction,
../network/history/[history_network, history_content]
@ -172,7 +172,7 @@ proc installEthApiHandlers*(
rpcServerWithProxy.registerProxyMethod("eth_getFilterLogs")
rpcServerWithProxy.registerProxyMethod("eth_getLogs")
# rpcServerWithProxy.registerProxyMethod("eth_getLogs")
rpcServerWithProxy.registerProxyMethod("eth_newBlockFilter")
@ -207,7 +207,6 @@ proc installEthApiHandlers*(
let (header, body) = blockRes.unsafeGet()
return some(BlockObject.init(header, body))
rpcServerWithProxy.rpc("eth_getBlockTransactionCountByHash") do(
data: EthHashStr) -> HexQuantityStr:
## Returns the number of transactions in a block from a block matching the
@ -235,3 +234,41 @@ proc installEthApiHandlers*(
# would need to be implemented to get this information.
# rpcServerWithProxy.rpc("eth_getTransactionReceipt") do(
# data: EthHashStr) -> Option[ReceiptObject]:
rpcServerWithProxy.rpc("eth_getLogs") do(filterOptions: FilterOptions) -> seq[FilterLog]:
if filterOptions.blockhash.isNone():
# Currently only queries with a provided blockhash are supported. Supporting
# range queries would require an indices network.
raise newException(ValueError, "Unsupported query. Field `blockhash` needs to be provided")
else:
let hash = filterOptions.blockHash.unsafeGet()
let maybeHeader = await historyNetwork.getBlockHeader(1'u16, hash)
if maybeHeader.isNone():
raise newException(ValueError, "Could not find header with requested hash")
let header = maybeHeader.unsafeGet()
if headerBloomFilter(header, filterOptions.address, filterOptions.topics):
# TODO: These queries could be done concurrently; investigate whether the
# portal wire protocol makes any assumptions about the use of concurrent
# queries.
let maybeBody = await historyNetwork.getBlockBody(1'u16, hash, header)
let maybeReceipts = await historyNetwork.getReceipts(1'u16, hash, header)
if maybeBody.isSome() and maybeReceipts.isSome():
let body = maybeBody.unsafeGet()
let receipts = maybeReceipts.unsafeGet()
let logs = deriveLogs(header, body.transactions, receipts)
let filteredLogs = filterLogs(logs, filterOptions.address, filterOptions.topics)
return filteredLogs
else:
if maybeBody.isNone():
raise newException(ValueError, "Could not find body for requested hash")
else:
raise newException(ValueError, "Could not find receipts for requested hash")
else:
# The bloom filter returned false, so we know there are no logs matching the
# given criteria
return @[]

File diff suppressed because one or more lines are too long

@ -192,7 +192,7 @@ procSuite "Portal testnet tests":
# because the data needs to propagate over the nodes. What one could do is
# add a json-rpc debug proc that returns whether the offer queue is empty or
# not. And then poll every node until all nodes have an empty queue.
await sleepAsync(20.seconds)
await sleepAsync(60.seconds)
let blockData = readBlockDataTable(dataFile)
check blockData.isOk()
@ -212,6 +212,16 @@ procSuite "Portal testnet tests":
tx.fromJson("tx", txObj)
check txObj.blockHash.get() == hash
let filterOptions = FilterOptions(
blockHash: some(hash)
)
let logs = await client.eth_getLogs(filterOptions)
for l in logs:
check:
l.blockHash == some(hash)
# TODO: Check ommersHash, need the headers and not just the hashes
# for uncle in blockObj.uncles:
# discard

@ -132,6 +132,22 @@ proc getBlockHeader*(self: BaseChainDB; n: BlockNumber, output: var BlockHeader)
if self.getBlockHash(n, blockHash):
result = self.getBlockHeader(blockHash, output)
proc getBlockHeaderWithHash*(self: BaseChainDB; n: BlockNumber): Option[(BlockHeader, Hash256)] =
## Returns the block header with the given number in the canonical chain,
## together with its hash. The hash is returned to avoid recomputing it.
var hash: Hash256
if self.getBlockHash(n, hash):
# Note: this will throw if header is not present.
var header: BlockHeader
if self.getBlockHeader(hash, header):
return some((header, hash))
else:
# This should not happen, but if it does, fail loudly, as it means something
# is seriously wrong
raiseAssert("Corrupted database: number->hash mapping present without a header in the database")
else:
return none[(BlockHeader, Hash256)]()
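A brief usage sketch, as in getLogsForRange further below (blockNumber is assumed to be in scope):

let res = chain.getBlockHeaderWithHash(blockNumber)
if res.isSome():
  # Tuple order is (header, hash); the hash comes straight from the
  # number->hash mapping and does not have to be recomputed from the header.
  let (header, hash) = res.unsafeGet()
  doAssert hash == header.blockHash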
proc getBlockHeader*(self: BaseChainDB; n: BlockNumber): BlockHeader =
## Returns the block header with the given number in the canonical chain.
## Raises BlockNotFound error if the block is not in the DB.
@ -373,6 +389,12 @@ iterator getReceipts*(self: BaseChainDB; receiptRoot: Hash256): Receipt =
break
inc receiptIdx
proc getReceipts*(self: BaseChainDB; receiptRoot: Hash256): seq[Receipt] =
var receipts = newSeq[Receipt]()
for r in self.getReceipts(receiptRoot):
receipts.add(r)
return receipts
proc readTerminalHash*(self: BaseChainDB; h: var Hash256): bool =
let bytes = self.db.get(terminalHashKey().toOpenArray)
if bytes.len == 0:

nimbus/rpc/filters.nim (new file, 143 lines)

@ -0,0 +1,143 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
std/options,
eth/common/eth_types,
eth/bloom as bFilter,
stint,
./rpc_types,
./hexstrings
proc topicToDigest(t: seq[Topic]): seq[Hash256] =
var resSeq: seq[Hash256] = @[]
for top in t:
let ht = Hash256(data: top)
resSeq.add(ht)
return resSeq
proc deriveLogs*(header: BlockHeader, transactions: seq[Transaction], receipts: seq[Receipt]): seq[FilterLog] =
## Derives log fields; does not deal with pending logs, only logs with a
## full data set
doAssert(len(transactions) == len(receipts))
var resLogs: seq[FilterLog] = @[]
var logIndex = 0
for i, receipt in receipts:
for log in receipt.logs:
let filterLog = FilterLog(
# TODO: investigate how to handle this field
# - in Nimbus, info about log removal would need to be kept at the
#   synchronization level, to keep track of potential re-orgs
# - in Fluffy there is no concept of a re-org
removed: false,
logIndex: some(encodeQuantity(uint32(logIndex))),
transactionIndex: some(encodeQuantity(uint32(i))),
transactionHash: some(transactions[i].rlpHash),
blockHash: some(header.blockHash),
blockNumber: some(encodeQuantity(header.blockNumber)),
address: log.address,
data: log.data,
# TODO topics should probably be kept as Hash256 in receipts
topics: topicToDigest(log.topics)
)
inc logIndex
resLogs.add(filterLog)
return resLogs
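A minimal sketch of deriveLogs in use, mirroring tests/test_filters.nim further below (the fixture names come from that test file):

import
  ../nimbus/rpc/filters,    # deriveLogs, filterLogs; import path as in the tests
  ./test_block_fixture      # blockHeader4514995, blockBody4514995, receipts4514995

# Each receipt's logs are flattened into FilterLog objects carrying the block
# hash/number, the transaction hash/index and a block-wide running log index.
let allLogs = deriveLogs(
  blockHeader4514995,
  blockBody4514995.transactions,
  receipts4514995)
doAssert allLogs.len == 54   # count confirmed against geth in the test suite below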
proc bloomFilter*(
bloom: eth_types.BloomFilter,
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): bool =
let bloomFilter = bFilter.BloomFilter(value: StUint[2048].fromBytesBE(bloom))
if len(addresses) > 0:
var addrIncluded: bool = false
for address in addresses:
if bloomFilter.contains(address):
addrIncluded = true
break
if not addrIncluded:
return false
for sub in topics:
if sub.isNone():
# catch all wildcard
continue
let subTops = sub.unsafeGet()
var topicIncluded = len(subTops) == 0
for topic in subTops:
# This is not obvious, but passing the topic as an MDigest[256] fails, as it
# would not use the internal keccak256 hashing. To achieve the desired
# semantics, we need to use the digest's bare bytes so that they are properly
# keccak256-hashed.
if bloomFilter.contains(topic.data):
topicIncluded = true
break
if not topicIncluded:
return false
return true
proc headerBloomFilter*(
header: BlockHeader,
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): bool =
return bloomFilter(header.bloom, addresses, topics)
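headerBloomFilter is intended as a cheap pre-check before fetching bodies and receipts, roughly as the Nimbus-side getLogsForBlock added later in this commit does. A sketch, assuming chain, hash, header and opts are in scope:

var logs: seq[FilterLog]
if headerBloomFilter(header, opts.address, opts.topics):
  # Bloom filters can give false positives but never false negatives, so a
  # positive check still requires the body and receipts to derive actual logs.
  let body = chain.getBlockBody(hash)
  let receipts = chain.getReceipts(header.receiptRoot)
  logs = filterLogs(deriveLogs(header, body.transactions, receipts),
                    opts.address, opts.topics)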
proc matchTopics(log: FilterLog, topics: seq[Option[seq[Hash256]]]): bool =
for i, sub in topics:
if sub.isNone():
# null subtopic, i.e. it matches everything; move on to the next one
continue
let subTops = sub.unsafeGet()
# Treat an empty seq as a wildcard, although the caller should rather use a
# none option to indicate that. If Nim had a NonEmptySeq type, this would be
# a use case for it.
var match = len(subTops) == 0
for topic in subTops:
if log.topics[i] == topic :
match = true
break
if not match:
return false
return true
proc filterLogs*(
logs: openArray[FilterLog],
addresses: seq[EthAddress],
topics: seq[Option[seq[Hash256]]]): seq[FilterLog] =
var filteredLogs: seq[FilterLog] = newSeq[FilterLog]()
for log in logs:
if len(addresses) > 0 and (not addresses.contains(log.address)):
continue
if len(topics) > len(log.topics):
continue
if not matchTopics(log, topics):
continue
filteredLogs.add(log)
return filteredLogs
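As a usage sketch, with the address and topic constants lifted from the tests below and allLogs as produced by deriveLogs above (the ddf252ad... topic is the keccak256 hash of the ERC-20 Transfer(address,address,uint256) signature):

import std/options, eth/common/eth_types, nimcrypto/hash, stew/byteutils

# Keep only logs emitted by `address` whose first topic is the Transfer event
# signature; later topic positions are left unconstrained (wildcards).
let address = hexToByteArray[20]("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let transferTopic = hash.fromHex(MDigest[256],
  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let filtered = filterLogs(allLogs, @[address], @[some(@[transferTopic])])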

@ -152,6 +152,22 @@ proc validateHashStr*(value: string) {.inline.} =
if unlikely(not value.isValidEthHash):
raise newException(ValueError, SInvalidHash & ": " & value)
proc getEthAddress*(value: string): Option[EthAddress] =
if value.isValidEthAddress:
var ethAddress: EthAddress
hexToByteArray(value, ethAddress)
return some(ethAddress)
else:
return none[EthAddress]()
proc getHash256*(value: string): Option[Hash256] =
if value.isValidHash256:
var hash: Hash256
hexToByteArray(value, hash.data)
return some(hash)
else:
return none[Hash256]()
# Initialisation
proc hexQuantityStr*(value: string): HexQuantityStr {.inline.} =

@ -18,7 +18,8 @@ import
rpc_types, rpc_utils,
../transaction/call_evm,
../utils/tx_pool,
../chain_config
../chain_config,
./filters
#[
Note:
@ -428,6 +429,74 @@ proc setupEthRpc*(node: EthereumNode, ctx: EthContext, chain: BaseChainDB, txPoo
uncle.totalDifficulty = encodeQuantity(chain.getScore(header.hash))
result = some(uncle)
proc getLogsForBlock(
chain: BaseChainDB,
hash: Hash256,
header: BlockHeader,
opts: FilterOptions): seq[FilterLog] =
if headerBloomFilter(header, opts.address, opts.topics):
let blockBody = chain.getBlockBody(hash)
let receipts = chain.getReceipts(header.receiptRoot)
# Note: this will hit an assertion error if the number of block transactions
# does not match the number of block receipts. This is fine, as the number of
# receipts should always match the number of transactions.
let logs = deriveLogs(header, blockBody.transactions, receipts)
let filteredLogs = filterLogs(logs, opts.address, opts.topics)
return filteredLogs
else:
return @[]
proc getLogsForRange(
chain: BaseChainDB,
start: UInt256,
finish: UInt256,
opts: FilterOptions): seq[FilterLog] =
var logs = newSeq[FilterLog]()
var i = start
while i <= finish:
let res = chain.getBlockHeaderWithHash(i)
if res.isSome():
let (header, hash) = res.unsafeGet()
let filtered = chain.getLogsForBlock(hash, header, opts)
logs.add(filtered)
else:
# Stop at the first missing header: there is no canonical block with this
# number, so later numbers cannot have logs either.
return logs
i = i + 1
return logs
server.rpc("eth_getLogs") do(filterOptions: FilterOptions) -> seq[FilterLog]:
## filterOptions: settings for this filter.
## Returns a list of all logs matching the given filter object.
## TODO: The current implementation is pretty naive and inefficient,
## as it requires fetching all transactions and all receipts from the database.
## Other clients (e.g. Geth):
## - store log-related data in receipts
## - have separate indexes for logs in a given block
## Both of those changes require improvements to the way we keep our data
## in Nimbus.
if filterOptions.blockHash.isSome():
let hash = filterOptions.blockHash.unsafeGet()
let header = chain.getBlockHeader(hash)
return getLogsForBlock(chain, hash, header, filterOptions)
else:
# TODO: do something smarter with tags. It would be best if the tag were an
# enum (Earliest, Latest, Pending, Number) and all operations worked on this
# enum instead of raw strings. This change would need to be made on every
# endpoint to keep them consistent.
let fromHeader = chain.headerFromTag(filterOptions.fromBlock)
let toHeader = chain.headerFromTag(filterOptions.toBlock)
# Note: if fromHeader.blockNumber > toHeader.blockNumber, no logs will be
# returned. This is consistent with what other Ethereum clients return.
let logs = chain.getLogsForRange(
fromHeader.blockNumber,
toHeader.blockNumber,
filterOptions
)
return logs
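For the range path, a client-side sketch mirroring the "eth_getLogs by blockNumber" test added below; client and testHeader are assumed to be in scope, inside an async context, with distinctBase from std/typetraits and encodeQuantity from hexstrings:

# fromBlock/toBlock are hex quantity strings, or block tags such as "latest";
# here both bounds point at the same block, i.e. a single-block range.
let fBlock: string = distinctBase(encodeQuantity(testHeader.blockNumber))
let tBlock: string = distinctBase(encodeQuantity(testHeader.blockNumber))
let logs = await client.eth_getLogs(FilterOptions(
  fromBlock: some(fBlock),
  toBlock: some(tBlock)))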
#[
server.rpc("eth_newFilter") do(filterOptions: FilterOptions) -> int:
## Creates a filter object, based on filter options, to notify when the state changes (logs).

@ -1,4 +1,6 @@
import
json_rpc/jsonmarshal,
stew/byteutils,
hexstrings, options, eth/[common, rlp], json
from
@ -90,18 +92,17 @@ type
FilterLog* = object
# Returned to user
removed*: bool # true when the log was removed, due to a chain reorganization. false if its a valid log.
logIndex*: Option[HexQuantityStr] # integer of the log index position in the block. null when its pending log.
transactionIndex*: Option[HexQuantityStr] # integer of the transactions index position log was created from. null when its pending log.
transactionHash*: Option[Hash256] # hash of the transactions this log was created from. null when its pending log.
blockHash*: Option[Hash256] # hash of the block where this log was in. null when its pending. null when its pending log.
blockNumber*: Option[BlockNumber] # the block number where this log was in. null when its pending. null when its pending log.
address*: EthAddress # address from which this log originated.
data*: seq[Hash256] # contains one or more 32 Bytes non-indexed arguments of the log.
topics*: array[4, Hash256] # array of 0 to 4 32 Bytes DATA of indexed log arguments.
# (In solidity: The first topic is the hash of the signature of the event.
# (e.g. Deposit(address,bytes32,uint256)), except you declared the event with the anonymous specifier.)
removed*: bool # true when the log was removed due to a chain reorganization, false if it's a valid log.
logIndex*: Option[HexQuantityStr] # integer of the log index position in the block. null when it's a pending log.
transactionIndex*: Option[HexQuantityStr] # integer of the transaction's index position the log was created from. null when it's a pending log.
transactionHash*: Option[Hash256] # hash of the transaction this log was created from. null when it's a pending log.
blockHash*: Option[Hash256] # hash of the block this log was in. null when it's a pending log.
blockNumber*: Option[HexQuantityStr] # the block number this log was in. null when it's a pending log.
address*: EthAddress # address from which this log originated.
data*: seq[byte] # contains one or more 32-byte non-indexed arguments of the log.
topics*: seq[Hash256] # array of 0 to 4 32-byte DATA of indexed log arguments.
# (In Solidity: the first topic is the hash of the signature of the event
# (e.g. Deposit(address,bytes32,uint256)), unless the event was declared with the anonymous specifier.)
ReceiptObject* = object
# A transaction receipt object, or null when no receipt was found:
transactionHash*: Hash256 # hash of the transaction.
@ -121,17 +122,134 @@ type
# Before EIP-1559, this is equal to the transaction's gas price.
# After, it is equal to baseFeePerGas + min(maxFeePerGas - baseFeePerGas, maxPriorityFeePerGas).
FilterDataKind* = enum fkItem, fkList
FilterData* = object
# Difficult to process variant objects in input data, as kind is immutable.
# TODO: This might need more work to handle "or" options
kind*: FilterDataKind
items*: seq[FilterData]
item*: UInt256
FilterOptions* = object
# Parameter from user
fromBlock*: Option[string] # (optional, default: "latest") integer block number, or "latest" for the last mined block or "pending", "earliest" for not yet mined transactions.
toBlock*: Option[string] # (optional, default: "latest") integer block number, or "latest" for the last mined block or "pending", "earliest" for not yet mined transactions.
address*: Option[EthAddress] # (optional) contract address or a list of addresses from which logs should originate.
topics*: Option[seq[FilterData]] # (optional) list of DATA topics. Topics are order-dependent. Each topic can also be a list of DATA with "or" options.
fromBlock*: Option[string] # (optional, default: "latest") integer block number, or "latest" for the last mined block or "pending", "earliest" for not yet mined transactions.
toBlock*: Option[string] # (optional, default: "latest") integer block number, or "latest" for the last mined block or "pending", "earliest" for not yet mined transactions.
address*: seq[EthAddress] # (optional) contract address or a list of addresses from which logs should originate.
topics*: seq[Option[seq[Hash256]]] # (optional) list of DATA topics. Topics are order-dependent. Each topic can also be a list of DATA with "or" options.
blockhash*: Option[Hash256] # (optional) hash of the block. If it's present, fromBlock and toBlock should be none. Introduced in EIP-234.
proc fromJson*(n: JsonNode, argName: string, result: var FilterOptions) =
proc getOptionString(argName: string): Option[string] =
let s = n.getOrDefault(argName)
if s == nil:
return none[string]()
elif s.kind == JNull:
return none[string]()
else:
s.kind.expect(JString, argName)
return some[string](s.getStr())
proc getAddress(): seq[EthAddress] =
## The address can be provided in two formats:
## 1. {"address": "hexAddress"}
## 2. {"address": ["hexAddress1", "hexAddress2" ...]}
## i.e. either as a single string or as an array of strings
let addressNode = n.getOrDefault("address")
if addressNode.isNil:
return @[]
else:
case addressNode.kind
of JString:
var addrs: EthAddress
fromJson(addressNode, "address", addrs)
return @[addrs]
of JArray:
var addresses = newSeq[EthAddress]()
for i, e in addressNode.elems:
if e.kind == JString and e.str.isValidEthAddress:
var address: EthAddress
hexToByteArray(e.getStr(), address)
addresses.add(address)
else:
let msg = "Address at index " & $i & "is not a valid Ethereum address"
raise newException(ValueError, msg)
return addresses
else:
raise newException(ValueError, "Parameter 'address` should be either string or of array of strings")
proc getTopics(): seq[Option[seq[Hash256]]] =
## Topics can be provided in many forms:
## [] "anything"
## [A] "A in first position (and anything after)"
## [null, B] "anything in first position AND B in second position (and anything after)"
## [A, B] "A in first position AND B in second position (and anything after)"
## [[A, B], [A, B]] "(A OR B) in first position AND (A OR B) in second position (and anything after)"
##
## In this custom deserialization, JNull deserializes to a none subtopic seq.
## An alternative would be to deserialize to an empty seq; it would simplify
## some of the filters code but would be less explicit.
let topicsNode = n.getOrDefault("topics")
if topicsNode.isNil:
return @[]
else:
topicsNode.kind.expect(JArray, "topics")
var filterArr = newSeq[Option[seq[Hash256]]]()
for i, e in topicsNode.elems:
case e.kind
of JNull:
# catch all match
filterArr.add(none[seq[Hash256]]())
of JString:
let hexstring = e.getStr()
# specific topic match
if hexstring.isValidHash256:
var hash: Hash256
hexToByteArray(hexstring, hash.data)
filterArr.add(some(@[hash]))
else:
let msg = "Invalid topic at pos: " & $i & ". Expected 32byte hex string"
raise newException(ValueError, msg)
of JArray:
if len(e.elems) == 0:
filterArr.add(none[seq[Hash256]]())
else:
var orFilters = newSeq[Hash256]()
for j, orTopic in e.elems:
if orTopic.kind == JString and orTopic.str.isValidHash256:
var hash: Hash256
hexToByteArray(orTopic.getStr(), hash.data)
orFilters.add(hash)
else:
let msg = "Invlid topic at pos: " & $i & ", sub pos: " & $j & ". Expected 32byte hex string"
raise newException(ValueError, msg)
filterArr.add(some(orFilters))
else:
let msg = "Invalid arg at pos: " & $i & ". Expected (null, string, array)"
raise newException(ValueError, msg)
return filterArr
proc getBlockHash(): Option[Hash256] =
let s = getOptionString("blockhash")
if s.isNone():
return none[Hash256]()
else:
let strHash = s.unsafeGet()
if strHash.isValidHash256:
var hash: Hash256
hexToByteArray(strHash, hash.data)
return some(hash)
else:
let msg = "Invalid 'blockhash'. Expected 32byte hex string"
raise newException(ValueError, msg)
n.kind.expect(JObject, argName)
let blockHash = getBlockHash()
# TODO: Tags should deserialize to some kind of enum, to avoid using raw strings
# in other layers. But this should be done on all endpoints to keep them consistent.
let fromBlock = getOptionString("fromBlock")
let toBlock = getOptionString("toBlock")
if blockHash.isSome():
if fromBlock.isSome() or toBlock.isSome():
raise newException(ValueError, "fromBlock and toBlock are not allowed if blockHash is present")
result.fromBlock = fromBlock
result.toBlock = toBlock
result.address = getAddress()
result.topics = getTopics()
result.blockhash = blockHash
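A short sketch of what this deserializer accepts; the JSON literal is only illustrative, and fromJson is the proc defined above:

import std/json

var fo: FilterOptions
let node = parseJson("""{
  "fromBlock": "0x1",
  "toBlock": "latest",
  "address": ["0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195"],
  "topics": [null, ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"]]
}""")
fromJson(node, "filterOptions", fo)
# null in the first position is a wildcard (none); the nested array in the
# second position becomes an OR-list with a single entry.
doAssert fo.topics.len == 2 and fo.topics[0].isNone()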

@ -13,6 +13,9 @@ import hexstrings, eth/[common, rlp, keys, trie/db], stew/byteutils, nimcrypto,
../utils, ../transaction,
../transaction/call_evm, ../forks
const
defaultTag = "latest"
func toAddress*(value: EthAddressStr): EthAddress = hexToPaddedByteArray[20](value.string)
func toHash*(value: array[32, byte]): Hash256 {.inline.} =
@ -46,6 +49,12 @@ proc headerFromTag*(chain: BaseChainDB, blockTag: string): BlockHeader =
let blockNum = stint.fromHex(UInt256, tag)
result = chain.getBlockHeader(blockNum.toBlockNumber)
proc headerFromTag*(chain: BaseChainDB, blockTag: Option[string]): BlockHeader =
if blockTag.isSome():
return chain.headerFromTag(blockTag.unsafeGet())
else:
return chain.headerFromTag(defaultTag)
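With this overload an absent tag falls back to defaultTag, so the following two calls are equivalent (a sketch; chain is assumed to be in scope and std/options imported):

let a = chain.headerFromTag(none[string]())   # falls back to defaultTag, i.e. "latest"
let b = chain.headerFromTag(some("latest"))
doAssert a.blockNumber == b.blockNumber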
proc calculateMedianGasPrice*(chain: BaseChainDB): GasInt =
var prices = newSeqOfCap[GasInt](64)
let header = chain.getCanonicalHead()

@ -24,6 +24,7 @@ cliBuilder:
./test_tracer_json,
./test_persistblock_json,
./test_rpc,
./test_filters,
./test_op_arith,
./test_op_bit,
./test_op_env,

@ -48,6 +48,7 @@ proc eth_getTransactionByBlockNumberAndIndex(quantityTag: string, quantity: HexQ
proc eth_getTransactionReceipt(data: Hash256): Option[ReceiptObject]
proc eth_getUncleByBlockHashAndIndex(data: Hash256, quantity: HexQuantityStr): Option[BlockObject]
proc eth_getUncleByBlockNumberAndIndex(quantityTag: string, quantity: HexQuantityStr): Option[BlockObject]
proc eth_getLogs(filterOptions: FilterOptions): seq[FilterLog]
#[
proc eth_getCompilers(): seq[string]

File diff suppressed because one or more lines are too long

tests/test_filters.nim (new file, 201 lines)

@ -0,0 +1,201 @@
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[options, strutils, typetraits],
unittest2,
eth/[common/eth_types, rlp],
nimcrypto/hash,
stew/byteutils,
../nimbus/rpc/filters,
./test_block_fixture
let allLogs = deriveLogs(blockHeader4514995, blockBody4514995.transactions, receipts4514995)
proc filtersMain*() =
# All magic numbers and addresses in the following tests are confirmed against
# geth eth_getLogs responses
suite "Log filters":
# specific tests comparing results with geth
test "Proper log number and indexes":
check:
len(allLogs) == 54
for i, log in allLogs:
check:
fromHex[int](distinctBase(log.logIndex.unsafeGet())) == i
test "Filter with empty parameters should return all logs":
let filtered = filterLogs(allLogs, @[], @[])
check:
len(filtered) == len(allLogs)
test "Filter and BloomFilter for one address with one valid log":
let address = hexToByteArray[20]("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, @[address], @[])
check:
headerBloomFilter(blockHeader4514995, @[address], @[])
len(filteredLogs) == 1
filteredLogs[0].address == address
test "Filter and BloomFilter for one address with multiple valid logs":
let address = hexToByteArray[20]("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let filteredLogs = filterLogs(allLogs, @[address], @[])
check:
headerBloomFilter(blockHeader4514995, @[address], @[])
len(filteredLogs) == 2
for log in filteredLogs:
check:
log.address == address
test "Filter and BloomFilter for multiple address with multiple valid logs":
let address = hexToByteArray[20]("0x878d7ed5c194349f37b18688964e8db1eb0fcca1")
let address1 = hexToByteArray[20]("0x0e0989b1f9b8a38983c2ba8053269ca62ec9b195")
let filteredLogs = filterLogs(allLogs, @[address, address1], @[])
check:
headerBloomFilter(blockHeader4514995, @[address, address1], @[])
len(filteredLogs) == 3
test "Filter topics, too many filters":
let filteredLogs =
filterLogs(
allLogs,
@[],
@[
none[seq[Hash256]](),
none[seq[Hash256]](),
none[seq[Hash256]](),
none[seq[Hash256]](),
none[seq[Hash256]]()
]
)
check:
len(filteredLogs) == 0
test "Filter topics, specific topic at first position":
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic])]
)
check:
len(filteredLogs) == 15
for log in filteredLogs:
check:
log.topics[0] == topic
test "Filter topics, specific topic at first position and second position":
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let topic1 = hash.fromHex(MDigest[256], "0x000000000000000000000000919040a01a0adcef25ed6ecbc6ab2a86ca6d77df")
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic]), some(@[topic1])]
)
check:
len(filteredLogs) == 1
for log in filteredLogs:
check:
log.topics[0] == topic
log.topics[1] == topic1
test "Filter topics, specific topic at first position and third position":
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let topic1 = hash.fromHex(MDigest[256], "0x000000000000000000000000fdc183d01a793613736cd40a5a578f49add1772b")
let filteredLogs =
filterLogs(
allLogs,
@[],
@[some(@[topic]), none[seq[Hash256]](), some(@[topic1])]
)
check:
len(filteredLogs) == 1
for log in filteredLogs:
check:
log.topics[0] == topic
log.topics[2] == topic1
test "Filter topics, or query at first position":
let topic = hash.fromHex(MDigest[256], "0x4a504a94899432a9846e1aa406dceb1bcfd538bb839071d49d1e5e23f5be30ef")
let topic1 = hash.fromHex(MDigest[256], "0x526441bb6c1aba3c9a4a6ca1d6545da9c2333c8c48343ef398eb858d72b79236")
let filteredLogs =
filterLogs(
allLogs,
@[],
@[
some(@[topic, topic1])
]
)
check:
len(filteredLogs) == 2
for log in filteredLogs:
check:
log.topics[0] == topic or log.topics[0] == topic1
test "Filter topics, or query at first position and or query at second position":
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let topic1 = hash.fromHex(MDigest[256], "0xa64da754fccf55aa65a1f0128a648633fade3884b236e879ee9f64c78df5d5d7")
let topic2 = hash.fromHex(MDigest[256], "0x000000000000000000000000e16c02eac87920033ac72fc55ee1df3151c75786")
let topic3 = hash.fromHex(MDigest[256], "0x000000000000000000000000b626a5facc4de1c813f5293ec3be31979f1d1c78")
let filteredLogs =
filterLogs(
allLogs,
@[],
@[
some(@[topic, topic1]),
some(@[topic2, topic3])
]
)
check:
len(filteredLogs) == 2
for log in filteredLogs:
check:
log.topics[0] == topic or log.topics[0] == topic1
log.topics[1] == topic2 or log.topics[1] == topic3
# general property-based tests
test "Specific address query should provide results only with given address":
for log in allLogs:
let filtered = filterLogs(allLogs, @[log.address], @[])
check:
len(filtered) > 0
for filteredLog in filtered:
check:
filteredLog.address == log.address
when isMainModule:
filtersMain()

@ -6,19 +6,19 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
asynctest, json, strformat, strutils, options, tables, os,
nimcrypto, stew/byteutils, times,
asynctest, json, strformat, strutils, options, tables, os, typetraits, nimcrypto,
nimcrypto/hash, stew/byteutils, times,
json_rpc/[rpcserver, rpcclient], eth/common as eth_common,
eth/[rlp, keys, trie/db, p2p/private/p2p_types],
../nimbus/rpc/[common, p2p, rpc_utils],
../nimbus/[constants, config, genesis, utils, transaction,
vm_state, vm_types, version],
../nimbus/db/[accounts_cache, db_chain],
../nimbus/db/[accounts_cache, db_chain, storage_types],
../nimbus/sync/protocol,
../nimbus/p2p/[chain, executor, executor/executor_helpers],
../nimbus/utils/[difficulty, tx_pool],
../nimbus/[context, chain_config],
./test_helpers, ./macro_assembler, ./rpcclient/eth_api
./test_helpers, ./macro_assembler, ./rpcclient/eth_api, ./test_block_fixture
const
zeroAddress = block:
@ -30,6 +30,14 @@ type
txHash: Hash256
blockHash: Hash256
proc persistFixtureBlock(chainDB: BaseChainDB) =
let header = getBlockHeader4514995()
# Manually inserting header to avoid any parent checks
chainDB.db.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
chainDB.addBlockNumberToHashLookup(header)
discard chainDB.persistTransactions(header.blockNumber, getBlockBody4514995().transactions)
discard chainDB.persistReceipts(getReceipts4514995())
proc setupEnv(chainDB: BaseChainDB, signer, ks2: EthAddress, ctx: EthContext): TestEnv =
var
parent = chainDB.getCanonicalHead()
@ -116,6 +124,7 @@ proc setupEnv(chainDB: BaseChainDB, signer, ks2: EthAddress, ctx: EthContext): T
header.ommersHash = chainDB.persistUncles(uncles)
discard chainDB.persistHeaderToDb(header)
chainDB.persistFixtureBlock()
result = TestEnv(
txHash: signedTx1.rlpHash,
blockHash: header.hash
@ -415,6 +424,91 @@ proc rpcMain*() =
let res2 = await client.eth_getUncleByBlockNumberAndIndex("latest", encodeQuantity(1))
check res2.isNone
test "eth_getLogs by blockhash, no filters":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let filterOptions = FilterOptions(
blockHash: some(testHash),
topics: @[]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 54
var i = 0
for l in logs:
check:
l.blockHash.isSome()
l.blockHash.unsafeGet() == testHash
fromHex[int](distinctBase(l.logIndex.unsafeGet())) == i
inc i
test "eth_getLogs by blockNumber, no filters":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let fBlock: string = distinctBase(encodeQuantity(testHeader.blockNumber))
let tBlock: string = distinctBase(encodeQuantity(testHeader.blockNumber))
let filterOptions = FilterOptions(
fromBlock: some(fBlock),
toBlock: some(tBlock)
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 54
var i = 0
for l in logs:
check:
l.blockHash.isSome()
l.blockHash.unsafeGet() == testHash
fromHex[int](distinctBase(l.logIndex.unsafeGet())) == i
inc i
test "eth_getLogs by blockhash, filter logs at specific postions":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let topic1 = hash.fromHex(MDigest[256], "0x000000000000000000000000fdc183d01a793613736cd40a5a578f49add1772b")
let filterOptions = FilterOptions(
blockHash: some(testHash),
topics: @[some(@[topic]), none[seq[Hash256]](), some(@[topic1])]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 1
test "eth_getLogs by blockhash, filter logs at specific postions with or options":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let topic = hash.fromHex(MDigest[256], "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
let topic1 = hash.fromHex(MDigest[256], "0xa64da754fccf55aa65a1f0128a648633fade3884b236e879ee9f64c78df5d5d7")
let topic2 = hash.fromHex(MDigest[256], "0x000000000000000000000000e16c02eac87920033ac72fc55ee1df3151c75786")
let topic3 = hash.fromHex(MDigest[256], "0x000000000000000000000000b626a5facc4de1c813f5293ec3be31979f1d1c78")
let filterOptions = FilterOptions(
blockHash: some(testHash),
topics: @[
some(@[topic, topic1]),
some(@[topic2, topic3])
]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 2
rpcServer.stop()
rpcServer.close()