import
  std/[tables, times, hashes, sets],
  chronicles, chronos,
  eth/p2p,
  eth/p2p/peer_pool,
  "."/[types, protocol],
  ./protocol/eth/eth_types,
  ./protocol/trace_config, # gossip noise control
  ../core/chain,
  ../core/tx_pool,
  ../core/tx_pool/tx_item

type
  HashToTime = TableRef[Hash256, Time]

  NewBlockHandler* = proc(
    arg: pointer,
    peer: Peer,
    blk: EthBlock,
    totalDifficulty: DifficultyInt) {.
      gcsafe, raises: [Defect, CatchableError].}

  NewBlockHashesHandler* = proc(
    arg: pointer,
    peer: Peer,
    hashes: openArray[NewBlockHashesAnnounce]) {.
      gcsafe, raises: [Defect, CatchableError].}

  NewBlockHandlerPair = object
    arg: pointer
    handler: NewBlockHandler

  NewBlockHashesHandlerPair = object
    arg: pointer
    handler: NewBlockHashesHandler

  EthWireRef* = ref object of EthWireBase
    db: ChainDBRef
    chain: ChainRef
    txPool: TxPoolRef
    peerPool: PeerPool
    disableTxPool: bool
    knownByPeer: Table[Peer, HashToTime]
    pending: HashSet[Hash256]
    lastCleanup: Time
    newBlockHandler: NewBlockHandlerPair
    newBlockHashesHandler: NewBlockHashesHandlerPair

  ReconnectRef = ref object
    pool: PeerPool
    node: Node

const
  NUM_PEERS_REBROADCAST_QUOTIENT = 4
  POOLED_STORAGE_TIME_LIMIT = initDuration(minutes = 20)
  PEER_LONG_BANTIME = chronos.minutes(150)

# ------------------------------------------------------------------------------
# Private functions: helper functions
# ------------------------------------------------------------------------------

proc notEnabled(name: string) =
  debug "Wire handler method is disabled", meth = name

proc notImplemented(name: string) =
  debug "Wire handler method not implemented", meth = name

proc inPool(ctx: EthWireRef, txHash: Hash256): bool =
  let res = ctx.txPool.getItem(txHash)
  res.isOk

proc inPoolAndOk(ctx: EthWireRef, txHash: Hash256): bool =
  let res = ctx.txPool.getItem(txHash)
  if res.isErr: return false
  res.get().reject == txInfoOk

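# Walk the canonical chain relative to a known header. The `skip` parameter
# mirrors the GetBlockHeaders request semantics: the neighbouring header to
# return is `skip + 1` block numbers away from `h`.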
proc successorHeader(db: ChainDBRef,
                     h: BlockHeader,
                     output: var BlockHeader,
                     skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
  let offset = 1 + skip.toBlockNumber
  if h.blockNumber <= (not 0.toBlockNumber) - offset:
    result = db.getBlockHeader(h.blockNumber + offset, output)

proc ancestorHeader(db: ChainDBRef,
                    h: BlockHeader,
                    output: var BlockHeader,
                    skip = 0'u): bool {.gcsafe, raises: [Defect,RlpError].} =
  let offset = 1 + skip.toBlockNumber
  if h.blockNumber >= offset:
    result = db.getBlockHeader(h.blockNumber - offset, output)

proc blockHeader(db: ChainDBRef,
                 b: HashOrNum,
                 output: var BlockHeader): bool
                 {.gcsafe, raises: [Defect,RlpError].} =
  if b.isHash:
    db.getBlockHeader(b.hash, output)
  else:
    db.getBlockHeader(b.number, output)

# ------------------------------------------------------------------------------
# Private functions: peers related functions
# ------------------------------------------------------------------------------

proc hash(peer: Peer): hashes.Hash =
  hash(peer.remote)

proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] =
  # do not send back tx or txhash to thisPeer
  for peer in peers(ctx.peerPool):
    if peer != thisPeer:
      result.add peer

proc banExpiredReconnect(arg: pointer) {.gcsafe, raises: [Defect].} =
  # Reconnect to peer after ban period if pool is empty
  try:
    let reconnect = cast[ReconnectRef](arg)
    if reconnect.pool.len > 0:
      return

    asyncSpawn reconnect.pool.connectToNode(reconnect.node)

  except TransportError:
    debug "Transport got closed during banExpiredReconnect"
  except CatchableError as e:
    debug "Exception in banExpiredReconnect", exc = e.name, err = e.msg

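# Disconnect a misbehaving peer and schedule a reconnection attempt once the
# ban period has expired (see banExpiredReconnect above).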
proc banPeer(pool: PeerPool, peer: Peer, banTime: chronos.Duration) {.async.} =
  try:
    await peer.disconnect(SubprotocolReason)

    let expired = Moment.fromNow(banTime)
    let reconnect = ReconnectRef(
      pool: pool,
      node: peer.remote
    )

    discard setTimer(
      expired,
      banExpiredReconnect,
      cast[pointer](reconnect)
    )

  except TransportError:
    debug "Transport got closed during banPeer"
  except CatchableError as e:
    debug "Exception in banPeer", exc = e.name, err = e.msg

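# Drop "known by peer" entries older than POOLED_STORAGE_TIME_LIMIT and forget
# peers whose entry map has become empty.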
proc cleanupKnownByPeer(ctx: EthWireRef) =
  let now = getTime()
  var tmp = initHashSet[Hash256]()
  for _, map in ctx.knownByPeer:
    for hash, time in map:
      if now - time >= POOLED_STORAGE_TIME_LIMIT:
        tmp.incl hash
    for hash in tmp:
      map.del(hash)
    tmp.clear()

  var tmpPeer = initHashSet[Peer]()
  for peer, map in ctx.knownByPeer:
    if map.len == 0:
      tmpPeer.incl peer

  for peer in tmpPeer:
    ctx.knownByPeer.del peer

  ctx.lastCleanup = now

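# Record which tx hashes a peer already knows about; the overload further
# below additionally returns the subset of hashes the peer did not know yet,
# so the caller can announce only those.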
proc addToKnownByPeer(ctx: EthWireRef, txHashes: openArray[Hash256], peer: Peer) =
  var map: HashToTime
  ctx.knownByPeer.withValue(peer, val) do:
    map = val[]
  do:
    map = newTable[Hash256, Time]()
    ctx.knownByPeer[peer] = map

  for txHash in txHashes:
    if txHash notin map:
      map[txHash] = getTime()

proc addToKnownByPeer(ctx: EthWireRef,
                      txHashes: openArray[Hash256],
                      peer: Peer,
                      newHashes: var seq[Hash256]) =
  var map: HashToTime
  ctx.knownByPeer.withValue(peer, val) do:
    map = val[]
  do:
    map = newTable[Hash256, Time]()
    ctx.knownByPeer[peer] = map

  newHashes = newSeqOfCap[Hash256](txHashes.len)
  for txHash in txHashes:
    if txHash notin map:
      map[txHash] = getTime()
      newHashes.add txHash

# ------------------------------------------------------------------------------
# Private functions: async workers
# ------------------------------------------------------------------------------

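# Announce tx hashes to the given peers, skipping hashes a peer is already
# known to have.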
proc sendNewTxHashes(ctx: EthWireRef,
                     txHashes: seq[Hash256],
                     peers: seq[Peer]): Future[void] {.async.} =
  try:
    for peer in peers:
      # Add to known tx hashes and get hashes still to send to peer
      var hashesToSend: seq[Hash256]
      ctx.addToKnownByPeer(txHashes, peer, hashesToSend)

      # Broadcast to peer if at least 1 new tx hash to announce
      if hashesToSend.len > 0:
        await peer.newPooledTransactionHashes(hashesToSend)

  except TransportError:
    debug "Transport got closed during sendNewTxHashes"
  except CatchableError as e:
    debug "Exception in sendNewTxHashes", exc = e.name, err = e.msg

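# Send full transaction bodies to the given peers and mark the corresponding
# hashes as known so they are not announced to the same peers again.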
proc sendTransactions(ctx: EthWireRef,
                      txHashes: seq[Hash256],
                      txs: seq[Transaction],
                      peers: seq[Peer]): Future[void] {.async.} =
  try:
    for peer in peers:
      # This is used to avoid re-sending along pooledTxHashes
      # announcements/re-broadcasts
      ctx.addToKnownByPeer(txHashes, peer)
      await peer.transactions(txs)

  except TransportError:
    debug "Transport got closed during sendTransactions"
  except CatchableError as e:
    debug "Exception in sendTransactions", exc = e.name, err = e.msg

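# Request the announced transactions from `peer`, feed them into the tx pool
# and re-announce the hashes that were accepted to all other peers.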
proc fetchTransactions(ctx: EthWireRef, reqHashes: seq[Hash256], peer: Peer): Future[void] {.async.} =
  debug "fetchTx: requesting txs",
    number = reqHashes.len

  try:
    let res = await peer.getPooledTransactions(reqHashes)
    if res.isNone:
      error "not able to get pooled transactions"
      return

    let txs = res.get()
    debug "fetchTx: received requested txs",
      number = txs.transactions.len

    # Remove from pending list regardless if tx is in result
    for tx in txs.transactions:
      let txHash = rlpHash(tx)
      ctx.pending.excl txHash

    ctx.txPool.add(txs.transactions)

  except TransportError:
    debug "Transport got closed during fetchTransactions"
    return
  except CatchableError as e:
    debug "Exception in fetchTransactions", exc = e.name, err = e.msg
    return

  var newTxHashes = newSeqOfCap[Hash256](reqHashes.len)
  for txHash in reqHashes:
    if ctx.inPoolAndOk(txHash):
      newTxHashes.add txHash

  let peers = ctx.getPeers(peer)
  if peers.len == 0 or newTxHashes.len == 0:
    return

  await ctx.sendNewTxHashes(newTxHashes, peers)

# ------------------------------------------------------------------------------
# Private functions: peer observer
# ------------------------------------------------------------------------------

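# Announce the hashes of all valid transactions currently in the pool to a
# freshly connected peer (unless tx pool gossip is disabled).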
proc onPeerConnected(ctx: EthWireRef, peer: Peer) =
  if ctx.disableTxPool:
    return

  var txHashes = newSeqOfCap[Hash256](ctx.txPool.numTxs)
  for txHash, item in okPairs(ctx.txPool):
    txHashes.add txHash

  if txHashes.len == 0:
    return

  debug "announce tx hashes to newly connected peer",
    number = txHashes.len

  asyncSpawn ctx.sendNewTxHashes(txHashes, @[peer])

proc onPeerDisconnected(ctx: EthWireRef, peer: Peer) =
  debug "ethwire: remove peer from knownByPeer",
    peer

  ctx.knownByPeer.del(peer)

proc setupPeerObserver(ctx: EthWireRef) =
  var po = PeerObserver(
    onPeerConnected:
      proc(p: Peer) {.gcsafe.} =
        ctx.onPeerConnected(p),
    onPeerDisconnected:
      proc(p: Peer) {.gcsafe.} =
        ctx.onPeerDisconnected(p))
  po.setProtocol eth
  ctx.peerPool.addObserver(ctx, po)

# ------------------------------------------------------------------------------
# Public constructor/destructor
# ------------------------------------------------------------------------------

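# Constructor. Typical wiring on the caller side (illustrative only):
#   let wire = EthWireRef.new(chain, txPool, peerPool)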
proc new*(_: type EthWireRef,
          chain: ChainRef,
          txPool: TxPoolRef,
          peerPool: PeerPool): EthWireRef =
  let ctx = EthWireRef(
    db: chain.db,
    chain: chain,
    txPool: txPool,
    peerPool: peerPool,
    lastCleanup: getTime(),
  )

  ctx.setupPeerObserver()
  ctx

# ------------------------------------------------------------------------------
# Public functions: callbacks setters
# ------------------------------------------------------------------------------

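# Callback setters. The handlers registered here are invoked from
# handleNewBlock/handleNewBlockHashes below; `arg` is passed back verbatim.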
proc setNewBlockHandler*(ctx: EthWireRef, handler: NewBlockHandler, arg: pointer) =
  ctx.newBlockHandler = NewBlockHandlerPair(
    arg: arg,
    handler: handler
  )

proc setNewBlockHashesHandler*(ctx: EthWireRef, handler: NewBlockHashesHandler, arg: pointer) =
  ctx.newBlockHashesHandler = NewBlockHashesHandlerPair(
    arg: arg,
    handler: handler
  )

# ------------------------------------------------------------------------------
# Public functions: eth wire protocol handlers
# ------------------------------------------------------------------------------

proc txPoolEnabled*(ctx: EthWireRef; ena: bool) =
  ctx.disableTxPool = not ena

method getStatus*(ctx: EthWireRef): EthState {.gcsafe.} =
  let
    db = ctx.db
    com = ctx.chain.com
    bestBlock = db.getCanonicalHead()
    forkId = com.forkId(bestBlock.blockNumber)

  EthState(
    totalDifficulty: db.headTotalDifficulty,
    genesisHash: com.genesisHash,
    bestBlockHash: bestBlock.blockHash,
    forkId: ChainForkId(
      forkHash: forkId.crc.toBytesBE,
      forkNext: forkId.nextFork.toBlockNumber
    )
  )

method getReceipts*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[seq[Receipt]] {.gcsafe.} =
  let db = ctx.db
  var header: BlockHeader
  for blockHash in hashes:
    if db.getBlockHeader(blockHash, header):
      result.add db.getReceipts(header.receiptRoot)
    else:
      result.add @[]
      trace "handlers.getReceipts: blockHeader not found", blockHash

method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Transaction] {.gcsafe.} =
  let txPool = ctx.txPool
  for txHash in hashes:
    let res = txPool.getItem(txHash)
    if res.isOk:
      result.add res.value.tx
    else:
      trace "handlers.getPooledTxs: tx not found", txHash

method getBlockBodies*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[BlockBody] {.gcsafe.} =
  let db = ctx.db
  var body: BlockBody
  for blockHash in hashes:
    if db.getBlockBody(blockHash, body):
      result.add body
    else:
      result.add BlockBody()
      trace "handlers.getBlockBodies: blockBody not found", blockHash

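# Resolve a GetBlockHeaders request: start at `req.startBlock` and walk
# forward or backward (depending on `req.reverse`) with the requested skip
# distance, collecting at most `req.maxResults` headers.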
method getBlockHeaders*(ctx: EthWireRef, req: BlocksRequest): seq[BlockHeader] {.gcsafe.} =
  let db = ctx.db
  var foundBlock: BlockHeader
  result = newSeqOfCap[BlockHeader](req.maxResults)

  if db.blockHeader(req.startBlock, foundBlock):
    result.add foundBlock

    while uint64(result.len) < req.maxResults:
      if not req.reverse:
        if not db.successorHeader(foundBlock, foundBlock, req.skip):
          break
      else:
        if not db.ancestorHeader(foundBlock, foundBlock, req.skip):
          break
      result.add foundBlock

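# Incoming transaction gossip. Accepted transactions are re-broadcast in full
# to roughly 1/NUM_PEERS_REBROADCAST_QUOTIENT of the other peers and announced
# by hash to the rest.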
method handleAnnouncedTxs*(ctx: EthWireRef, peer: Peer, txs: openArray[Transaction]) {.gcsafe.} =
  if ctx.disableTxPool:
    when trMissingOrDisabledGossipOk:
      notEnabled("handleAnnouncedTxs")
    return

  if txs.len == 0:
    return

  debug "received new transactions",
    number = txs.len

  if getTime() - ctx.lastCleanup > POOLED_STORAGE_TIME_LIMIT:
    ctx.cleanupKnownByPeer()

  var txHashes = newSeqOfCap[Hash256](txs.len)
  for tx in txs:
    txHashes.add rlpHash(tx)

  ctx.addToKnownByPeer(txHashes, peer)
  ctx.txPool.add(txs)

  var newTxHashes = newSeqOfCap[Hash256](txHashes.len)
  var validTxs = newSeqOfCap[Transaction](txHashes.len)
  for i, txHash in txHashes:
    if ctx.inPoolAndOk(txHash):
      newTxHashes.add txHash
      validTxs.add txs[i]

  let
    peers = ctx.getPeers(peer)
    numPeers = peers.len
    sendFull = max(1, numPeers div NUM_PEERS_REBROADCAST_QUOTIENT)

  if numPeers == 0 or validTxs.len == 0:
    return

  asyncSpawn ctx.sendTransactions(txHashes, validTxs, peers[0..<sendFull])

  asyncSpawn ctx.sendNewTxHashes(newTxHashes, peers[sendFull..^1])

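# Incoming tx hash announcements. Hashes that are neither pending nor already
# pooled are fetched from the announcing peer asynchronously.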
method handleAnnouncedTxsHashes*(ctx: EthWireRef, peer: Peer, txHashes: openArray[Hash256]) {.gcsafe.} =
  if ctx.disableTxPool:
    when trMissingOrDisabledGossipOk:
      notEnabled("handleAnnouncedTxsHashes")
    return

  if txHashes.len == 0:
    return

  if getTime() - ctx.lastCleanup > POOLED_STORAGE_TIME_LIMIT:
    ctx.cleanupKnownByPeer()

  ctx.addToKnownByPeer(txHashes, peer)
  var reqHashes = newSeqOfCap[Hash256](txHashes.len)
  for txHash in txHashes:
    if txHash in ctx.pending or ctx.inPool(txHash):
      continue
    reqHashes.add txHash

  if reqHashes.len == 0:
    return

  debug "handleAnnouncedTxsHashes: received new tx hashes",
    number = reqHashes.len

  for txHash in reqHashes:
    ctx.pending.incl txHash

  asyncSpawn ctx.fetchTransactions(reqHashes, peer)

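# Block gossip is invalid after the merge (EIP-3675); peers that still send
# NewBlock or NewBlockHashes are banned for PEER_LONG_BANTIME.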
method handleNewBlock*(ctx: EthWireRef, peer: Peer, blk: EthBlock, totalDifficulty: DifficultyInt) {.gcsafe.} =
  if ctx.chain.com.forkGTE(MergeFork):
    debug "Dropping peer for sending NewBlock after merge (EIP-3675)",
      peer, blockNumber=blk.header.blockNumber,
      blockHash=blk.header.blockHash, totalDifficulty
    asyncSpawn banPeer(ctx.peerPool, peer, PEER_LONG_BANTIME)
    return

  if not ctx.newBlockHandler.handler.isNil:
    ctx.newBlockHandler.handler(
      ctx.newBlockHandler.arg,
      peer, blk, totalDifficulty
    )

method handleNewBlockHashes*(ctx: EthWireRef, peer: Peer, hashes: openArray[NewBlockHashesAnnounce]) {.gcsafe.} =
  if ctx.chain.com.forkGTE(MergeFork):
    debug "Dropping peer for sending NewBlockHashes after merge (EIP-3675)",
      peer, numHashes=hashes.len
    asyncSpawn banPeer(ctx.peerPool, peer, PEER_LONG_BANTIME)
    return

  if not ctx.newBlockHashesHandler.handler.isNil:
    ctx.newBlockHashesHandler.handler(
      ctx.newBlockHashesHandler.arg,
      peer,
      hashes
    )

when defined(legacy_eth66_enabled):
  method getStorageNodes*(ctx: EthWireRef, hashes: openArray[Hash256]): seq[Blob] {.gcsafe.} =
    let db = ctx.db.db
    for hash in hashes:
      result.add db.get(hash.data)

  method handleNodeData*(ctx: EthWireRef, peer: Peer, data: openArray[Blob]) {.gcsafe.} =
    notImplemented("handleNodeData")