import std/sequtils
import std/tables

import pkg/chronos

import pkg/codex/rng
import pkg/codex/chunker
import pkg/codex/blocktype as bt
import pkg/codex/blockexchange

import ../../asynctest
import ../examples
import ../helpers
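
# "Network - Handlers": protobuf-encoded, length-prefixed messages are pushed
# straight into a BufferStream that stands in for the wire; each test then
# waits for the corresponding handler callback to fire.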
asyncchecksuite "Network - Handlers":
  let
    rng = Rng.instance()
    seckey = PrivateKey.random(rng[]).tryGet()
    peerId = PeerId.init(seckey.getPublicKey().tryGet()).tryGet()
    chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)

  var
    network: BlockExcNetwork
    networkPeer: NetworkPeer
    buffer: BufferStream
    blocks: seq[bt.Block]
    done: Future[void]
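
  # Used as the network's connProvider below: returns the test's BufferStream
  # wrapped as a Connection, so tests can inject raw bytes without a transport.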
  proc getConn(): Future[Connection] {.async: (raises: [CancelledError]).} =
    return Connection(buffer)
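
  # 1024 bytes of random data chunked at 256 bytes yields the 4 blocks that
  # the handlers below assert on.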
  setup:
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:
        break

      blocks.add(bt.Block.new(chunk).tryGet())

    done = newFuture[void]()
    buffer = BufferStream.new()
    network = BlockExcNetwork.new(switch = newStandardSwitch(), connProvider = getConn)
    network.setupPeer(peerId)
    networkPeer = network.peers[peerId]
    discard await networkPeer.connect()
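
  # Each test registers its handler, injects one encoded message, and fails
  # if `done` isn't completed within the wait deadline.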
test "Want List handler":
|
|
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
|
|
# check that we got the correct amount of entries
|
|
check wantList.entries.len == 4
|
|
|
|
for b in blocks:
|
|
check b.address in wantList.entries
|
|
let entry = wantList.entries[wantList.entries.find(b.address)]
|
|
check entry.wantType == WantType.WantHave
|
|
check entry.priority == 1
|
|
check entry.cancel == true
|
|
check entry.sendDontHave == true
|
|
|
|
done.complete()
|
|
|
|
network.handlers.onWantList = wantListHandler
|
|
|
|
let wantList =
|
|
makeWantList(blocks.mapIt(it.cid), 1, true, WantType.WantHave, true, true)
|
|
|
|
let msg = Message(wantlist: wantList)
|
|
await buffer.pushData(lenPrefix(protobufEncode(msg)))
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "Blocks Handler":
|
|
proc blocksDeliveryHandler(
|
|
peer: PeerId, blocksDelivery: seq[BlockDelivery]
|
|
) {.async: (raises: []).} =
|
|
check blocks == blocksDelivery.mapIt(it.blk)
|
|
done.complete()
|
|
|
|
network.handlers.onBlocksDelivery = blocksDeliveryHandler
|
|
|
|
let msg =
|
|
Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address)))
|
|
await buffer.pushData(lenPrefix(protobufEncode(msg)))
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "Presence Handler":
|
|
proc presenceHandler(
|
|
peer: PeerId, presence: seq[BlockPresence]
|
|
) {.async: (raises: []).} =
|
|
for b in blocks:
|
|
check:
|
|
b.address in presence
|
|
|
|
done.complete()
|
|
|
|
network.handlers.onPresence = presenceHandler
|
|
|
|
let msg = Message(
|
|
blockPresences:
|
|
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have))
|
|
)
|
|
await buffer.pushData(lenPrefix(protobufEncode(msg)))
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "Handles account messages":
|
|
let account = Account(address: EthAddress.example)
|
|
|
|
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
|
|
check received == account
|
|
done.complete()
|
|
|
|
network.handlers.onAccount = handleAccount
|
|
|
|
let message = Message(account: AccountMessage.init(account))
|
|
await buffer.pushData(lenPrefix(protobufEncode(message)))
|
|
|
|
await done.wait(100.millis)
|
|
|
|
test "Handles payment messages":
|
|
let payment = SignedState.example
|
|
|
|
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
|
|
check received == payment
|
|
done.complete()
|
|
|
|
network.handlers.onPayment = handlePayment
|
|
|
|
let message = Message(payment: StateChannelUpdate.init(payment))
|
|
await buffer.pushData(lenPrefix(protobufEncode(message)))
|
|
|
|
await done.wait(100.millis)
|
|
|
|
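
# "Network - Senders": two switches connected in-process; messages sent via
# network1's send helpers must arrive at the handlers registered on network2.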
asyncchecksuite "Network - Senders":
  let chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256)

  var
    switch1, switch2: Switch
    network1, network2: BlockExcNetwork
    blocks: seq[bt.Block]
    done: Future[void]
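
  # Same 4-block fixture as above, but over a real libp2p connection: each
  # network is mounted on its own switch and switch1 dials switch2.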
  setup:
    while true:
      let chunk = await chunker.getBytes()
      if chunk.len <= 0:
        break

      blocks.add(bt.Block.new(chunk).tryGet())

    done = newFuture[void]()
    switch1 = newStandardSwitch()
    switch2 = newStandardSwitch()
    network1 = BlockExcNetwork.new(switch = switch1)
    switch1.mount(network1)

    network2 = BlockExcNetwork.new(switch = switch2)
    switch2.mount(network2)

    await switch1.start()
    await switch2.start()

    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)

  teardown:
    await allFuturesThrowing(switch1.stop(), switch2.stop())
test "Send want list":
|
|
proc wantListHandler(peer: PeerId, wantList: WantList) {.async: (raises: []).} =
|
|
# check that we got the correct amount of entries
|
|
check wantList.entries.len == 4
|
|
|
|
for b in blocks:
|
|
check b.address in wantList.entries
|
|
let entry = wantList.entries[wantList.entries.find(b.address)]
|
|
check entry.wantType == WantType.WantHave
|
|
check entry.priority == 1
|
|
check entry.cancel == true
|
|
check entry.sendDontHave == true
|
|
|
|
done.complete()
|
|
|
|
network2.handlers.onWantList = wantListHandler
|
|
await network1.sendWantList(
|
|
switch2.peerInfo.peerId,
|
|
blocks.mapIt(it.address),
|
|
1,
|
|
true,
|
|
WantType.WantHave,
|
|
true,
|
|
true,
|
|
)
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "send blocks":
|
|
proc blocksDeliveryHandler(
|
|
peer: PeerId, blocksDelivery: seq[BlockDelivery]
|
|
) {.async: (raises: []).} =
|
|
check blocks == blocksDelivery.mapIt(it.blk)
|
|
done.complete()
|
|
|
|
network2.handlers.onBlocksDelivery = blocksDeliveryHandler
|
|
await network1.sendBlocksDelivery(
|
|
switch2.peerInfo.peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address))
|
|
)
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "send presence":
|
|
proc presenceHandler(
|
|
peer: PeerId, precense: seq[BlockPresence]
|
|
) {.async: (raises: []).} =
|
|
for b in blocks:
|
|
check:
|
|
b.address in precense
|
|
|
|
done.complete()
|
|
|
|
network2.handlers.onPresence = presenceHandler
|
|
|
|
await network1.sendBlockPresence(
|
|
switch2.peerInfo.peerId,
|
|
blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)),
|
|
)
|
|
|
|
await done.wait(500.millis)
|
|
|
|
test "send account":
|
|
let account = Account(address: EthAddress.example)
|
|
|
|
proc handleAccount(peer: PeerId, received: Account) {.async: (raises: []).} =
|
|
check received == account
|
|
done.complete()
|
|
|
|
network2.handlers.onAccount = handleAccount
|
|
|
|
await network1.sendAccount(switch2.peerInfo.peerId, account)
|
|
await done.wait(500.millis)
|
|
|
|
test "send payment":
|
|
let payment = SignedState.example
|
|
|
|
proc handlePayment(peer: PeerId, received: SignedState) {.async: (raises: []).} =
|
|
check received == payment
|
|
done.complete()
|
|
|
|
network2.handlers.onPayment = handlePayment
|
|
|
|
await network1.sendPayment(switch2.peerInfo.peerId, payment)
|
|
await done.wait(500.millis)
|
|
|
|
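
# "Network - Test Limits": network1 is created with maxInflight = 0, so its
# sends should never be admitted; the suite checks that the send future stays
# pending rather than completing or invoking the remote handler.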
asyncchecksuite "Network - Test Limits":
  var
    switch1, switch2: Switch
    network1, network2: BlockExcNetwork
    done: Future[void]

  setup:
    done = newFuture[void]()
    switch1 = newStandardSwitch()
    switch2 = newStandardSwitch()

    network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0)
    switch1.mount(network1)

    network2 = BlockExcNetwork.new(switch = switch2)
    switch2.mount(network2)

    await switch1.start()
    await switch2.start()

    await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)

  teardown:
    await allFuturesThrowing(switch1.stop(), switch2.stop())
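
  # With maxInflight = 0 the message should never actually go out: the remote
  # handler must not run (hence `check false`) and the send future must still
  # be pending after the sleep.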
test "Concurrent Sends":
|
|
let account = Account(address: EthAddress.example)
|
|
network2.handlers.onAccount = proc(
|
|
peer: PeerId, received: Account
|
|
) {.async: (raises: []).} =
|
|
check false
|
|
|
|
let fut = network1.send(
|
|
switch2.peerInfo.peerId, Message(account: AccountMessage.init(account))
|
|
)
|
|
|
|
await sleepAsync(100.millis)
|
|
check not fut.finished
|