code compiles but nimsuggest crashes

Daniil Sobol 2023-10-01 20:09:17 +03:00
parent f5cdee967b
commit 8f8f78aa74
3 changed files with 3 additions and 47 deletions


@@ -233,19 +233,6 @@ type
defaultValueDesc: $defaultDisablePoke
name: "disable-poke" .}: bool
timeoutExpiration* {.
hidden
desc: "I have no idea how to describe this parameter"
defaultValue: defaultTimeoutExpiration
defaultValueDesc: $defaultTimeoutExpiration
name: "timeout-expiration" .}: uint32
timeoutInterval* {.
desc: "Minimal interval between pings"
defaultValue: defaultTimeoutInterval
defaultValueDesc: $defaultTimeoutInterval
name: "timeout-interval" .}: uint32
case cmd* {.
command
defaultValue: noCommand .}: PortalCmd
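
For context on the block above: these option fields follow the nim-confutils pragma pattern, where each pragma carries the generated CLI flag's help text, default value, and name. A minimal self-contained sketch of the same pattern (the ExampleConf type and main body are illustrative, not part of this commit):

import confutils

const defaultTimeoutInterval = 1000'u32  # mirrors the removed 1 s default

type ExampleConf* = object
  timeoutInterval* {.
    desc: "Minimal interval between pings"
    defaultValue: defaultTimeoutInterval
    defaultValueDesc: $defaultTimeoutInterval
    name: "timeout-interval" .}: uint32

when isMainModule:
  # load() parses the command line and fills in defaults for absent flags.
  let conf = ExampleConf.load()
  echo "timeout-interval = ", conf.timeoutInterval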


@@ -11,7 +11,7 @@
{.push raises: [].}
import
std/[sequtils, sets, algorithm, tables, asyncdispatch, times, math],
std/[sequtils, sets, algorithm, tables, math],
stew/[results, byteutils, leb128, endians2], chronicles, chronos, nimcrypto/hash,
bearssl, ssz_serialization, metrics, faststreams,
eth/rlp, eth/p2p/discoveryv5/[protocol, node, enr, routing_table, random2,
@@ -172,7 +172,7 @@ type
offerWorkers: seq[Future[void]]
disablePoke: bool
pingTimings: Table[NodeId, uint64]
timeoutExpiration: uint64
timeoutInterval: uint64
PortalResult*[T] = Result[T, string]
@@ -196,23 +196,6 @@ type
# content is in their range
nodesInterestedInContent*: seq[Node]
proc gcOldTimestamps(p: PortalProtocol) {.async.} =
# TODO: A better way to do this would be to also track total memory consumption
# and/or its growth rate; if we start to consume too much memory, or the growth
# rate is too fast, drop old timings starting from the oldest ones.
while true:
let nowMilliSeconds = uint64(epochTime() * 1000)
for nodeId, timeout in p.pingTimings:
# NOTE: not sure how to handle this, because uint64 - uint64 is still uint64,
# so if the clock moves backwards (think NTP correction) the subtraction
# wraps around and this check misbehaves.
if nowMilliSeconds - timeout > p.timeoutExpiration:
p.pingTimings.del(nodeId)
# TODO: this value should probably be adaptive, relative to the size
# of the routing table.
await sleepAsync(1000)
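
Two hedged ideas for the concerns raised in the comments above (a sketch only, not code from this commit): make the expiry test wraparound-safe so a backwards clock step keeps entries instead of evicting everything, and collect expired keys before deleting, since mutating a Table while iterating over it is not safe in Nim. The isExpired helper is hypothetical:

# Wraparound-safe expiry test: if the clock moved backwards, now < seen,
# so the entry is simply kept instead of the subtraction wrapping around
# to a huge uint64.
func isExpired(now, seen, expiration: uint64): bool =
  now > seen and now - seen > expiration

# Inside the loop: collect first, then delete.
#   var expired: seq[NodeId]
#   for nodeId, timeout in p.pingTimings:
#     if isExpired(nowMilliSeconds, timeout, p.timeoutExpiration):
#       expired.add(nodeId)
#   for nodeId in expired:
#     p.pingTimings.del(nodeId)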
proc init*(
T: type ContentKV,
contentKey: ByteList,
@@ -497,16 +480,12 @@ proc new*(T: type PortalProtocol,
offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
disablePoke: config.disablePoke,
pingTimings: initTable[NodeId, uint64](),
timeoutExpiration: config.timeoutExpiration,
timeoutInterval: config.timeoutInterval
timeoutInterval: 30_000 # 30 seconds
)
proto.baseProtocol.registerTalkProtocol(@(proto.protocolId), proto).expect(
"Only one protocol should have this id")
# start garbage-collecting old ping timings
asyncCheck gcOldTimestamps(proto)
proto
# Sends the discv5 talkreq message with the provided Portal message, awaits and
@@ -593,14 +572,6 @@ proc recordsFromBytes*(rawRecords: List[ByteList, 32]): PortalResult[seq[Record]
proc ping*(p: PortalProtocol, dst: Node):
Future[PortalResult[PongMessage]] {.async.} =
let nowMilliSeconds = uint64(epochTime() * 1000) # milliseconds
if p.pingTimings.hasKey(dst.id):
if p.pingTimings[dst.id] + p.timeoutInterval > nowMilliSeconds: # short-circuit this ping if not enough time has passed since the last ping to this node
return err("Ping cooldown") # Is there a better way to short-circuit this?
p.pingTimings[dst.id] = nowMilliSeconds
let pongResponse = await p.pingImpl(dst)
if pongResponse.isOk():
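
Since this module already imports chronos, a hedged alternative to the wall-clock cooldown above would be to store monotonic Moment values, which NTP corrections cannot move backwards. A sketch under that assumption; allowPing and the string-keyed table are illustrative, not this codebase's API:

import std/tables
import chronos

var lastPing: Table[string, Moment]  # the real code would key on NodeId

proc allowPing(nodeId: string, interval: Duration): bool =
  # Returns false while the node is still inside its cooldown window.
  let now = Moment.now()
  if nodeId in lastPing and now - lastPing[nodeId] < interval:
    return false
  lastPing[nodeId] = now
  true

# usage, matching the hardcoded 30_000 ms interval above:
#   if not allowPing($dst.id, 30.seconds): return err("Ping cooldown")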


@@ -33,8 +33,6 @@ const
defaultRadiusConfig* = RadiusConfig(kind: Dynamic)
defaultRadiusConfigDesc* = $defaultRadiusConfig.kind
defaultDisablePoke* = false
defaultTimeoutExpiration* = 60 * 60 * 24 * 1000 # One day
defaultTimeoutInterval = 1 * 1000 # 1 sec
defaultPortalProtocolConfig* = PortalProtocolConfig(
tableIpLimits: DefaultTableIpLimits,