fluffy: Make concurrent offers configurable at cli (#2854)
parent 6086c2903c
commit b671499fdc
@@ -232,7 +232,7 @@ type
         "the same machines. The option might be removed/adjusted in the future",
       defaultValue: defaultPortalProtocolConfig.tableIpLimits.tableIpLimit,
       defaultValueDesc: $defaultTableIpLimitDesc,
-      name: "table-ip-limit"
+      name: "debug-table-ip-limit"
     .}: uint

     bucketIpLimit* {.

@@ -243,7 +243,7 @@ type
         "the same machines. The option might be removed/adjusted in the future",
       defaultValue: defaultPortalProtocolConfig.tableIpLimits.bucketIpLimit,
       defaultValueDesc: $defaultBucketIpLimitDesc,
-      name: "bucket-ip-limit"
+      name: "debug-bucket-ip-limit"
     .}: uint

     bitsPerHop* {.

@@ -251,7 +251,7 @@ type
       desc: "Kademlia's b variable, increase for less hops per lookup",
       defaultValue: defaultPortalProtocolConfig.bitsPerHop,
       defaultValueDesc: $defaultBitsPerHopDesc,
-      name: "bits-per-hop"
+      name: "debug-bits-per-hop"
     .}: int

     maxGossipNodes* {.
@@ -259,7 +259,14 @@ type
       desc: "The maximum number of nodes to send content to during gossip",
       defaultValue: defaultPortalProtocolConfig.maxGossipNodes,
       defaultValueDesc: $defaultMaxGossipNodesDesc,
-      name: "max-gossip-nodes"
+      name: "debug-max-gossip-nodes"
     .}: int

+    maxConcurrentOffers* {.
+      hidden,
+      desc: "The maximum number of offers to send concurrently",
+      defaultValue: defaultPortalProtocolConfig.maxConcurrentOffers,
+      name: "debug-max-concurrent-offers"
+    .}: int
+
     radiusConfig* {.
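The renamed and newly added options all follow the standard confutils field-pragma pattern. Below is a minimal, self-contained sketch (not fluffy's actual configuration module; DemoConf and its default value are illustrative only) of how a hidden option declared this way is parsed from the command line:

# Sketch only: a confutils config object with a hidden debug option,
# mirroring the pragma pattern used in the diff above.
import confutils

type DemoConf* = object
  maxConcurrentOffers* {.
    hidden,
    desc: "The maximum number of offers to send concurrently",
    defaultValue: 50,
    name: "debug-max-concurrent-offers"
  .}: int

when isMainModule:
  # Parses the process command line, e.g. `demo --debug-max-concurrent-offers=100`.
  let conf = DemoConf.load()
  echo "max concurrent offers: ", conf.maxConcurrentOffers

Because the option is marked hidden, it is intended to stay out of the regular help output while still being settable, which matches the debug-* naming used above.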
@@ -316,14 +323,14 @@ type
         "Size of the in memory local content cache. This is the max number " &
         "of content values that can be stored in the cache.",
       defaultValue: defaultPortalProtocolConfig.contentCacheSize,
-      name: "content-cache-size"
+      name: "debug-content-cache-size"
     .}: int

     disableContentCache* {.
       hidden,
       desc: "Disable the in memory local content cache",
       defaultValue: defaultPortalProtocolConfig.disableContentCache,
-      name: "disable-content-cache"
+      name: "debug-disable-content-cache"
     .}: bool

     disablePoke* {.
@@ -183,7 +183,7 @@ proc run(
     portalProtocolConfig = PortalProtocolConfig.init(
       config.tableIpLimit, config.bucketIpLimit, config.bitsPerHop, config.radiusConfig,
       config.disablePoke, config.maxGossipNodes, config.contentCacheSize,
-      config.disableContentCache,
+      config.disableContentCache, config.maxConcurrentOffers,
     )

     portalNodeConfig = PortalNodeConfig(
@@ -125,20 +125,6 @@ const
   ## value in milliseconds
   initialLookups = 1 ## Amount of lookups done when populating the routing table

-  # These are the concurrent offers per Portal wire protocol that is running.
-  # Using the `offerQueue` allows for limiting the amount of offers send and
-  # thus how many streams can be started.
-  # TODO:
-  # More thought needs to go into this as it is currently on a per network
-  # basis. Keep it simple like that? Or limit it better at the stream transport
-  # level? In the latter case, this might still need to be checked/blocked at
-  # the very start of sending the offer, because blocking/waiting too long
-  # between the received accept message and actually starting the stream and
-  # sending data could give issues due to timeouts on the other side.
-  # And then there are still limits to be applied also for FindContent and the
-  # incoming directions.
-  concurrentOffers = 50
-
 type
   ToContentIdHandler* =
     proc(contentKey: ContentKeyByteList): results.Opt[ContentId] {.raises: [], gcsafe.}
@@ -591,7 +577,7 @@ proc new*(
     bootstrapRecords: @bootstrapRecords,
     stream: stream,
     radiusCache: RadiusCache.init(256),
-    offerQueue: newAsyncQueue[OfferRequest](concurrentOffers),
+    offerQueue: newAsyncQueue[OfferRequest](config.maxConcurrentOffers),
     pingTimings: Table[NodeId, chronos.Moment](),
     config: config,
   )
@@ -1758,7 +1744,19 @@ proc start*(p: PortalProtocol) =
   p.refreshLoop = refreshLoop(p)
   p.revalidateLoop = revalidateLoop(p)

-  for i in 0 ..< concurrentOffers:
+  # These are the concurrent offers per Portal wire protocol that is running.
+  # Using the `offerQueue` allows for limiting the amount of offers send and
+  # thus how many streams can be started.
+  # TODO:
+  # More thought needs to go into this as it is currently on a per network
+  # basis. Keep it simple like that? Or limit it better at the stream transport
+  # level? In the latter case, this might still need to be checked/blocked at
+  # the very start of sending the offer, because blocking/waiting too long
+  # between the received accept message and actually starting the stream and
+  # sending data could give issues due to timeouts on the other side.
+  # And then there are still limits to be applied also for FindContent and the
+  # incoming directions.
+  for i in 0 ..< p.config.maxConcurrentOffers:
     p.offerWorkers.add(offerWorker(p))

 proc stop*(p: PortalProtocol) {.async: (raises: []).} =
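The comment block above (moved here from the removed constants section) describes the mechanism: a bounded offer queue plus a fixed pool of async workers, so at most p.config.maxConcurrentOffers offers are in flight per wire protocol instance. A minimal sketch of that pattern, assuming chronos AsyncQueue semantics; OfferRequest and sendOffer are illustrative stand-ins, not the real fluffy types:

# Sketch only: bounded queue + worker pool, as described in the comment above.
import chronos

type OfferRequest = object
  contentId: int

proc sendOffer(req: OfferRequest) {.async.} =
  # Stand-in for the real work: waiting on the accept message and
  # streaming the offered content to the peer.
  await sleepAsync(10.milliseconds)

proc offerWorker(queue: AsyncQueue[OfferRequest]) {.async.} =
  while true:
    # Each worker handles one queued offer at a time, so the number of
    # in-flight offers is capped by the number of workers started below.
    let req = await queue.popFirst()
    await sendOffer(req)

when isMainModule:
  const maxConcurrentOffers = 50
  # The queue is bounded to the same limit, so producers awaiting `addLast`
  # get back-pressure once the queue is full.
  let offerQueue = newAsyncQueue[OfferRequest](maxConcurrentOffers)
  var offerWorkers: seq[Future[void]]
  for _ in 0 ..< maxConcurrentOffers:
    offerWorkers.add(offerWorker(offerQueue))
  waitFor offerQueue.addLast(OfferRequest(contentId: 1))

Keeping the workers in a seq mirrors what the diff does with p.offerWorkers, so they can be cancelled on stop.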
@@ -43,6 +43,7 @@ type
     maxGossipNodes*: int
     contentCacheSize*: int
     disableContentCache*: bool
+    maxConcurrentOffers*: int

 const
   defaultRadiusConfig* = RadiusConfig(kind: Dynamic)

@@ -51,6 +52,7 @@ const
   defaultMaxGossipNodes* = 4
   defaultContentCacheSize* = 100
   defaultDisableContentCache* = false
+  defaultMaxConcurrentOffers* = 50
   revalidationTimeout* = chronos.seconds(30)

   defaultPortalProtocolConfig* = PortalProtocolConfig(

@@ -61,6 +63,7 @@ const
     maxGossipNodes: defaultMaxGossipNodes,
     contentCacheSize: defaultContentCacheSize,
     disableContentCache: defaultDisableContentCache,
+    maxConcurrentOffers: defaultMaxConcurrentOffers,
   )

 proc init*(

@@ -73,6 +76,7 @@ proc init*(
     maxGossipNodes: int,
     contentCacheSize: int,
     disableContentCache: bool,
+    maxConcurrentOffers: int,
 ): T =
   PortalProtocolConfig(
     tableIpLimits:

@@ -83,6 +87,7 @@ proc init*(
     maxGossipNodes: maxGossipNodes,
     contentCacheSize: contentCacheSize,
     disableContentCache: disableContentCache,
+    maxConcurrentOffers: maxConcurrentOffers,
   )

 func fromLogRadius*(T: type UInt256, logRadius: uint16): T =
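With the field threaded through the defaults and the init proc, embedders that build a PortalProtocolConfig directly (rather than via the CLI) can also pick their own limit. A rough sketch of such a call; the positional order mirrors the PortalProtocolConfig.init call in the run proc earlier in this commit, and the concrete values are illustrative only:

# Sketch only: constructing a protocol config with a custom concurrent-offer
# limit, reusing the defaults shown above for the remaining parameters.
# (Assumes the portal wire protocol config module is imported.)
let customConfig = PortalProtocolConfig.init(
  defaultPortalProtocolConfig.tableIpLimits.tableIpLimit,
  defaultPortalProtocolConfig.tableIpLimits.bucketIpLimit,
  defaultPortalProtocolConfig.bitsPerHop,
  defaultRadiusConfig,
  false, # disablePoke
  defaultMaxGossipNodes,
  defaultContentCacheSize,
  defaultDisableContentCache,
  maxConcurrentOffers = 100, # raise the limit above the default of 50
)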
@@ -342,9 +342,9 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do
     --metrics \
     --metrics-address="127.0.0.1" \
     --metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \
-    --table-ip-limit=1024 \
-    --bucket-ip-limit=24 \
-    --bits-per-hop=1 \
+    --debug-table-ip-limit=1024 \
+    --debug-bucket-ip-limit=24 \
+    --debug-bits-per-hop=1 \
     --portal-subnetworks="${PORTAL_SUBNETWORKS}" \
     --disable-state-root-validation="${DISABLE_STATE_ROOT_VALIDATION}" \
     ${TRUSTED_BLOCK_ROOT_ARG} \