Reverse calculate the radius at node restart (#2593)
This avoids always restarting the node with a full radius, which caused the node to be bombarded with offers that it later had to delete anyway. To implement this, the radius had to move from its current location in the Portal wire protocol to the contentDB and beaconDB, which is conceptually more correct anyhow. The radius is now part of the database objects, and the Portal wire protocol accesses its value through a handler.
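Below is a minimal, self-contained sketch of the handler pattern this commit introduces: the wire protocol no longer owns a radius value, it stores a closure that reads the current radius from the database object. The type and proc names (DbRadiusHandler, ContentDB, createRadiusHandler) mirror the diff below, but the standalone module and the isMainModule demo are illustrative only, not the actual fluffy module layout.

# Sketch only: closure-based radius handler, assuming the stint package for UInt256.
import stint

type
  DbRadiusHandler* = proc(): UInt256 {.raises: [], gcsafe.}

  ContentDB = ref object
    dataRadius*: UInt256

proc createRadiusHandler*(db: ContentDB): DbRadiusHandler =
  ## The wire protocol keeps this closure instead of its own radius field,
  ## so it always observes the database's current radius.
  return (
    proc(): UInt256 {.raises: [], gcsafe.} =
      db.dataRadius
  )

when isMainModule:
  let db = ContentDB(dataRadius: UInt256.high())
  let radiusHandler = createRadiusHandler(db)
  doAssert radiusHandler() == UInt256.high()
  # When pruning (or restart initialization) shrinks the database radius,
  # the protocol sees the new value immediately through the handler.
  db.dataRadius = UInt256.high() shr 1
  doAssert radiusHandler() == UInt256.high() shr 1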
parent d39c589ec3
commit 0a80a3bb25
@@ -18,7 +18,7 @@ import
  ../network/wire/[portal_protocol, portal_protocol_config],
  ./content_db_custom_sql_functions

export kvstore_sqlite3
export kvstore_sqlite3, portal_protocol_config

# This version of content db is the most basic, simple solution where data is
# stored no matter what content type or content network in the same kvstore with

@@ -53,6 +53,8 @@ type
    kv: KvStoreRef
    manualCheckpoint: bool
    storageCapacity*: uint64
    dataRadius*: UInt256
    localId: NodeId
    sizeStmt: SqliteStmt[NoParams, int64]
    unusedSizeStmt: SqliteStmt[NoParams, int64]
    vacuumStmt: SqliteStmt[NoParams, void]

@@ -80,10 +82,117 @@ template expectDb(x: auto): untyped =
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")

## Public calls to get database size, content size and similar.

proc size*(db: ContentDB): int64 =
  ## Return current size of DB as product of sqlite page_count and page_size:
  ## https://www.sqlite.org/pragma.html#pragma_page_count
  ## https://www.sqlite.org/pragma.html#pragma_page_size
  ## It returns the total size of db on the disk, i.e both data and metadata
  ## used to store content.
  ## It is worth noting that when deleting content, the size may lag behind due
  ## to the way how deleting works in sqlite.
  ## Good description can be found in: https://www.sqlite.org/lang_vacuum.html
  var size: int64 = 0
  discard (
    db.sizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc unusedSize(db: ContentDB): int64 =
  ## Returns the total size of the pages which are unused by the database,
  ## i.e they can be re-used for new content.
  var size: int64 = 0
  discard (
    db.unusedSizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc usedSize*(db: ContentDB): int64 =
  ## Returns the total size of the database (data + metadata) minus the unused
  ## pages.
  db.size() - db.unusedSize()

proc contentSize*(db: ContentDB): int64 =
  ## Returns total size of the content stored in DB.
  var size: int64 = 0
  discard (
    db.contentSizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc contentCount*(db: ContentDB): int64 =
  var count: int64 = 0
  discard (
    db.contentCountStmt.exec do(res: int64):
      count = res).expectDb()
  return count

## Radius estimation and initialization related calls

proc getLargestDistance*(db: ContentDB, localId: UInt256): UInt256 =
  var distanceBytes: array[32, byte]
  discard (
    db.largestDistanceStmt.exec(
      localId.toBytesBE(),
      proc(res: array[32, byte]) =
        distanceBytes = res,
    )
  ).expectDb()

  return UInt256.fromBytesBE(distanceBytes)

func estimateNewRadius(
    currentSize: uint64, storageCapacity: uint64, currentRadius: UInt256
): UInt256 =
  if storageCapacity == 0:
    return 0.stuint(256)

  let sizeRatio = currentSize div storageCapacity
  if sizeRatio > 0:
    currentRadius div sizeRatio.stuint(256)
  else:
    currentRadius

func estimateNewRadius*(db: ContentDB, rc: RadiusConfig): UInt256 =
  ## Rough estimation of the new radius for pruning when adjusting the storage
  ## capacity.
  case rc.kind
  of Static:
    UInt256.fromLogRadius(rc.logRadius)
  of Dynamic:
    if db.storageCapacity == 0:
      return 0.stuint(256)

    let oldRadiusApproximation = db.getLargestDistance(db.localId)
    estimateNewRadius(uint64(db.usedSize()), db.storageCapacity, oldRadiusApproximation)

func setInitialRadius*(db: ContentDB, rc: RadiusConfig) =
  ## Set the initial radius based on the radius config and the storage capacity
  ## and furthest distance of the content in the database.
  ## In case of a dynamic radius, if the storage capacity is near full, the
  ## radius will be set to the largest distance of the content in the database.
  ## Else the radius will be set to the maximum value.
  case rc.kind
  of Static:
    db.dataRadius = UInt256.fromLogRadius(rc.logRadius)
  of Dynamic:
    if db.storageCapacity == 0:
      db.dataRadius = 0.stuint(256)
      return

    let sizeRatio = db.usedSize().float / db.storageCapacity.float
    if sizeRatio > 0.95:
      db.dataRadius = db.getLargestDistance(db.localId)
    else:
      db.dataRadius = UInt256.high()

proc new*(
    T: type ContentDB,
    path: string,
    storageCapacity: uint64,
    radiusConfig: RadiusConfig,
    localId: NodeId,
    inMemory = false,
    manualCheckpoint = false,
): ContentDB =

@@ -141,7 +250,7 @@ proc new*(
      "SELECT max(xorDistance(?, key)) FROM kvstore", array[32, byte], array[32, byte]
    )[]

  ContentDB(
  let contentDb = ContentDB(
    kv: kvStore,
    backend: db,
    manualCheckpoint: manualCheckpoint,

@@ -156,6 +265,9 @@ proc new*(
    largestDistanceStmt: largestDistanceStmt,
  )

  contentDb.setInitialRadius(radiusConfig)
  contentDb

template disposeSafe(s: untyped): untyped =
  if distinctBase(s) != nil:
    s.dispose()

@@ -237,78 +349,8 @@ proc del*(db: ContentDB, key: ContentId) =
proc getSszDecoded*(db: ContentDB, key: ContentId, T: type auto): Opt[T] =
  db.getSszDecoded(key.toBytesBE(), T)

## Public calls to get database size, content size and similar.

proc size*(db: ContentDB): int64 =
  ## Return current size of DB as product of sqlite page_count and page_size:
  ## https://www.sqlite.org/pragma.html#pragma_page_count
  ## https://www.sqlite.org/pragma.html#pragma_page_size
  ## It returns the total size of db on the disk, i.e both data and metadata
  ## used to store content.
  ## It is worth noting that when deleting content, the size may lag behind due
  ## to the way how deleting works in sqlite.
  ## Good description can be found in: https://www.sqlite.org/lang_vacuum.html
  var size: int64 = 0
  discard (
    db.sizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc unusedSize(db: ContentDB): int64 =
  ## Returns the total size of the pages which are unused by the database,
  ## i.e they can be re-used for new content.
  var size: int64 = 0
  discard (
    db.unusedSizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc usedSize*(db: ContentDB): int64 =
  ## Returns the total size of the database (data + metadata) minus the unused
  ## pages.
  db.size() - db.unusedSize()

proc contentSize*(db: ContentDB): int64 =
  ## Returns total size of the content stored in DB.
  var size: int64 = 0
  discard (
    db.contentSizeStmt.exec do(res: int64):
      size = res).expectDb()
  return size

proc contentCount*(db: ContentDB): int64 =
  var count: int64 = 0
  discard (
    db.contentCountStmt.exec do(res: int64):
      count = res).expectDb()
  return count

## Pruning related calls

proc getLargestDistance*(db: ContentDB, localId: UInt256): UInt256 =
  var distanceBytes: array[32, byte]
  discard (
    db.largestDistanceStmt.exec(
      localId.toBytesBE(),
      proc(res: array[32, byte]) =
        distanceBytes = res,
    )
  ).expectDb()

  return UInt256.fromBytesBE(distanceBytes)

func estimateNewRadius(
    currentSize: uint64, storageCapacity: uint64, currentRadius: UInt256
): UInt256 =
  let sizeRatio = currentSize div storageCapacity
  if sizeRatio > 0:
    currentRadius div sizeRatio.stuint(256)
  else:
    currentRadius

func estimateNewRadius*(db: ContentDB, currentRadius: UInt256): UInt256 =
  estimateNewRadius(uint64(db.usedSize()), db.storageCapacity, currentRadius)

proc deleteContentFraction*(
    db: ContentDB, target: UInt256, fraction: float64
): (UInt256, int64, int64, int64) =

@@ -419,12 +461,12 @@ proc put*(
  )

proc adjustRadius(
    p: PortalProtocol, deletedFraction: float64, distanceOfFurthestElement: UInt256
    db: ContentDB, deletedFraction: float64, distanceOfFurthestElement: UInt256
) =
  # Invert fraction as the UInt256 implementation does not support
  # multiplication by float
  let invertedFractionAsInt = int64(1.0 / deletedFraction)
  let scaledRadius = p.dataRadius div u256(invertedFractionAsInt)
  let scaledRadius = db.dataRadius div u256(invertedFractionAsInt)

  # Choose a larger value to avoid the situation where the
  # `distanceOfFurthestElement is very close to the local id so that the local

@@ -433,12 +475,12 @@ proc adjustRadius(
  let newRadius = max(scaledRadius, distanceOfFurthestElement)

  info "Database radius adjusted",
    oldRadius = p.dataRadius, newRadius = newRadius, distanceOfFurthestElement
    oldRadius = db.dataRadius, newRadius = newRadius, distanceOfFurthestElement

  # Both scaledRadius and distanceOfFurthestElement are smaller than current
  # dataRadius, so the radius will constantly decrease through the node its
  # lifetime.
  p.dataRadius = newRadius
  db.dataRadius = newRadius

proc createGetHandler*(db: ContentDB): DbGetHandler =
  return (

@@ -473,7 +515,7 @@ proc createStoreHandler*(
      )

      if res.deletedFraction > 0.0:
        p.adjustRadius(res.deletedFraction, res.distanceOfFurthestElement)
        db.adjustRadius(res.deletedFraction, res.distanceOfFurthestElement)
      else:
        # Note:
        # This can occur when the furthest content is bigger than the fraction

@@ -488,3 +530,9 @@ proc createStoreHandler*(
        # so we will effectivly store fraction of the network
        db.put(contentId, content)
  )

proc createRadiusHandler*(db: ContentDB): DbRadiusHandler =
  return (
    proc(): UInt256 {.raises: [], gcsafe.} =
      db.dataRadius
  )
@@ -129,16 +129,12 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
      config.dataDir / config.network.getDbDirectory() / "contentdb_" &
        d.localNode.id.toBytesBE().toOpenArray(0, 8).toHex(),
      storageCapacity = config.storageCapacityMB * 1_000_000,
      radiusConfig = config.radiusConfig,
      localId = d.localNode.id,
      manualCheckpoint = true,
    )

  let radius =
    if config.radiusConfig.kind == Static:
      UInt256.fromLogRadius(config.radiusConfig.logRadius)
    else:
      let oldRadiusApproximation = db.getLargestDistance(d.localNode.id)
      db.estimateNewRadius(oldRadiusApproximation)

  let radius = db.estimateNewRadius(config.radiusConfig)
  # Note: In the case of dynamical radius this is all an approximation that
  # heavily relies on uniformly distributed content and thus will always
  # have an error margin, either down or up of the requested capacity.
@@ -22,7 +22,7 @@ import
  ./beacon_content,
  ./beacon_chain_historical_summaries,
  ./beacon_init_loader,
  ../wire/portal_protocol
  ../wire/[portal_protocol, portal_protocol_config]

from beacon_chain/spec/helpers import is_better_update, toMeta

@@ -38,6 +38,7 @@ type
  BeaconDb* = ref object
    backend: SqStoreRef
    kv: KvStoreRef
    dataRadius*: UInt256
    bestUpdates: BestLightClientUpdateStore
    forkDigests: ForkDigests
    cfg*: RuntimeConfig

@@ -159,6 +160,7 @@ proc new*(
  BeaconDb(
    backend: db,
    kv: kvStore,
    dataRadius: UInt256.high(), # Radius to max to accept all data
    bestUpdates: bestUpdates,
    cfg: networkData.metadata.cfg,
    forkDigests: (newClone networkData.forks)[],

@@ -411,3 +413,9 @@ proc createStoreHandler*(db: BeaconDb): DbStoreHandler =
      else:
        db.put(contentId, content)
  )

proc createRadiusHandler*(db: BeaconDb): DbRadiusHandler =
  return (
    proc(): UInt256 {.raises: [], gcsafe.} =
      db.dataRadius
  )
@@ -196,23 +196,15 @@ proc new*(

    stream = streamManager.registerNewStream(contentQueue)

    # Need to adjust the radius to a static max value as for the Beacon chain
    # network all data must be accepted currently.
    portalConfigAdjusted = PortalProtocolConfig(
      tableIpLimits: portalConfig.tableIpLimits,
      bitsPerHop: portalConfig.bitsPerHop,
      radiusConfig: RadiusConfig(kind: Static, logRadius: 256),
      disablePoke: portalConfig.disablePoke,
    )

    portalProtocol = PortalProtocol.new(
      baseProtocol,
      getProtocolId(portalNetwork, PortalSubnetwork.beacon),
      toContentIdHandler,
      createGetHandler(beaconDb),
      createRadiusHandler(beaconDb),
      stream,
      bootstrapRecords,
      config = portalConfigAdjusted,
      config = portalConfig,
    )

  portalProtocol.dbPut = createStoreHandler(beaconDb)
@@ -692,6 +692,7 @@ proc new*(
      getProtocolId(portalNetwork, PortalSubnetwork.history),
      toContentIdHandler,
      createGetHandler(contentDB),
      createRadiusHandler(contentDB),
      stream,
      bootstrapRecords,
      config = portalConfig,

@@ -752,10 +753,11 @@ proc statusLogLoop(n: HistoryNetwork) {.async: (raises: []).} =
      # radius drop.
      # TODO: Get some float precision calculus?
      let radiusPercentage =
        n.portalProtocol.dataRadius div (UInt256.high() div u256(100))
        n.portalProtocol.dataRadius() div (UInt256.high() div u256(100))

      info "History network status",
        radius = radiusPercentage.toString(10) & "%",
        radiusPercentage = radiusPercentage.toString(10) & "%",
        radius = n.portalProtocol.dataRadius().toHex(),
        dbSize = $(n.contentDB.size() div 1000) & "kb",
        routingTableNodes = n.portalProtocol.routingTable.len()
@@ -57,6 +57,7 @@ proc new*(
      getProtocolId(portalNetwork, PortalSubnetwork.state),
      toContentIdHandler,
      createGetHandler(contentDB),
      createRadiusHandler(contentDB),
      s,
      bootstrapRecords,
      config = portalConfig,
@@ -155,6 +155,8 @@ type
    contentKey: ContentKeyByteList, contentId: ContentId, content: seq[byte]
  ) {.raises: [], gcsafe.}

  DbRadiusHandler* = proc(): UInt256 {.raises: [], gcsafe.}

  PortalProtocolId* = array[2, byte]

  RadiusCache* = LRUCache[NodeId, UInt256]

@@ -182,8 +184,7 @@ type
    toContentId*: ToContentIdHandler
    dbGet*: DbGetHandler
    dbPut*: DbStoreHandler
    radiusConfig: RadiusConfig
    dataRadius*: UInt256
    dataRadius*: DbRadiusHandler
    bootstrapRecords*: seq[Record]
    lastLookup: chronos.Moment
    refreshLoop: Future[void]

@@ -319,8 +320,8 @@ func inRange(
  let distance = p.distance(nodeId, contentId)
  distance <= nodeRadius

func inRange*(p: PortalProtocol, contentId: ContentId): bool =
  p.inRange(p.localNode.id, p.dataRadius, contentId)
proc inRange*(p: PortalProtocol, contentId: ContentId): bool =
  p.inRange(p.localNode.id, p.dataRadius(), contentId)

func truncateEnrs(
    nodes: seq[Node], maxSize: int, enrOverhead: int

@@ -339,7 +340,7 @@ func truncateEnrs(

  enrs

func handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte] =
proc handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte] =
  # TODO: This should become custom per Portal Network
  # TODO: Need to think about the effect of malicious actor sending lots of
  # pings from different nodes to clear the LRU.

@@ -351,7 +352,7 @@ func handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte]
    return @[]
  p.radiusCache.put(srcId, customPayloadDecoded.dataRadius)

  let customPayload = CustomPayload(dataRadius: p.dataRadius)
  let customPayload = CustomPayload(dataRadius: p.dataRadius())
  let p = PongMessage(
    enrSeq: p.localNode.record.seqNum,
    customPayload: ByteList[2048](SSZ.encode(customPayload)),

@@ -560,13 +561,12 @@ proc new*(
    protocolId: PortalProtocolId,
    toContentId: ToContentIdHandler,
    dbGet: DbGetHandler,
    dbRadius: DbRadiusHandler,
    stream: PortalStream,
    bootstrapRecords: openArray[Record] = [],
    distanceCalculator: DistanceCalculator = XorDistanceCalculator,
    config: PortalProtocolConfig = defaultPortalProtocolConfig,
): T =
  let initialRadius: UInt256 = config.radiusConfig.getInitialRadius()

  let proto = PortalProtocol(
    protocolHandler: messageHandler,
    protocolId: protocolId,

@@ -577,8 +577,7 @@ proc new*(
    baseProtocol: baseProtocol,
    toContentId: toContentId,
    dbGet: dbGet,
    radiusConfig: config.radiusConfig,
    dataRadius: initialRadius,
    dataRadius: dbRadius,
    bootstrapRecords: @bootstrapRecords,
    stream: stream,
    radiusCache: RadiusCache.init(256),

@@ -647,7 +646,7 @@ proc reqResponse[Request: SomeMessage, Response: SomeMessage](
proc pingImpl*(
    p: PortalProtocol, dst: Node
): Future[PortalResult[PongMessage]] {.async: (raises: [CancelledError]).} =
  let customPayload = CustomPayload(dataRadius: p.dataRadius)
  let customPayload = CustomPayload(dataRadius: p.dataRadius())
  let ping = PingMessage(
    enrSeq: p.localNode.record.seqNum,
    customPayload: ByteList[2048](SSZ.encode(customPayload)),
@@ -73,18 +73,6 @@ func fromLogRadius*(T: type UInt256, logRadius: uint16): T =
  # Get the max value of the logRadius range
  pow((2).stuint(256), logRadius) - 1

func getInitialRadius*(rc: RadiusConfig): UInt256 =
  case rc.kind
  of Static:
    return UInt256.fromLogRadius(rc.logRadius)
  of Dynamic:
    # In case of a dynamic radius we start from the maximum value to quickly
    # gather as much data as possible, and also make sure each data piece in
    # the database is in our range after a node restart.
    # Alternative would be to store node the radius in database, and initialize
    # it from database after a restart
    return UInt256.high()

## Confutils parsers

proc parseCmdArg*(T: type RadiusConfig, p: string): T {.raises: [ValueError].} =
@@ -79,6 +79,8 @@ proc new*(
      config.dataDir / network.getDbDirectory() / "contentdb_" &
        discovery.localNode.id.toBytesBE().toOpenArray(0, 8).toHex(),
      storageCapacity = config.storageCapacity,
      radiusConfig = config.portalConfig.radiusConfig,
      localId = discovery.localNode.id,
    )
    # TODO: Portal works only over mainnet data currently
    networkData = loadNetworkData("mainnet")
@@ -115,7 +115,9 @@ proc newStateNode*(
): StateNode {.raises: [CatchableError].} =
  let
    node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
    db = ContentDB.new("", uint32.high, inMemory = true)
    db = ContentDB.new(
      "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
    )
    sm = StreamManager.new(node)
    hn = HistoryNetwork.new(PortalNetwork.none, node, db, sm, FinishedAccumulator())
    sn =
@@ -22,7 +22,9 @@ suite "Content Database":
  # underlying kvstore.
  test "ContentDB basic API":
    let
      db = ContentDB.new("", uint32.high, inMemory = true)
      db = ContentDB.new(
        "", uint32.high, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
      )
      key = ContentId(UInt256.high()) # Some key

    block:

@@ -50,7 +52,9 @@ suite "Content Database":
      db.contains(key) == false

  test "ContentDB size":
    let db = ContentDB.new("", uint32.high, inMemory = true)
    let db = ContentDB.new(
      "", uint32.high, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
    )

    let numBytes = 10000
    let size1 = db.size()

@@ -97,7 +101,9 @@ suite "Content Database":
    # both.
    let
      storageCapacity = 100_000'u64
      db = ContentDB.new("", storageCapacity, inMemory = true)
      db = ContentDB.new(
        "", storageCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
      )

      furthestElement = u256(40)
      secondFurthest = u256(30)

@@ -147,7 +153,9 @@ suite "Content Database":

    let
      rng = newRng()
      db = ContentDB.new("", startCapacity, inMemory = true)
      db = ContentDB.new(
        "", startCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
      )
      localId = UInt256.fromHex(
        "30994892f3e4889d99deb5340050510d1842778acc7a7948adffa475fed51d6e"
      )

@@ -167,12 +175,27 @@ suite "Content Database":

    db.storageCapacity = endCapacity

    let
      oldRadiusApproximation = db.getLargestDistance(localId)
      newRadius = db.estimateNewRadius(oldRadiusApproximation)
    let newRadius = db.estimateNewRadius(RadiusConfig(kind: Dynamic))

    db.forcePrune(localId, newRadius)

    let diff = abs(db.size() - int64(db.storageCapacity))
    # Quite a big marging (20%) is added as it is all an approximation.
    check diff < int64(float(db.storageCapacity) * 0.20)

  test "ContentDB radius - start with full radius":
    let
      storageCapacity = 100_000'u64
      db = ContentDB.new(
        "", storageCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
      )
      radiusHandler = createRadiusHandler(db)

    check radiusHandler() == UInt256.high()

  test "ContentDB radius - 0 capacity":
    let
      db = ContentDB.new("", 0, RadiusConfig(kind: Dynamic), u256(0), inMemory = true)
      radiusHandler = createRadiusHandler(db)

    check radiusHandler() == UInt256.low()
@@ -26,7 +26,9 @@ proc newHistoryNode(
): HistoryNode =
  let
    node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
    db = ContentDB.new("", uint32.high, inMemory = true)
    db = ContentDB.new(
      "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
    )
    streamManager = StreamManager.new(node)
    historyNetwork =
      HistoryNetwork.new(PortalNetwork.none, node, db, streamManager, accumulator)
@@ -37,7 +37,9 @@ proc initPortalProtocol(
): PortalProtocol =
  let
    d = initDiscoveryNode(rng, privKey, address, bootstrapRecords)
    db = ContentDB.new("", uint32.high, inMemory = true)
    db = ContentDB.new(
      "", uint32.high, RadiusConfig(kind: Dynamic), d.localNode.id, inMemory = true
    )
    manager = StreamManager.new(d)
    q = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
    stream = manager.registerNewStream(q)

@@ -47,6 +49,7 @@ proc initPortalProtocol(
      protocolId,
      toContentId,
      createGetHandler(db),
      createRadiusHandler(db),
      stream,
      bootstrapRecords = bootstrapRecords,
    )

@@ -331,13 +334,21 @@ procSuite "Portal Wire Protocol Tests":
      node1 = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(20303))

      dbLimit = 400_000'u32
      db = ContentDB.new("", dbLimit, inMemory = true)
      db = ContentDB.new(
        "", dbLimit, RadiusConfig(kind: Dynamic), node1.localNode.id, inMemory = true
      )
      m = StreamManager.new(node1)
      q = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
      stream = m.registerNewStream(q)

      proto1 =
        PortalProtocol.new(node1, protocolId, toContentId, createGetHandler(db), stream)
      proto1 = PortalProtocol.new(
        node1,
        protocolId,
        toContentId,
        createGetHandler(db),
        createRadiusHandler(db),
        stream,
      )

    proto1.dbPut = createStoreHandler(db, defaultRadiusConfig, proto1)

@@ -360,9 +371,9 @@ procSuite "Portal Wire Protocol Tests":
      db.get((distances[2] xor proto1.localNode.id)).isNone()
      db.get((distances[3] xor proto1.localNode.id)).isSome()
      # The radius has been updated and is lower than the maximum start value.
      proto1.dataRadius < UInt256.high
      proto1.dataRadius() < UInt256.high
      # Yet higher than or equal to the furthest non deleted element.
      proto1.dataRadius >= distances[3]
      proto1.dataRadius() >= distances[3]

    proto1.stop()
    await node1.closeWait()
@@ -69,7 +69,13 @@ func generateRandomU256(rng: var HmacDrbgContext): UInt256 =
proc cmdGenerate(conf: DbConf) =
  let
    rng = newRng()
    db = ContentDB.new(conf.databaseDir.string, maxDbSize, inMemory = false)
    db = ContentDB.new(
      conf.databaseDir.string,
      maxDbSize,
      RadiusConfig(kind: Dynamic),
      u256(0),
      inMemory = false,
    )
    bytes = newSeq[byte](conf.contentSize)

  for i in 0 ..< conf.contentAmount:

@@ -79,7 +85,13 @@ proc cmdGenerate(conf: DbConf) =
proc cmdBench(conf: DbConf) =
  let
    rng = newRng()
    db = ContentDB.new(conf.databaseDir.string, 4_000_000_000'u64, inMemory = false)
    db = ContentDB.new(
      conf.databaseDir.string,
      4_000_000_000'u64,
      RadiusConfig(kind: Dynamic),
      u256(0),
      inMemory = false,
    )
    bytes = newSeq[byte](conf.contentSize)

  var timers: array[Timers, RunningStat]

@@ -126,6 +138,8 @@ proc cmdPrune(conf: DbConf) =
  let db = ContentDB.new(
    conf.databaseDir.string,
    storageCapacity = 1_000_000, # Doesn't matter if only space reclaiming is done
    RadiusConfig(kind: Dynamic),
    u256(0),
    manualCheckpoint = true,
  )
@@ -243,7 +243,9 @@ proc run(config: PortalCliConf) =
  d.open()

  let
    db = ContentDB.new("", config.storageSize, inMemory = true)
    db = ContentDB.new(
      "", config.storageSize, defaultRadiusConfig, d.localNode.id, inMemory = true
    )
    sm = StreamManager.new(d)
    cq = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
    stream = sm.registerNewStream(cq)

@@ -252,6 +254,7 @@ proc run(config: PortalCliConf) =
      config.protocolId,
      testContentIdHandler,
      createGetHandler(db),
      createRadiusHandler(db),
      stream,
      bootstrapRecords = bootstrapRecords,
    )