Reverse calculate the radius at node restart (#2593)
This avoids always restarting the node with a full radius, which caused the node to be bombarded with offers that it then had to delete anyway. To implement this, several changes were needed: the radius moves from its previous location in the Portal wire protocol to the contentDB and beaconDB, which is conceptually more correct anyway. The radius is now part of the database objects, and the Portal wire protocol accesses its value through a handler.
parent d39c589ec3
commit 0a80a3bb25
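The core of the change is visible in the diff below: the dataRadius field moves into ContentDB/BeaconDb, ContentDB.new gains radiusConfig and localId parameters so setInitialRadius can derive a starting radius from the content already stored, and PortalProtocol now reads the radius through a DbRadiusHandler callback instead of owning it. The following standalone Nim sketch only illustrates that handler indirection; it is not the project's code. Radius, MockDb and MockProtocol are made-up stand-ins (the real code uses stint's UInt256, ContentDB and PortalProtocol), while the handler signature and the 0.95 near-full threshold mirror what the diff adds.

# Standalone Nim sketch of the radius-handler indirection (illustration only).
# `Radius`, `MockDb` and `MockProtocol` are hypothetical stand-ins for the
# real UInt256 / ContentDB / PortalProtocol types in this repository.
type
  Radius = uint64 # stand-in for UInt256
  DbRadiusHandler = proc(): Radius {.raises: [], gcsafe.}

  MockDb = ref object
    storageCapacity: uint64
    usedSize: uint64
    largestDistance: Radius
    dataRadius: Radius # the radius now lives with the database

  MockProtocol = ref object
    dataRadius: DbRadiusHandler # the wire protocol only keeps a handler

proc setInitialRadius(db: MockDb) =
  # On restart, derive the radius from what is already stored instead of
  # always starting with a full radius: near-full capacity clamps to the
  # furthest stored item, otherwise start from the maximum.
  if db.storageCapacity == 0:
    db.dataRadius = Radius(0)
  elif db.usedSize.float / db.storageCapacity.float > 0.95:
    db.dataRadius = db.largestDistance
  else:
    db.dataRadius = Radius.high

proc createRadiusHandler(db: MockDb): DbRadiusHandler =
  # The protocol reads the database-owned radius through this closure.
  return proc(): Radius {.raises: [], gcsafe.} =
    db.dataRadius

when isMainModule:
  let db = MockDb(storageCapacity: 100_000, usedSize: 99_000, largestDistance: 1234)
  db.setInitialRadius()
  let p = MockProtocol(dataRadius: createRadiusHandler(db))
  echo p.dataRadius() # prints 1234: the radius was clamped, not reset to max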
@@ -18,7 +18,7 @@ import
   ../network/wire/[portal_protocol, portal_protocol_config],
   ./content_db_custom_sql_functions

-export kvstore_sqlite3
+export kvstore_sqlite3, portal_protocol_config

 # This version of content db is the most basic, simple solution where data is
 # stored no matter what content type or content network in the same kvstore with
@@ -53,6 +53,8 @@ type
     kv: KvStoreRef
     manualCheckpoint: bool
     storageCapacity*: uint64
+    dataRadius*: UInt256
+    localId: NodeId
     sizeStmt: SqliteStmt[NoParams, int64]
     unusedSizeStmt: SqliteStmt[NoParams, int64]
     vacuumStmt: SqliteStmt[NoParams, void]
@@ -80,10 +82,117 @@ template expectDb(x: auto): untyped =
   # full disk - this requires manual intervention, so we'll panic for now
   x.expect("working database (disk broken/full?)")

+## Public calls to get database size, content size and similar.
+
+proc size*(db: ContentDB): int64 =
+  ## Return current size of DB as product of sqlite page_count and page_size:
+  ## https://www.sqlite.org/pragma.html#pragma_page_count
+  ## https://www.sqlite.org/pragma.html#pragma_page_size
+  ## It returns the total size of db on the disk, i.e both data and metadata
+  ## used to store content.
+  ## It is worth noting that when deleting content, the size may lag behind due
+  ## to the way how deleting works in sqlite.
+  ## Good description can be found in: https://www.sqlite.org/lang_vacuum.html
+  var size: int64 = 0
+  discard (
+    db.sizeStmt.exec do(res: int64):
+      size = res).expectDb()
+  return size
+
+proc unusedSize(db: ContentDB): int64 =
+  ## Returns the total size of the pages which are unused by the database,
+  ## i.e they can be re-used for new content.
+  var size: int64 = 0
+  discard (
+    db.unusedSizeStmt.exec do(res: int64):
+      size = res).expectDb()
+  return size
+
+proc usedSize*(db: ContentDB): int64 =
+  ## Returns the total size of the database (data + metadata) minus the unused
+  ## pages.
+  db.size() - db.unusedSize()
+
+proc contentSize*(db: ContentDB): int64 =
+  ## Returns total size of the content stored in DB.
+  var size: int64 = 0
+  discard (
+    db.contentSizeStmt.exec do(res: int64):
+      size = res).expectDb()
+  return size
+
+proc contentCount*(db: ContentDB): int64 =
+  var count: int64 = 0
+  discard (
+    db.contentCountStmt.exec do(res: int64):
+      count = res).expectDb()
+  return count
+
+## Radius estimation and initialization related calls
+
+proc getLargestDistance*(db: ContentDB, localId: UInt256): UInt256 =
+  var distanceBytes: array[32, byte]
+  discard (
+    db.largestDistanceStmt.exec(
+      localId.toBytesBE(),
+      proc(res: array[32, byte]) =
+        distanceBytes = res,
+    )
+  ).expectDb()
+
+  return UInt256.fromBytesBE(distanceBytes)
+
+func estimateNewRadius(
+    currentSize: uint64, storageCapacity: uint64, currentRadius: UInt256
+): UInt256 =
+  if storageCapacity == 0:
+    return 0.stuint(256)
+
+  let sizeRatio = currentSize div storageCapacity
+  if sizeRatio > 0:
+    currentRadius div sizeRatio.stuint(256)
+  else:
+    currentRadius
+
+func estimateNewRadius*(db: ContentDB, rc: RadiusConfig): UInt256 =
+  ## Rough estimation of the new radius for pruning when adjusting the storage
+  ## capacity.
+  case rc.kind
+  of Static:
+    UInt256.fromLogRadius(rc.logRadius)
+  of Dynamic:
+    if db.storageCapacity == 0:
+      return 0.stuint(256)
+
+    let oldRadiusApproximation = db.getLargestDistance(db.localId)
+    estimateNewRadius(uint64(db.usedSize()), db.storageCapacity, oldRadiusApproximation)
+
+func setInitialRadius*(db: ContentDB, rc: RadiusConfig) =
+  ## Set the initial radius based on the radius config and the storage capacity
+  ## and furthest distance of the content in the database.
+  ## In case of a dynamic radius, if the storage capacity is near full, the
+  ## radius will be set to the largest distance of the content in the database.
+  ## Else the radius will be set to the maximum value.
+  case rc.kind
+  of Static:
+    db.dataRadius = UInt256.fromLogRadius(rc.logRadius)
+  of Dynamic:
+    if db.storageCapacity == 0:
+      db.dataRadius = 0.stuint(256)
+      return

+    let sizeRatio = db.usedSize().float / db.storageCapacity.float
+    if sizeRatio > 0.95:
+      db.dataRadius = db.getLargestDistance(db.localId)
+    else:
+      db.dataRadius = UInt256.high()
+
 proc new*(
     T: type ContentDB,
     path: string,
     storageCapacity: uint64,
+    radiusConfig: RadiusConfig,
+    localId: NodeId,
     inMemory = false,
     manualCheckpoint = false,
 ): ContentDB =
@@ -141,7 +250,7 @@ proc new*(
       "SELECT max(xorDistance(?, key)) FROM kvstore", array[32, byte], array[32, byte]
     )[]

-  ContentDB(
+  let contentDb = ContentDB(
     kv: kvStore,
     backend: db,
     manualCheckpoint: manualCheckpoint,
@@ -156,6 +265,9 @@ proc new*(
     largestDistanceStmt: largestDistanceStmt,
   )

+  contentDb.setInitialRadius(radiusConfig)
+  contentDb
+
 template disposeSafe(s: untyped): untyped =
   if distinctBase(s) != nil:
     s.dispose()
@@ -237,78 +349,8 @@ proc del*(db: ContentDB, key: ContentId) =
 proc getSszDecoded*(db: ContentDB, key: ContentId, T: type auto): Opt[T] =
   db.getSszDecoded(key.toBytesBE(), T)

-## Public calls to get database size, content size and similar.
-
-proc size*(db: ContentDB): int64 =
-  ## Return current size of DB as product of sqlite page_count and page_size:
-  ## https://www.sqlite.org/pragma.html#pragma_page_count
-  ## https://www.sqlite.org/pragma.html#pragma_page_size
-  ## It returns the total size of db on the disk, i.e both data and metadata
-  ## used to store content.
-  ## It is worth noting that when deleting content, the size may lag behind due
-  ## to the way how deleting works in sqlite.
-  ## Good description can be found in: https://www.sqlite.org/lang_vacuum.html
-  var size: int64 = 0
-  discard (
-    db.sizeStmt.exec do(res: int64):
-      size = res).expectDb()
-  return size
-
-proc unusedSize(db: ContentDB): int64 =
-  ## Returns the total size of the pages which are unused by the database,
-  ## i.e they can be re-used for new content.
-  var size: int64 = 0
-  discard (
-    db.unusedSizeStmt.exec do(res: int64):
-      size = res).expectDb()
-  return size
-
-proc usedSize*(db: ContentDB): int64 =
-  ## Returns the total size of the database (data + metadata) minus the unused
-  ## pages.
-  db.size() - db.unusedSize()
-
-proc contentSize*(db: ContentDB): int64 =
-  ## Returns total size of the content stored in DB.
-  var size: int64 = 0
-  discard (
-    db.contentSizeStmt.exec do(res: int64):
-      size = res).expectDb()
-  return size
-
-proc contentCount*(db: ContentDB): int64 =
-  var count: int64 = 0
-  discard (
-    db.contentCountStmt.exec do(res: int64):
-      count = res).expectDb()
-  return count
-
 ## Pruning related calls

-proc getLargestDistance*(db: ContentDB, localId: UInt256): UInt256 =
-  var distanceBytes: array[32, byte]
-  discard (
-    db.largestDistanceStmt.exec(
-      localId.toBytesBE(),
-      proc(res: array[32, byte]) =
-        distanceBytes = res,
-    )
-  ).expectDb()
-
-  return UInt256.fromBytesBE(distanceBytes)
-
-func estimateNewRadius(
-    currentSize: uint64, storageCapacity: uint64, currentRadius: UInt256
-): UInt256 =
-  let sizeRatio = currentSize div storageCapacity
-  if sizeRatio > 0:
-    currentRadius div sizeRatio.stuint(256)
-  else:
-    currentRadius
-
-func estimateNewRadius*(db: ContentDB, currentRadius: UInt256): UInt256 =
-  estimateNewRadius(uint64(db.usedSize()), db.storageCapacity, currentRadius)
-
 proc deleteContentFraction*(
     db: ContentDB, target: UInt256, fraction: float64
 ): (UInt256, int64, int64, int64) =
@@ -419,12 +461,12 @@ proc put*(
   )

 proc adjustRadius(
-    p: PortalProtocol, deletedFraction: float64, distanceOfFurthestElement: UInt256
+    db: ContentDB, deletedFraction: float64, distanceOfFurthestElement: UInt256
 ) =
   # Invert fraction as the UInt256 implementation does not support
   # multiplication by float
   let invertedFractionAsInt = int64(1.0 / deletedFraction)
-  let scaledRadius = p.dataRadius div u256(invertedFractionAsInt)
+  let scaledRadius = db.dataRadius div u256(invertedFractionAsInt)

   # Choose a larger value to avoid the situation where the
   # `distanceOfFurthestElement is very close to the local id so that the local
@@ -433,12 +475,12 @@ proc adjustRadius(
   let newRadius = max(scaledRadius, distanceOfFurthestElement)

   info "Database radius adjusted",
-    oldRadius = p.dataRadius, newRadius = newRadius, distanceOfFurthestElement
+    oldRadius = db.dataRadius, newRadius = newRadius, distanceOfFurthestElement

   # Both scaledRadius and distanceOfFurthestElement are smaller than current
   # dataRadius, so the radius will constantly decrease through the node its
   # lifetime.
-  p.dataRadius = newRadius
+  db.dataRadius = newRadius

 proc createGetHandler*(db: ContentDB): DbGetHandler =
   return (
@@ -473,7 +515,7 @@ proc createStoreHandler*(
       )

       if res.deletedFraction > 0.0:
-        p.adjustRadius(res.deletedFraction, res.distanceOfFurthestElement)
+        db.adjustRadius(res.deletedFraction, res.distanceOfFurthestElement)
       else:
         # Note:
         # This can occur when the furthest content is bigger than the fraction
@@ -488,3 +530,9 @@ proc createStoreHandler*(
         # so we will effectivly store fraction of the network
         db.put(contentId, content)
   )
+
+proc createRadiusHandler*(db: ContentDB): DbRadiusHandler =
+  return (
+    proc(): UInt256 {.raises: [], gcsafe.} =
+      db.dataRadius
+  )
@@ -129,16 +129,12 @@ proc run(config: PortalConf) {.raises: [CatchableError].} =
       config.dataDir / config.network.getDbDirectory() / "contentdb_" &
         d.localNode.id.toBytesBE().toOpenArray(0, 8).toHex(),
       storageCapacity = config.storageCapacityMB * 1_000_000,
+      radiusConfig = config.radiusConfig,
+      localId = d.localNode.id,
       manualCheckpoint = true,
     )

-  let radius =
-    if config.radiusConfig.kind == Static:
-      UInt256.fromLogRadius(config.radiusConfig.logRadius)
-    else:
-      let oldRadiusApproximation = db.getLargestDistance(d.localNode.id)
-      db.estimateNewRadius(oldRadiusApproximation)
+  let radius = db.estimateNewRadius(config.radiusConfig)

   # Note: In the case of dynamical radius this is all an approximation that
   # heavily relies on uniformly distributed content and thus will always
   # have an error margin, either down or up of the requested capacity.
@@ -22,7 +22,7 @@ import
   ./beacon_content,
   ./beacon_chain_historical_summaries,
   ./beacon_init_loader,
-  ../wire/portal_protocol
+  ../wire/[portal_protocol, portal_protocol_config]

 from beacon_chain/spec/helpers import is_better_update, toMeta

@@ -38,6 +38,7 @@ type
   BeaconDb* = ref object
     backend: SqStoreRef
     kv: KvStoreRef
+    dataRadius*: UInt256
     bestUpdates: BestLightClientUpdateStore
     forkDigests: ForkDigests
     cfg*: RuntimeConfig
@@ -159,6 +160,7 @@ proc new*(
   BeaconDb(
     backend: db,
     kv: kvStore,
+    dataRadius: UInt256.high(), # Radius to max to accept all data
     bestUpdates: bestUpdates,
     cfg: networkData.metadata.cfg,
     forkDigests: (newClone networkData.forks)[],
@@ -411,3 +413,9 @@ proc createStoreHandler*(db: BeaconDb): DbStoreHandler =
       else:
         db.put(contentId, content)
   )
+
+proc createRadiusHandler*(db: BeaconDb): DbRadiusHandler =
+  return (
+    proc(): UInt256 {.raises: [], gcsafe.} =
+      db.dataRadius
+  )
@@ -196,23 +196,15 @@ proc new*(

     stream = streamManager.registerNewStream(contentQueue)

-    # Need to adjust the radius to a static max value as for the Beacon chain
-    # network all data must be accepted currently.
-    portalConfigAdjusted = PortalProtocolConfig(
-      tableIpLimits: portalConfig.tableIpLimits,
-      bitsPerHop: portalConfig.bitsPerHop,
-      radiusConfig: RadiusConfig(kind: Static, logRadius: 256),
-      disablePoke: portalConfig.disablePoke,
-    )
-
     portalProtocol = PortalProtocol.new(
       baseProtocol,
       getProtocolId(portalNetwork, PortalSubnetwork.beacon),
       toContentIdHandler,
       createGetHandler(beaconDb),
+      createRadiusHandler(beaconDb),
       stream,
       bootstrapRecords,
-      config = portalConfigAdjusted,
+      config = portalConfig,
     )

   portalProtocol.dbPut = createStoreHandler(beaconDb)
@@ -692,6 +692,7 @@ proc new*(
       getProtocolId(portalNetwork, PortalSubnetwork.history),
       toContentIdHandler,
       createGetHandler(contentDB),
+      createRadiusHandler(contentDB),
       stream,
       bootstrapRecords,
       config = portalConfig,
@@ -752,10 +753,11 @@ proc statusLogLoop(n: HistoryNetwork) {.async: (raises: []).} =
       # radius drop.
       # TODO: Get some float precision calculus?
       let radiusPercentage =
-        n.portalProtocol.dataRadius div (UInt256.high() div u256(100))
+        n.portalProtocol.dataRadius() div (UInt256.high() div u256(100))

       info "History network status",
-        radius = radiusPercentage.toString(10) & "%",
+        radiusPercentage = radiusPercentage.toString(10) & "%",
+        radius = n.portalProtocol.dataRadius().toHex(),
         dbSize = $(n.contentDB.size() div 1000) & "kb",
         routingTableNodes = n.portalProtocol.routingTable.len()

@@ -57,6 +57,7 @@ proc new*(
       getProtocolId(portalNetwork, PortalSubnetwork.state),
       toContentIdHandler,
       createGetHandler(contentDB),
+      createRadiusHandler(contentDB),
       s,
       bootstrapRecords,
       config = portalConfig,
@@ -155,6 +155,8 @@ type
     contentKey: ContentKeyByteList, contentId: ContentId, content: seq[byte]
   ) {.raises: [], gcsafe.}

+  DbRadiusHandler* = proc(): UInt256 {.raises: [], gcsafe.}
+
   PortalProtocolId* = array[2, byte]

   RadiusCache* = LRUCache[NodeId, UInt256]
@@ -182,8 +184,7 @@ type
     toContentId*: ToContentIdHandler
     dbGet*: DbGetHandler
     dbPut*: DbStoreHandler
-    radiusConfig: RadiusConfig
-    dataRadius*: UInt256
+    dataRadius*: DbRadiusHandler
     bootstrapRecords*: seq[Record]
     lastLookup: chronos.Moment
     refreshLoop: Future[void]
@@ -319,8 +320,8 @@ func inRange(
   let distance = p.distance(nodeId, contentId)
   distance <= nodeRadius

-func inRange*(p: PortalProtocol, contentId: ContentId): bool =
-  p.inRange(p.localNode.id, p.dataRadius, contentId)
+proc inRange*(p: PortalProtocol, contentId: ContentId): bool =
+  p.inRange(p.localNode.id, p.dataRadius(), contentId)

 func truncateEnrs(
     nodes: seq[Node], maxSize: int, enrOverhead: int
@@ -339,7 +340,7 @@ func truncateEnrs(

   enrs

-func handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte] =
+proc handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte] =
   # TODO: This should become custom per Portal Network
   # TODO: Need to think about the effect of malicious actor sending lots of
   # pings from different nodes to clear the LRU.
@@ -351,7 +352,7 @@ func handlePing(p: PortalProtocol, ping: PingMessage, srcId: NodeId): seq[byte]
     return @[]
   p.radiusCache.put(srcId, customPayloadDecoded.dataRadius)

-  let customPayload = CustomPayload(dataRadius: p.dataRadius)
+  let customPayload = CustomPayload(dataRadius: p.dataRadius())
   let p = PongMessage(
     enrSeq: p.localNode.record.seqNum,
     customPayload: ByteList[2048](SSZ.encode(customPayload)),
@@ -560,13 +561,12 @@ proc new*(
     protocolId: PortalProtocolId,
     toContentId: ToContentIdHandler,
     dbGet: DbGetHandler,
+    dbRadius: DbRadiusHandler,
     stream: PortalStream,
     bootstrapRecords: openArray[Record] = [],
     distanceCalculator: DistanceCalculator = XorDistanceCalculator,
     config: PortalProtocolConfig = defaultPortalProtocolConfig,
 ): T =
-  let initialRadius: UInt256 = config.radiusConfig.getInitialRadius()
-
   let proto = PortalProtocol(
     protocolHandler: messageHandler,
     protocolId: protocolId,
@@ -577,8 +577,7 @@ proc new*(
     baseProtocol: baseProtocol,
     toContentId: toContentId,
     dbGet: dbGet,
-    radiusConfig: config.radiusConfig,
-    dataRadius: initialRadius,
+    dataRadius: dbRadius,
     bootstrapRecords: @bootstrapRecords,
     stream: stream,
     radiusCache: RadiusCache.init(256),
@@ -647,7 +646,7 @@ proc reqResponse[Request: SomeMessage, Response: SomeMessage](
 proc pingImpl*(
     p: PortalProtocol, dst: Node
 ): Future[PortalResult[PongMessage]] {.async: (raises: [CancelledError]).} =
-  let customPayload = CustomPayload(dataRadius: p.dataRadius)
+  let customPayload = CustomPayload(dataRadius: p.dataRadius())
   let ping = PingMessage(
     enrSeq: p.localNode.record.seqNum,
     customPayload: ByteList[2048](SSZ.encode(customPayload)),
@@ -73,18 +73,6 @@ func fromLogRadius*(T: type UInt256, logRadius: uint16): T =
   # Get the max value of the logRadius range
   pow((2).stuint(256), logRadius) - 1

-func getInitialRadius*(rc: RadiusConfig): UInt256 =
-  case rc.kind
-  of Static:
-    return UInt256.fromLogRadius(rc.logRadius)
-  of Dynamic:
-    # In case of a dynamic radius we start from the maximum value to quickly
-    # gather as much data as possible, and also make sure each data piece in
-    # the database is in our range after a node restart.
-    # Alternative would be to store node the radius in database, and initialize
-    # it from database after a restart
-    return UInt256.high()
-
 ## Confutils parsers

 proc parseCmdArg*(T: type RadiusConfig, p: string): T {.raises: [ValueError].} =
@@ -79,6 +79,8 @@ proc new*(
       config.dataDir / network.getDbDirectory() / "contentdb_" &
         discovery.localNode.id.toBytesBE().toOpenArray(0, 8).toHex(),
       storageCapacity = config.storageCapacity,
+      radiusConfig = config.portalConfig.radiusConfig,
+      localId = discovery.localNode.id,
     )
     # TODO: Portal works only over mainnet data currently
     networkData = loadNetworkData("mainnet")
@@ -115,7 +115,9 @@ proc newStateNode*(
 ): StateNode {.raises: [CatchableError].} =
   let
     node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
-    db = ContentDB.new("", uint32.high, inMemory = true)
+    db = ContentDB.new(
+      "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
+    )
     sm = StreamManager.new(node)
     hn = HistoryNetwork.new(PortalNetwork.none, node, db, sm, FinishedAccumulator())
     sn =
@@ -22,7 +22,9 @@ suite "Content Database":
   # underlying kvstore.
   test "ContentDB basic API":
     let
-      db = ContentDB.new("", uint32.high, inMemory = true)
+      db = ContentDB.new(
+        "", uint32.high, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
+      )
       key = ContentId(UInt256.high()) # Some key

     block:
@@ -50,7 +52,9 @@ suite "Content Database":
       db.contains(key) == false

   test "ContentDB size":
-    let db = ContentDB.new("", uint32.high, inMemory = true)
+    let db = ContentDB.new(
+      "", uint32.high, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
+    )

     let numBytes = 10000
     let size1 = db.size()
@@ -97,7 +101,9 @@ suite "Content Database":
     # both.
     let
       storageCapacity = 100_000'u64
-      db = ContentDB.new("", storageCapacity, inMemory = true)
+      db = ContentDB.new(
+        "", storageCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
+      )

       furthestElement = u256(40)
       secondFurthest = u256(30)
@@ -147,7 +153,9 @@ suite "Content Database":

     let
       rng = newRng()
-      db = ContentDB.new("", startCapacity, inMemory = true)
+      db = ContentDB.new(
+        "", startCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
+      )
       localId = UInt256.fromHex(
         "30994892f3e4889d99deb5340050510d1842778acc7a7948adffa475fed51d6e"
       )
@@ -167,12 +175,27 @@ suite "Content Database":

     db.storageCapacity = endCapacity

-    let
-      oldRadiusApproximation = db.getLargestDistance(localId)
-      newRadius = db.estimateNewRadius(oldRadiusApproximation)
+    let newRadius = db.estimateNewRadius(RadiusConfig(kind: Dynamic))

     db.forcePrune(localId, newRadius)

     let diff = abs(db.size() - int64(db.storageCapacity))
     # Quite a big marging (20%) is added as it is all an approximation.
     check diff < int64(float(db.storageCapacity) * 0.20)
+
+  test "ContentDB radius - start with full radius":
+    let
+      storageCapacity = 100_000'u64
+      db = ContentDB.new(
+        "", storageCapacity, RadiusConfig(kind: Dynamic), u256(0), inMemory = true
+      )
+      radiusHandler = createRadiusHandler(db)
+
+    check radiusHandler() == UInt256.high()
+
+  test "ContentDB radius - 0 capacity":
+    let
+      db = ContentDB.new("", 0, RadiusConfig(kind: Dynamic), u256(0), inMemory = true)
+      radiusHandler = createRadiusHandler(db)
+
+    check radiusHandler() == UInt256.low()
@@ -26,7 +26,9 @@ proc newHistoryNode(
 ): HistoryNode =
   let
     node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
-    db = ContentDB.new("", uint32.high, inMemory = true)
+    db = ContentDB.new(
+      "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
+    )
     streamManager = StreamManager.new(node)
     historyNetwork =
       HistoryNetwork.new(PortalNetwork.none, node, db, streamManager, accumulator)
@@ -37,7 +37,9 @@ proc initPortalProtocol(
 ): PortalProtocol =
   let
     d = initDiscoveryNode(rng, privKey, address, bootstrapRecords)
-    db = ContentDB.new("", uint32.high, inMemory = true)
+    db = ContentDB.new(
+      "", uint32.high, RadiusConfig(kind: Dynamic), d.localNode.id, inMemory = true
+    )
     manager = StreamManager.new(d)
     q = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
     stream = manager.registerNewStream(q)
@@ -47,6 +49,7 @@ proc initPortalProtocol(
     protocolId,
     toContentId,
     createGetHandler(db),
+    createRadiusHandler(db),
     stream,
     bootstrapRecords = bootstrapRecords,
   )
@@ -331,13 +334,21 @@ procSuite "Portal Wire Protocol Tests":
       node1 = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(20303))

       dbLimit = 400_000'u32
-      db = ContentDB.new("", dbLimit, inMemory = true)
+      db = ContentDB.new(
+        "", dbLimit, RadiusConfig(kind: Dynamic), node1.localNode.id, inMemory = true
+      )
       m = StreamManager.new(node1)
       q = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
       stream = m.registerNewStream(q)

-      proto1 =
-        PortalProtocol.new(node1, protocolId, toContentId, createGetHandler(db), stream)
+      proto1 = PortalProtocol.new(
+        node1,
+        protocolId,
+        toContentId,
+        createGetHandler(db),
+        createRadiusHandler(db),
+        stream,
+      )

     proto1.dbPut = createStoreHandler(db, defaultRadiusConfig, proto1)

@@ -360,9 +371,9 @@ procSuite "Portal Wire Protocol Tests":
       db.get((distances[2] xor proto1.localNode.id)).isNone()
       db.get((distances[3] xor proto1.localNode.id)).isSome()
       # The radius has been updated and is lower than the maximum start value.
-      proto1.dataRadius < UInt256.high
+      proto1.dataRadius() < UInt256.high
      # Yet higher than or equal to the furthest non deleted element.
-      proto1.dataRadius >= distances[3]
+      proto1.dataRadius() >= distances[3]

     proto1.stop()
     await node1.closeWait()
@@ -69,7 +69,13 @@ func generateRandomU256(rng: var HmacDrbgContext): UInt256 =
 proc cmdGenerate(conf: DbConf) =
   let
     rng = newRng()
-    db = ContentDB.new(conf.databaseDir.string, maxDbSize, inMemory = false)
+    db = ContentDB.new(
+      conf.databaseDir.string,
+      maxDbSize,
+      RadiusConfig(kind: Dynamic),
+      u256(0),
+      inMemory = false,
+    )
     bytes = newSeq[byte](conf.contentSize)

   for i in 0 ..< conf.contentAmount:
@@ -79,7 +85,13 @@ proc cmdGenerate(conf: DbConf) =
 proc cmdBench(conf: DbConf) =
   let
     rng = newRng()
-    db = ContentDB.new(conf.databaseDir.string, 4_000_000_000'u64, inMemory = false)
+    db = ContentDB.new(
+      conf.databaseDir.string,
+      4_000_000_000'u64,
+      RadiusConfig(kind: Dynamic),
+      u256(0),
+      inMemory = false,
+    )
     bytes = newSeq[byte](conf.contentSize)

   var timers: array[Timers, RunningStat]
@@ -126,6 +138,8 @@ proc cmdPrune(conf: DbConf) =
   let db = ContentDB.new(
     conf.databaseDir.string,
     storageCapacity = 1_000_000, # Doesn't matter if only space reclaiming is done
+    RadiusConfig(kind: Dynamic),
+    u256(0),
     manualCheckpoint = true,
   )

@@ -243,7 +243,9 @@ proc run(config: PortalCliConf) =
   d.open()

   let
-    db = ContentDB.new("", config.storageSize, inMemory = true)
+    db = ContentDB.new(
+      "", config.storageSize, defaultRadiusConfig, d.localNode.id, inMemory = true
+    )
     sm = StreamManager.new(d)
     cq = newAsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])](50)
     stream = sm.registerNewStream(cq)
@@ -252,6 +254,7 @@ proc run(config: PortalCliConf) =
     config.protocolId,
     testContentIdHandler,
     createGetHandler(db),
+    createRadiusHandler(db),
     stream,
     bootstrapRecords = bootstrapRecords,
   )