## Nim-Codex
## Copyright (c) 2022 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
|
2022-05-26 02:29:31 +00:00
|
|
|
import std/algorithm
|
2023-11-27 18:25:53 +00:00
|
|
|
import std/sequtils
|
2022-05-26 02:29:31 +00:00
|
|
|
|
2022-04-13 16:32:35 +00:00
|
|
|
import pkg/chronos
|
2023-08-01 23:47:57 +00:00
|
|
|
import pkg/libp2p/[cid, multicodec, routing_record, signed_envelope]
|
2022-04-13 16:32:35 +00:00
|
|
|
import pkg/questionable
|
|
|
|
import pkg/questionable/results
|
|
|
|
import pkg/stew/shims/net
|
2022-05-26 02:29:31 +00:00
|
|
|
import pkg/contractabi/address as ca
|
2024-04-02 12:31:52 +00:00
|
|
|
import pkg/codexdht/discv5/[routing_table, protocol as discv5]
|
2022-04-13 16:32:35 +00:00
|
|
|
|
2022-05-12 21:52:03 +00:00
|
|
|
import ./rng
|
|
|
|
import ./errors
|
feat: create logging proxy (#663)
* implement a logging proxy
The logging proxy:
- prevents the need to import chronicles (as well as export except toJson),
- prevents the need to override `writeValue` or use or import nim-json-seralization elsewhere in the codebase, allowing for sole use of utils/json for de/serialization,
- and handles json formatting correctly in chronicles json sinks
* Rename logging -> logutils to avoid ambiguity with common names
* clean up
* add setProperty for JsonRecord, remove nim-json-serialization conflict
* Allow specifying textlines and json format separately
Not specifying a LogFormat will apply the formatting to both textlines and json sinks.
Specifying a LogFormat will apply the formatting to only that sink.
* remove unneeded usages of std/json
We only need to import utils/json instead of std/json
* move serialization from rest/json to utils/json so it can be shared
* fix NoColors ambiguity
Was causing unit tests to fail on Windows.
* Remove nre usage to fix Windows error
Windows was erroring with `could not load: pcre64.dll`. Instead of fixing that error, remove the pcre usage :)
* Add logutils module doc
* Shorten logutils.formatIt for `NBytes`
Both json and textlines formatIt were not needed, and could be combined into one formatIt
* remove debug integration test config
debug output and logformat of json for integration test logs
* Use ## module doc to support docgen
* bump nim-poseidon2 to export fromBytes
Before the changes in this branch, fromBytes was likely being resolved by nim-stew, or other dependency. With the changes in this branch, that dependency was removed and fromBytes could no longer be resolved. By exporting fromBytes from nim-poseidon, the correct resolution is now happening.
* fixes to get compiling after rebasing master
* Add support for Result types being logged using formatIt
2024-01-23 07:35:03 +00:00
|
|
|
import ./logutils
|
2022-04-13 16:32:35 +00:00
|
|
|
|
|
|
|
export discv5
|
|
|
|
|
2022-05-26 02:29:31 +00:00
|
|
|
# TODO: If generics in methods had not been
|
|
|
|
# deprecated, this could have been implemented
|
|
|
|
# much more elegantly.
|
|
|
|
|
2022-11-02 00:58:41 +00:00
|
|
|
# Chronicles log scope: every log line emitted from this module is tagged
# with the "codex discovery" topic so it can be filtered in the sinks.
logScope:
  topics = "codex discovery"
|
|
|
|
|
2022-04-13 16:32:35 +00:00
|
|
|
type
  Discovery* = ref object of RootObj
    protocol*: discv5.Protocol # underlying discv5 DHT protocol instance
    key: PrivateKey # private key used to sign peer records
    peerId: PeerId # the peer id of the local node
    announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records
    providerRecord*: ?SignedPeerRecord # record to advertise node connection information; this carries any
                                       # address that the node can be connected on
    dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information
|
2022-04-13 16:32:35 +00:00
|
|
|
|
2022-05-26 02:29:31 +00:00
|
|
|
proc toNodeId*(cid: Cid): NodeId =
  ## Cid to discovery id
  ##
  ## Derives a 256-bit DHT node id by hashing the Cid's raw bytes.
  let digest = keccak256.digest(cid.data.buffer)
  readUintBE[256](digest.data)
|
|
|
|
|
|
|
|
proc toNodeId*(host: ca.Address): NodeId =
  ## Eth address to discovery id
  ##
  ## Derives a 256-bit DHT node id by hashing the address bytes.
  let digest = keccak256.digest(host.toArray)
  readUintBE[256](digest.data)
|
|
|
|
|
2022-04-13 16:32:35 +00:00
|
|
|
proc findPeer*(
    d: Discovery,
    peerId: PeerId): Future[?PeerRecord] {.async.} =
  ## Find peer using the given Discovery object
  ##
  ## Resolves the peer's record through the DHT and returns its
  ## `PeerRecord`, or `PeerRecord.none` when the peer cannot be resolved.
  ##
  # NOTE(review): the doc comment used to sit *after* this trace call, which
  # detaches it from the proc for `nim doc`; it has been moved to the top.
  trace "protocol.resolve..."

  let
    node = await d.protocol.resolve(toNodeId(peerId))

  return
    if node.isSome():
      node.get().record.data.some
    else:
      PeerRecord.none
|
2022-04-13 16:32:35 +00:00
|
|
|
|
2022-05-26 02:29:31 +00:00
|
|
|
method find*(
    d: Discovery,
    cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} =
  ## Find block providers
  ##
  ## Returns the provider records found in the DHT for `cid`,
  ## excluding our own node; returns an empty seq on lookup failure.
  ##
  without providers =?
    (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error:
    warn "Error finding providers for block", cid, error = error.msg
    # Explicitly return (empty result) on failure, consistent with the
    # ca.Address overload below; previously the failure branch fell through.
    return

  # never report ourselves as a provider
  return providers.filterIt( not (it.data.peerId == d.peerId) )
|
2022-05-12 21:52:03 +00:00
|
|
|
|
2022-05-26 02:29:31 +00:00
|
|
|
method provide*(d: Discovery, cid: Cid) {.async, base.} =
  ## Provide a block Cid
  ##
  ## Publishes our provider record for `cid` to the DHT.
  let reached = await d.protocol.addProvider(
    cid.toNodeId(), d.providerRecord.get)

  if reached.len == 0:
    warn "Couldn't provide to any nodes!"
|
2022-05-12 21:52:03 +00:00
|
|
|
|
2022-04-13 16:32:35 +00:00
|
|
|
|
2022-05-26 02:29:31 +00:00
|
|
|
method find*(
    d: Discovery,
    host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} =
  ## Find host providers
  ##
  ## Looks up provider records for an Eth address and returns them
  ## sorted by record sequence number (oldest first).
  ##

  trace "Finding providers for host", host = $host
  without var records =?
    (await d.protocol.getProviders(host.toNodeId())).mapFailure, error:
    trace "Error finding providers for host", host = $host, exc = error.msg
    return

  if records.len == 0:
    trace "No providers found", host = $host
    return

  # order by ascending sequence number
  records.sort(
    proc(x, y: SignedPeerRecord): int =
      system.cmp[uint64](x.data.seqNo, y.data.seqNo))

  return records
|
|
|
|
|
|
|
|
method provide*(d: Discovery, host: ca.Address) {.async, base.} =
  ## Provide hosts
  ##
  ## Publishes our provider record for the given Eth address.
  ##

  trace "Providing host", host = $host
  let reached = await d.protocol.addProvider(
    host.toNodeId(), d.providerRecord.get)

  if reached.len > 0:
    trace "Provided to nodes", nodes = reached.len
|
|
|
|
|
2023-06-22 15:11:18 +00:00
|
|
|
method removeProvider*(
    d: Discovery,
    peerId: PeerId): Future[void] {.base.} =
  ## Remove provider from providers table
  ##
  ## Forwards the (local) removal to the discv5 protocol and returns
  ## its future directly; note this proc is intentionally not `async`.
  ##
  trace "Removing provider", peerId
  return d.protocol.removeProvidersLocal(peerId)
|
|
|
|
|
2022-11-02 00:58:41 +00:00
|
|
|
proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) =
  ## Update providers record
  ##
  ## Rebuilds the signed provider record from `addrs` and, when the
  ## protocol is already running, pushes the new record to it.
  ##
  d.announceAddrs = @addrs

  trace "Updating announce record", addrs = d.announceAddrs
  let record = PeerRecord.init(d.peerId, d.announceAddrs)
  d.providerRecord = SignedPeerRecord
    .init(d.key, record)
    .expect("Should construct signed record")
    .some

  if not d.protocol.isNil:
    d.protocol.updateRecord(d.providerRecord)
      .expect("Should update SPR")
|
|
|
|
|
|
|
|
proc updateDhtRecord*(d: Discovery, ip: ValidIpAddress, port: Port) =
  ## Update providers record
  ##
  ## Rebuilds the signed DHT record from the given UDP endpoint and,
  ## when the protocol is already running, pushes the new record to it.
  ##
  trace "Updating Dht record", ip, port = $port
  let
    dhtAddr = MultiAddress.init(ip, IpTransportProtocol.udpProtocol, port)
    record = PeerRecord.init(d.peerId, @[dhtAddr])

  d.dhtRecord = SignedPeerRecord
    .init(d.key, record)
    .expect("Should construct signed record")
    .some

  if not d.protocol.isNil:
    d.protocol.updateRecord(d.dhtRecord)
      .expect("Should update SPR")
|
|
|
|
|
2022-10-27 13:44:56 +00:00
|
|
|
proc start*(d: Discovery) {.async.} =
  ## Open the discv5 transport and start the protocol loop.
  d.protocol.open()
  await d.protocol.start()
|
2022-04-13 16:32:35 +00:00
|
|
|
|
|
|
|
proc stop*(d: Discovery) {.async.} =
  ## Stop the discv5 protocol and wait for its transport to close.
  await d.protocol.closeWait()
|
2022-10-27 13:44:56 +00:00
|
|
|
|
|
|
|
proc new*(
    T: type Discovery,
    key: PrivateKey,
    bindIp = ValidIpAddress.init(IPv4_any()),
    bindPort = 0.Port,
    announceAddrs: openArray[MultiAddress],
    bootstrapNodes: openArray[SignedPeerRecord] = [],
    store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!")
): Discovery =
  ## Create a new Discovery node instance for the given key and datastore
  ##
  ## Builds the initial announce record, then wires up the discv5
  ## protocol with the given bind endpoint, bootstrap nodes and store.
  ##
  var discovery = Discovery(
    key: key,
    peerId: PeerId.init(key).expect("Should construct PeerId"))

  discovery.updateAnnounceRecord(announceAddrs)

  # --------------------------------------------------------------------------
  # FIXME disable IP limits temporarily so we can run our workshop. Re-enable
  # and figure out proper solution.
  let discoveryConfig = DiscoveryConfig(
    tableIpLimits: TableIpLimits(
      tableIpLimit: high(uint),
      bucketIpLimit: high(uint)),
    bitsPerHop: DefaultBitsPerHop)
  # --------------------------------------------------------------------------

  discovery.protocol = newProtocol(
    key,
    bindIp = bindIp.toNormalIp,
    bindPort = bindPort,
    record = discovery.providerRecord.get,
    bootstrapRecords = bootstrapNodes,
    rng = Rng.instance(),
    providers = ProvidersManager.new(store),
    config = discoveryConfig)

  discovery
|