Mirror of https://github.com/logos-storage/logos-storage-network-crawler.git, synced 2026-01-08 08:23:08 +00:00
hacky crawl is working
parent 29f4c3e80f
commit f7b1aab098
@@ -80,21 +80,28 @@ proc initializeDht(app: Application): Future[?!void] {.async.} =
  without privateKey =? setupKey(keyPath), err:
    return failure(err)

  var announceAddresses = newSeq[MultiAddress]()
  let aaa = MultiAddress.init("/ip4/172.21.64.1/udp/8090").expect("Should init multiaddress")
  # /ip4/45.82.185.194/udp/8090
  # /ip4/172.21.64.1/udp/8090
  announceAddresses.add(aaa)
  var listenAddresses = newSeq[MultiAddress]()
  # TODO: when p2p connections are supported:
  # let aaa = MultiAddress.init("/ip4/" & app.config.publicIp & "/tcp/53678").expect("Should init multiaddress")
  # listenAddresses.add(aaa)

  var discAddresses = newSeq[MultiAddress]()
  let bbb = MultiAddress.init("/ip4/" & app.config.publicIp & "/udp/" & $app.config.discPort).expect("Should init multiaddress")
  discAddresses.add(bbb)

  app.dht = Dht.new(
    privateKey,
    bindPort = app.config.discPort,
    announceAddrs = announceAddresses,
    announceAddrs = listenAddresses,
    bootstrapNodes = app.config.bootNodes,
    store = dhtStore,
  )

  app.dht.updateAnnounceRecord(listenAddresses)
  app.dht.updateDhtRecord(discAddresses)

  await app.dht.start()

  return success()

proc initializeApp(app: Application): Future[?!void] {.async.} =
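The discovery multiaddress above is assembled by plain string concatenation from the config's public IP and discovery port. A minimal, self-contained sketch of that step (the helper name udpMultiAddrStr and the sample values are illustrative, not part of the commit):

proc udpMultiAddrStr(publicIp: string, discPort: int): string =
  # Builds the "/ip4/<ip>/udp/<port>" string that is handed to MultiAddress.init.
  "/ip4/" & publicIp & "/udp/" & $discPort

assert udpMultiAddrStr("62.45.154.249", 8090) == "/ip4/62.45.154.249/udp/8090"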
@@ -112,21 +119,22 @@ proc hackyCrawl(app: Application) {.async.} =
  info "starting hacky crawl..."
  await sleepAsync(3000)

  var nodeIds = await app.dht.getRoutingTableNodeIds()
  var nodeIds = app.dht.getRoutingTableNodeIds()
  trace "starting with routing table nodes", nodes = nodeIds.len

  while app.status == ApplicationStatus.Running:
  while app.status == ApplicationStatus.Running and nodeIds.len > 0:
    let nodeId = nodeIds[0]
    nodeIds.delete(0)

    without newNodes =? (await app.dht.getNeighbors(nodeId)), err:
      error "getneighbors failed", err = err.msg

    trace "adding new nodes", len = newNodes.len
    for id in newNodes.mapIt(it.id):
      nodeIds.add(id)
    for node in newNodes:
      nodeIds.add(node.id)
      trace "adding new node", id = $node.id, addrs = $node.address
    await sleepAsync(1000)

  info "hacky crawl stopped!"

proc stop*(app: Application) =
  app.status = ApplicationStatus.Stopping
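hackyCrawl is essentially a breadth-first walk over the DHT: a FIFO list of node IDs seeded from the routing table, where each getNeighbors response is appended to the back of the list. A minimal standalone sketch of that pattern, with hypothetical stand-ins for the DHT calls and a visited set added purely for illustration (the commit's loop does not deduplicate):

import std/[deques, sets]

type NodeId = int   # stand-in for the real 256-bit node ID type

proc fetchNeighbors(id: NodeId): seq[NodeId] =
  # placeholder for `await app.dht.getNeighbors(nodeId)`
  @[]

proc crawl(seed: seq[NodeId]) =
  var queue = initDeque[NodeId]()   # FIFO frontier, like `nodeIds` above
  var seen = seed.toHashSet()
  for id in seed:
    queue.addLast(id)
  while queue.len > 0:
    let id = queue.popFirst()
    for neighbor in fetchNeighbors(id):
      if neighbor notin seen:       # skip nodes we have already queued
        seen.incl(neighbor)
        queue.addLast(neighbor)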
@@ -10,10 +10,11 @@ let doc =
Codex Network Crawler. Generates network metrics.

Usage:
  codexcrawler [--logLevel=<l>] [--metricsAddress=<ip>] [--metricsPort=<p>] [--dataDir=<dir>] [--discoveryPort=<p>] [--bootNodes=<n>]
  codexcrawler [--logLevel=<l>] [--publicIp=<a>] [--metricsAddress=<ip>] [--metricsPort=<p>] [--dataDir=<dir>] [--discoveryPort=<p>] [--bootNodes=<n>]

Options:
  --logLevel=<l>           Sets log level [default: TRACE]
  --publicIp=<a>           Public IP address where this instance is reachable. [default: 62.45.154.249]
  --metricsAddress=<ip>    Listen address of the metrics server [default: 0.0.0.0]
  --metricsPort=<p>        Listen HTTP port of the metrics server [default: 8008]
  --dataDir=<dir>          Directory for storing data [default: crawler_data]
@@ -26,6 +27,7 @@ import docopt

type CrawlerConfig* = ref object
  logLevel*: string
  publicIp*: string
  metricsAddress*: IpAddress
  metricsPort*: Port
  dataDir*: string
@@ -82,6 +84,7 @@ proc parseConfig*(): CrawlerConfig =

  return CrawlerConfig(
    logLevel: get("--logLevel"),
    publicIp: get("--publicIp"),
    metricsAddress: parseIpAddress(get("--metricsAddress")),
    metricsPort: Port(parseInt(get("--metricsPort"))),
    dataDir: get("--dataDir"),
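parseConfig fills CrawlerConfig by looking each flag up in the parsed docopt arguments (via the repo's get helper). A minimal sketch of that lookup for the new --publicIp flag, assuming the nim-docopt package; the doc string and argv below are illustrative only:

import docopt

let doc = """
Crawler.

Usage:
  crawler [--publicIp=<a>]

Options:
  --publicIp=<a>  Public IP address where this instance is reachable. [default: 127.0.0.1]
"""

let args = docopt(doc, argv = @["--publicIp=1.2.3.4"])
echo $args["--publicIp"]   # "1.2.3.4"; the [default:] value is used when the flag is omitted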
@@ -40,38 +40,20 @@ proc getNode*(d: Dht, nodeId: NodeId): ?!Node =
    return success(node.get())
  return failure("Node not found for id: " & $nodeId)

proc hacky*(d: Dht, nodeId: NodeId) {.async.} =
  await sleepAsync(1)
  let node = d.protocol.getNode(nodeId)
  if node.isSome():
    let n = node.get()
    info "that worked", node = $n.id, seen = $n.seen
  else:
    info "that didn't work", node = $nodeId

proc getRoutingTableNodeIds*(d: Dht): Future[seq[NodeId]] {.async.} =
proc getRoutingTableNodeIds*(d: Dht): seq[NodeId] =
  var ids = newSeq[NodeId]()
  info "routing table", len = $d.protocol.routingTable.len
  for bucket in d.protocol.routingTable.buckets:
    for node in bucket.nodes:
      warn "node seen", node = $node.id, seen = $node.seen
      ids.add(node.id)

      await d.hacky(node.id)
      # await sleepAsync(1)
  return ids

proc getDistances(): seq[uint16] =
  var d = newSeq[uint16]()
  for i in 0..10:
    d.add(i.uint16)
  return d

proc getNeighbors*(d: Dht, target: NodeId): Future[?!seq[Node]] {.async.} =
  without node =? d.getNode(target), err:
    return failure(err)

  let distances = getDistances()
  let distances = @[256.uint16]
  let response = await d.protocol.findNode(node, distances)

  if response.isOk():
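getNeighbors drives the crawl: it sends a FINDNODE request for a list of log2 XOR distances, either the 0..10 sweep from getDistances or the single distance 256. In discv5 the distance between two 256-bit node IDs is the position of their highest differing bit, so 256 selects the bucket covering roughly half the ID space and tends to return the most peers per query. A toy illustration of that metric on 32-bit IDs (not the crawler's code):

import std/bitops

proc logDistance(a, b: uint32): int =
  # log2 XOR distance between two toy 32-bit "node IDs"; the real discv5
  # metric works the same way on 256-bit IDs, so its maximum is 256.
  let x = a xor b
  if x == 0: 0
  else: 32 - countLeadingZeroBits(x)

echo logDistance(0x80'u32, 0x01'u32)   # 8: the highest differing bit is bit 7 (zero-based)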
@@ -5,8 +5,8 @@ switch("define", "libp2p_pki_schemes=secp256k1")

# switch("define", "chronicles_runtime_filtering=true")
# Sets TRACE logging for everything except DHT
switch("define", "chronicles_log_level=INFO")
# switch("define", "chronicles_disabled_topics:discv5")
switch("define", "chronicles_log_level=TRACE")
switch("define", "chronicles_disabled_topics:discv5")

when (NimMajor, NimMinor) >= (2, 0):
  --mm:
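These defines are chronicles compile-time settings: chronicles_log_level fixes the minimum level baked into the binary, and chronicles_disabled_topics compiles out whole log topics, here discv5, so the DHT library's own tracing does not drown out the crawler's. A minimal sketch of how that interacts with the crawler's log calls (assuming standard chronicles usage; the "crawler" topic name is illustrative):

import chronicles

logScope:
  topics = "crawler"

# Built with chronicles_log_level=TRACE and chronicles_disabled_topics:discv5,
# this trace line is kept, while trace/debug output emitted under the "discv5"
# topic inside the DHT library is compiled out entirely.
trace "adding new node", id = "0xabc"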