fix: do not limit DNS Peer Discovery on capability

Nodes returned by DNS Discovery are not guaranteed to be reachable.
Hence, setting an upfront limit based on the sought capabilities does not
guarantee that those capabilities will actually be available.

The discovery mechanism's role is to find Waku 2 nodes — as many as possible.

It is then the role of the peer manager to decide:
- whether to attempt connecting to the node, based on its advertised capabilities
- whether to retain the connection to the node, based on its actually mounted protocols.

We still want to prevent infinite loops, hence the `maxGet` parameter.
Also, there was a discrepancy between the code under test and the code actually used by libp2p peer discovery; this is now resolved.

# Conflicts:
#	packages/discovery/src/dns/constants.ts
This commit is contained in:
fryorcraken 2025-03-24 11:23:16 +11:00
parent c1f9471cd7
commit 0dfe35281c
No known key found for this signature in database
GPG Key ID: A82ED75A8DFC50A4
10 changed files with 142 additions and 446 deletions

View File

@ -1,4 +1,4 @@
import { type NodeCapabilityCount, Tags } from "@waku/interfaces";
import { Tags } from "@waku/interfaces";
/**
* The ENR tree for the different fleets.
@ -13,9 +13,3 @@ export const enrTree = {
export const DEFAULT_BOOTSTRAP_TAG_NAME = Tags.BOOTSTRAP;
export const DEFAULT_BOOTSTRAP_TAG_VALUE = 50;
export const DEFAULT_BOOTSTRAP_TAG_TTL = 100_000_000;
export const DEFAULT_NODE_REQUIREMENTS: Partial<NodeCapabilityCount> = {
store: 1,
filter: 2,
lightPush: 2
};

View File

@ -17,7 +17,6 @@ const branchDomainD = "D5SNLTAGWNQ34NTQTPHNZDECFU";
const partialBranchA = "AAAA";
const partialBranchB = "BBBB";
const singleBranch = `enrtree-branch:${branchDomainA}`;
const doubleBranch = `enrtree-branch:${branchDomainA},${branchDomainB}`;
const multiComponentBranch = [
`enrtree-branch:${branchDomainA},${partialBranchA}`,
`${partialBranchB},${branchDomainB}`
@ -72,9 +71,10 @@ describe("DNS Node Discovery", () => {
mockDns.addRes(`${branchDomainA}.${host}`, [mockData.enrWithWaku2Relay]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(1);
expect(peers[0].ip).to.eq("192.168.178.251");
@ -88,9 +88,10 @@ describe("DNS Node Discovery", () => {
mockDns.addRes(`${branchDomainA}.${host}`, [singleBranch]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(0);
});
@ -102,17 +103,21 @@ describe("DNS Node Discovery", () => {
mockDns.addRes(`${branchDomainA}.${host}`, []);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
let peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peersA = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peersA.push(peer);
}
expect(peers.length).to.eq(0);
expect(peersA.length).to.eq(0);
// No TXT records case
mockDns.addRes(`${branchDomainA}.${host}`, []);
peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], { relay: 1 });
expect(peers.length).to.eq(0);
const peersB = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peersB.push(peer);
}
expect(peersB.length).to.eq(0);
});
it("ignores domain fetching errors", async function () {
@ -120,18 +125,20 @@ describe("DNS Node Discovery", () => {
mockDns.addThrow(`${branchDomainC}.${host}`);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(0);
});
it("ignores unrecognized TXT record formats", async function () {
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrBranchBadPrefix]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(0);
});
@ -140,18 +147,20 @@ describe("DNS Node Discovery", () => {
mockDns.addRes(`${branchDomainD}.${host}`, [mockData.enrWithWaku2Relay]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peersA = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peersA = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peersA.push(peer);
}
expect(peersA.length).to.eq(1);
// Specify that a subsequent network call retrieving the same peer should throw.
// This test passes only if the peer is fetched from cache
mockDns.addThrow(`${branchDomainD}.${host}`);
const peersB = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peersB = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peersB.push(peer);
}
expect(peersB.length).to.eq(1);
expect(peersA[0].ip).to.eq(peersB[0].ip);
});
@ -169,9 +178,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2Relay]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(1);
expect(peers[0].peerId?.toString()).to.eq(
@ -183,10 +193,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2RelayStore]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
store: 1,
relay: 1
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(1);
expect(peers[0].peerId?.toString()).to.eq(
@ -194,44 +204,6 @@ describe("DNS Node Discovery w/ capabilities", () => {
);
});
it("should only return 1 node with store capability", async () => {
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2Store]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
store: 1
});
expect(peers.length).to.eq(1);
expect(peers[0].peerId?.toString()).to.eq(
"16Uiu2HAkv3La3ECgQpdYeEJfrX36EWdhkUDv4C9wvXM8TFZ9dNgd"
);
});
it("retrieves all peers (2) when cannot fulfill all requirements", async () => {
mockDns.addRes(`${rootDomain}.${host}`, [doubleBranch]);
mockDns.addRes(`${branchDomainA}.${host}`, [
mockData.enrWithWaku2RelayStore
]);
mockDns.addRes(`${branchDomainB}.${host}`, [mockData.enrWithWaku2Relay]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
store: 1,
relay: 2,
filter: 1
});
expect(peers.length).to.eq(2);
const peerIds = peers.map((p) => p.peerId?.toString());
expect(peerIds).to.contain(
"16Uiu2HAm2HyS6brcCspSbszG9i36re2bWBVjMe3tMdnFp1Hua34F"
);
expect(peerIds).to.contain(
"16Uiu2HAmPsYLvfKafxgRsb6tioYyGnSvGXS2iuMigptHrqHPNPzx"
);
});
it("retrieves all peers (3) when branch entries are composed of multiple strings", async function () {
mockDns.addRes(`${rootDomain}.${host}`, multiComponentBranch);
mockDns.addRes(`${branchDomainA}.${host}`, [
@ -243,10 +215,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
]);
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
store: 2,
relay: 2
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
peers.push(peer);
}
expect(peers.length).to.eq(3);
const peerIds = peers.map((p) => p.peerId?.toString());
@ -275,12 +247,10 @@ describe("DNS Node Discovery [live data]", function () {
this.timeout(10000);
// Google's dns server address. Needs to be set explicitly to run in CI
const dnsNodeDiscovery = await DnsNodeDiscovery.dnsOverHttp();
const peers = await dnsNodeDiscovery.getPeers([enrTree.TEST], {
relay: maxQuantity,
store: maxQuantity,
filter: maxQuantity,
lightPush: maxQuantity
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([enrTree.TEST])) {
peers.push(peer);
}
expect(peers.length).to.eq(maxQuantity);
@ -298,12 +268,10 @@ describe("DNS Node Discovery [live data]", function () {
this.timeout(10000);
// Google's dns server address. Needs to be set explicitly to run in CI
const dnsNodeDiscovery = await DnsNodeDiscovery.dnsOverHttp();
const peers = await dnsNodeDiscovery.getPeers([enrTree.SANDBOX], {
relay: maxQuantity,
store: maxQuantity,
filter: maxQuantity,
lightPush: maxQuantity
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([enrTree.SANDBOX])) {
peers.push(peer);
}
expect(peers.length).to.eq(maxQuantity);

View File

@ -1,25 +1,16 @@
import { ENR, EnrDecoder } from "@waku/enr";
import type {
DnsClient,
IEnr,
NodeCapabilityCount,
SearchContext
} from "@waku/interfaces";
import type { DnsClient, IEnr, SearchContext } from "@waku/interfaces";
import { Logger } from "@waku/utils";
import { DnsOverHttps } from "./dns_over_https.js";
import { ENRTree } from "./enrtree.js";
import {
fetchNodesUntilCapabilitiesFulfilled,
yieldNodesUntilCapabilitiesFulfilled
} from "./fetch_nodes.js";
import { fetchNodes } from "./fetch_nodes.js";
const log = new Logger("discovery:dns");
export class DnsNodeDiscovery {
private readonly dns: DnsClient;
private readonly _DNSTreeCache: { [key: string]: string };
private readonly _errorTolerance: number = 10;
public static async dnsOverHttp(
dnsClient?: DnsClient
@ -30,42 +21,6 @@ export class DnsNodeDiscovery {
return new DnsNodeDiscovery(dnsClient);
}
/**
* Returns a list of verified peers listed in an EIP-1459 DNS tree. Method may
* return fewer peers than requested if @link wantedNodeCapabilityCount requires
* larger quantity of peers than available or the number of errors/duplicate
* peers encountered by randomized search exceeds the sum of the fields of
* @link wantedNodeCapabilityCount plus the @link _errorTolerance factor.
*/
public async getPeers(
enrTreeUrls: string[],
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>
): Promise<IEnr[]> {
const networkIndex = Math.floor(Math.random() * enrTreeUrls.length);
const { publicKey, domain } = ENRTree.parseTree(enrTreeUrls[networkIndex]);
const context: SearchContext = {
domain,
publicKey,
visits: {}
};
const peers = await fetchNodesUntilCapabilitiesFulfilled(
wantedNodeCapabilityCount,
this._errorTolerance,
() => this._search(domain, context)
);
log.info(
"retrieved peers: ",
peers.map((peer) => {
return {
id: peer.peerId?.toString(),
multiaddrs: peer.multiaddrs?.map((ma) => ma.toString())
};
})
);
return peers;
}
public constructor(dns: DnsClient) {
this._DNSTreeCache = {};
this.dns = dns;
@ -74,10 +29,7 @@ export class DnsNodeDiscovery {
/**
* {@inheritDoc getPeers}
*/
public async *getNextPeer(
enrTreeUrls: string[],
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>
): AsyncGenerator<IEnr> {
public async *getNextPeer(enrTreeUrls: string[]): AsyncGenerator<IEnr> {
const networkIndex = Math.floor(Math.random() * enrTreeUrls.length);
const { publicKey, domain } = ENRTree.parseTree(enrTreeUrls[networkIndex]);
const context: SearchContext = {
@ -86,11 +38,7 @@ export class DnsNodeDiscovery {
visits: {}
};
for await (const peer of yieldNodesUntilCapabilitiesFulfilled(
wantedNodeCapabilityCount,
this._errorTolerance,
() => this._search(domain, context)
)) {
for await (const peer of fetchNodes(() => this._search(domain, context))) {
yield peer;
}
}

View File

@ -9,8 +9,7 @@ import type {
DiscoveryTrigger,
DnsDiscOptions,
DnsDiscoveryComponents,
IEnr,
NodeCapabilityCount
IEnr
} from "@waku/interfaces";
import { DNS_DISCOVERY_TAG } from "@waku/interfaces";
import { encodeRelayShard, Logger } from "@waku/utils";
@ -18,8 +17,7 @@ import { encodeRelayShard, Logger } from "@waku/utils";
import {
DEFAULT_BOOTSTRAP_TAG_NAME,
DEFAULT_BOOTSTRAP_TAG_TTL,
DEFAULT_BOOTSTRAP_TAG_VALUE,
DEFAULT_NODE_REQUIREMENTS
DEFAULT_BOOTSTRAP_TAG_VALUE
} from "./constants.js";
import { DnsNodeDiscovery } from "./dns.js";
@ -35,7 +33,7 @@ export class PeerDiscoveryDns
private nextPeer: (() => AsyncGenerator<IEnr>) | undefined;
private _started: boolean;
private _components: DnsDiscoveryComponents;
private _options: DnsDiscOptions;
private readonly _options: DnsDiscOptions;
public constructor(
components: DnsDiscoveryComponents,
@ -65,14 +63,9 @@ export class PeerDiscoveryDns
let { enrUrls } = this._options;
if (!Array.isArray(enrUrls)) enrUrls = [enrUrls];
const { wantedNodeCapabilityCount } = this._options;
const dns = await DnsNodeDiscovery.dnsOverHttp();
this.nextPeer = dns.getNextPeer.bind(
dns,
enrUrls,
wantedNodeCapabilityCount
);
this.nextPeer = dns.getNextPeer.bind(dns, enrUrls);
}
for await (const peerEnr of this.nextPeer()) {
@ -143,9 +136,8 @@ export class PeerDiscoveryDns
}
export function wakuDnsDiscovery(
enrUrls: string[],
wantedNodeCapabilityCount: Partial<NodeCapabilityCount> = DEFAULT_NODE_REQUIREMENTS
enrUrls: string[]
): (components: DnsDiscoveryComponents) => PeerDiscoveryDns {
return (components: DnsDiscoveryComponents) =>
new PeerDiscoveryDns(components, { enrUrls, wantedNodeCapabilityCount });
new PeerDiscoveryDns(components, { enrUrls });
}

View File

@ -3,12 +3,11 @@ import { peerIdFromPrivateKey } from "@libp2p/peer-id";
import { multiaddr } from "@multiformats/multiaddr";
import { ENR } from "@waku/enr";
import { EnrCreator } from "@waku/enr";
import type { Waku2 } from "@waku/interfaces";
import { expect } from "chai";
import { fetchNodesUntilCapabilitiesFulfilled } from "./fetch_nodes.js";
import { fetchNodes } from "./fetch_nodes.js";
async function createEnr(waku2: Waku2): Promise<ENR> {
async function createEnr(): Promise<ENR> {
const peerId = await generateKeyPair("secp256k1").then(peerIdFromPrivateKey);
const enr = await EnrCreator.fromPeerId(peerId);
enr.setLocationMultiaddr(multiaddr("/ip4/18.223.219.100/udp/9000"));
@ -20,38 +19,13 @@ async function createEnr(waku2: Waku2): Promise<ENR> {
)
];
enr.waku2 = waku2;
enr.waku2 = { lightPush: true, filter: true, relay: false, store: false };
return enr;
}
const Waku2None = {
relay: false,
store: false,
filter: false,
lightPush: false
};
describe("Fetch nodes until capabilities are fulfilled", function () {
it("1 Relay, 1 fetch", async function () {
const relayNode = await createEnr({ ...Waku2None, relay: true });
const getNode = (): Promise<ENR> => Promise.resolve(relayNode);
const res = await fetchNodesUntilCapabilitiesFulfilled(
{ relay: 1 },
0,
getNode
);
expect(res.length).to.eq(1);
expect(res[0].peerId!.toString()).to.eq(relayNode.peerId?.toString());
});
it("1 Store, 2 fetches", async function () {
const relayNode = await createEnr({ ...Waku2None, relay: true });
const storeNode = await createEnr({ ...Waku2None, store: true });
const retrievedNodes = [relayNode, storeNode];
describe("Fetch nodes", function () {
it("Get Nodes", async function () {
const retrievedNodes = [await createEnr(), await createEnr()];
let fetchCount = 0;
const getNode = (): Promise<ENR> => {
@ -60,27 +34,21 @@ describe("Fetch nodes until capabilities are fulfilled", function () {
return Promise.resolve(node);
};
const res = await fetchNodesUntilCapabilitiesFulfilled(
{ store: 1 },
1,
getNode
);
const res = [];
for await (const node of fetchNodes(getNode, 5)) {
res.push(node);
}
expect(res.length).to.eq(1);
expect(res[0].peerId!.toString()).to.eq(storeNode.peerId?.toString());
expect(res.length).to.eq(2);
expect(res[0].peerId!.toString()).to.not.eq(res[1].peerId!.toString());
});
it("1 Store, 2 relays, 2 fetches", async function () {
const relayNode1 = await createEnr({ ...Waku2None, relay: true });
const relayNode2 = await createEnr({ ...Waku2None, relay: true });
const relayNode3 = await createEnr({ ...Waku2None, relay: true });
const relayStoreNode = await createEnr({
...Waku2None,
relay: true,
store: true
});
const retrievedNodes = [relayNode1, relayNode2, relayNode3, relayStoreNode];
it("Stops search when maxGet is reached", async function () {
const retrievedNodes = [
await createEnr(),
await createEnr(),
await createEnr()
];
let fetchCount = 0;
const getNode = (): Promise<ENR> => {
@ -89,30 +57,29 @@ describe("Fetch nodes until capabilities are fulfilled", function () {
return Promise.resolve(node);
};
const res = await fetchNodesUntilCapabilitiesFulfilled(
{ store: 1, relay: 2 },
1,
getNode
);
const res = [];
for await (const node of fetchNodes(getNode, 2)) {
res.push(node);
}
expect(res.length).to.eq(3);
expect(res[0].peerId!.toString()).to.eq(relayNode1.peerId?.toString());
expect(res[1].peerId!.toString()).to.eq(relayNode2.peerId?.toString());
expect(res[2].peerId!.toString()).to.eq(relayStoreNode.peerId?.toString());
expect(res.length).to.eq(2);
});
it("1 Relay, 1 Filter, gives up", async function () {
const relayNode = await createEnr({ ...Waku2None, relay: true });
it("Stops search when 2 null results are returned", async function () {
const retrievedNodes = [await createEnr(), null, null, await createEnr()];
const getNode = (): Promise<ENR> => Promise.resolve(relayNode);
let fetchCount = 0;
const getNode = (): Promise<ENR | null> => {
const node = retrievedNodes[fetchCount];
fetchCount++;
return Promise.resolve(node);
};
const res = await fetchNodesUntilCapabilitiesFulfilled(
{ filter: 1, relay: 1 },
5,
getNode
);
const res = [];
for await (const node of fetchNodes(getNode, 10, 2)) {
res.push(node);
}
expect(res.length).to.eq(1);
expect(res[0].peerId!.toString()).to.eq(relayNode.peerId?.toString());
});
});

View File

@ -1,181 +1,44 @@
import type { IEnr, NodeCapabilityCount, Waku2 } from "@waku/interfaces";
import type { IEnr } from "@waku/interfaces";
import { Logger } from "@waku/utils";
const log = new Logger("discovery:fetch_nodes");
/**
* Fetch nodes using passed [[getNode]] until all wanted capabilities are
* fulfilled or the number of [[getNode]] call exceeds the sum of
* [[wantedNodeCapabilityCount]] plus [[errorTolerance]].
* Fetch nodes using passed [[getNode]] until it has been called [[maxGet]]
* times, or it has returned empty or duplicate results more than [[maxErrors]]
* times.
*/
export async function fetchNodesUntilCapabilitiesFulfilled(
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>,
errorTolerance: number,
getNode: () => Promise<IEnr | null>
): Promise<IEnr[]> {
const wanted = {
relay: wantedNodeCapabilityCount.relay ?? 0,
store: wantedNodeCapabilityCount.store ?? 0,
filter: wantedNodeCapabilityCount.filter ?? 0,
lightPush: wantedNodeCapabilityCount.lightPush ?? 0
};
const maxSearches =
wanted.relay + wanted.store + wanted.filter + wanted.lightPush;
const actual = {
relay: 0,
store: 0,
filter: 0,
lightPush: 0
};
let totalSearches = 0;
const peers: IEnr[] = [];
while (
!isSatisfied(wanted, actual) &&
totalSearches < maxSearches + errorTolerance
) {
const peer = await getNode();
if (peer && isNewPeer(peer, peers)) {
// ENRs without a waku2 key are ignored.
if (peer.waku2) {
if (helpsSatisfyCapabilities(peer.waku2, wanted, actual)) {
addCapabilities(peer.waku2, actual);
peers.push(peer);
}
}
log.info(
`got new peer candidate from DNS address=${peer.nodeId}@${peer.ip}`
);
}
totalSearches++;
}
return peers;
}
/**
* Fetch nodes using passed [[getNode]] until all wanted capabilities are
* fulfilled or the number of [[getNode]] call exceeds the sum of
* [[wantedNodeCapabilityCount]] plus [[errorTolerance]].
*/
export async function* yieldNodesUntilCapabilitiesFulfilled(
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>,
errorTolerance: number,
getNode: () => Promise<IEnr | null>
export async function* fetchNodes(
getNode: () => Promise<IEnr | null>,
maxGet: number = 10,
maxErrors: number = 3
): AsyncGenerator<IEnr> {
const wanted = {
relay: wantedNodeCapabilityCount.relay ?? 0,
store: wantedNodeCapabilityCount.store ?? 0,
filter: wantedNodeCapabilityCount.filter ?? 0,
lightPush: wantedNodeCapabilityCount.lightPush ?? 0
};
const maxSearches =
wanted.relay + wanted.store + wanted.filter + wanted.lightPush;
const actual = {
relay: 0,
store: 0,
filter: 0,
lightPush: 0
};
let totalSearches = 0;
const peerNodeIds = new Set();
let totalSearches = 0;
let erroneousSearches = 0;
while (
!isSatisfied(wanted, actual) &&
totalSearches < maxSearches + errorTolerance
totalSearches < maxGet &&
erroneousSearches < maxErrors // Allows a couple of empty results before calling it quit
) {
totalSearches++;
const peer = await getNode();
if (peer && peer.nodeId && !peerNodeIds.has(peer.nodeId)) {
if (!peer || !peer.nodeId) {
erroneousSearches++;
continue;
}
if (!peerNodeIds.has(peer.nodeId)) {
peerNodeIds.add(peer.nodeId);
// ENRs without a waku2 key are ignored.
if (peer.waku2) {
if (helpsSatisfyCapabilities(peer.waku2, wanted, actual)) {
addCapabilities(peer.waku2, actual);
yield peer;
}
yield peer;
}
log.info(
`got new peer candidate from DNS address=${peer.nodeId}@${peer.ip}`
);
}
totalSearches++;
}
}
function isSatisfied(
wanted: NodeCapabilityCount,
actual: NodeCapabilityCount
): boolean {
return (
actual.relay >= wanted.relay &&
actual.store >= wanted.store &&
actual.filter >= wanted.filter &&
actual.lightPush >= wanted.lightPush
);
}
function isNewPeer(peer: IEnr, peers: IEnr[]): boolean {
if (!peer.nodeId) return false;
for (const existingPeer of peers) {
if (peer.nodeId === existingPeer.nodeId) {
return false;
}
}
return true;
}
function addCapabilities(node: Waku2, total: NodeCapabilityCount): void {
if (node.relay) total.relay += 1;
if (node.store) total.store += 1;
if (node.filter) total.filter += 1;
if (node.lightPush) total.lightPush += 1;
}
/**
* Checks if the proposed ENR [[node]] helps satisfy the [[wanted]] capabilities,
* considering the [[actual]] capabilities of nodes retrieved so far..
*
* @throws If the function is called when the wanted capabilities are already fulfilled.
*/
function helpsSatisfyCapabilities(
node: Waku2,
wanted: NodeCapabilityCount,
actual: NodeCapabilityCount
): boolean {
if (isSatisfied(wanted, actual)) {
throw "Internal Error: Waku2 wanted capabilities are already fulfilled";
}
const missing = missingCapabilities(wanted, actual);
return (
(missing.relay && node.relay) ||
(missing.store && node.store) ||
(missing.filter && node.filter) ||
(missing.lightPush && node.lightPush)
);
}
/**
* Return a [[Waku2]] Object for which capabilities are set to true if they are
* [[wanted]] yet missing from [[actual]].
*/
function missingCapabilities(
wanted: NodeCapabilityCount,
actual: NodeCapabilityCount
): Waku2 {
return {
relay: actual.relay < wanted.relay,
store: actual.store < wanted.store,
filter: actual.filter < wanted.filter,
lightPush: actual.lightPush < wanted.lightPush
};
}

View File

@ -12,13 +12,6 @@ export interface DnsClient {
resolveTXT: (domain: string) => Promise<string[]>;
}
export interface NodeCapabilityCount {
relay: number;
store: number;
filter: number;
lightPush: number;
}
export interface DnsDiscoveryComponents {
peerStore: PeerStore;
}
@ -28,10 +21,7 @@ export interface DnsDiscOptions {
* ENR URL to use for DNS discovery
*/
enrUrls: string | string[];
/**
* Specifies what type of nodes are wanted from the discovery process
*/
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>;
/**
* Tag a bootstrap peer with this name before "discovering" it (default: 'bootstrap')
*/

View File

@ -36,10 +36,7 @@ describe("DNS Discovery: Compliance Test", function () {
} as unknown as Libp2pComponents;
return new PeerDiscoveryDns(components, {
enrUrls: [enrTree["SANDBOX"]],
wantedNodeCapabilityCount: {
filter: 1
}
enrUrls: [enrTree["SANDBOX"]]
});
},
async teardown() {
@ -57,20 +54,11 @@ describe("DNS Node Discovery [live data]", function () {
it(`should use DNS peer discovery with light client`, async function () {
this.timeout(100000);
const maxQuantity = 3;
const nodeRequirements = {
relay: maxQuantity,
store: maxQuantity,
filter: maxQuantity,
lightPush: maxQuantity
};
const minQuantityExpected = 3; // We have at least 3 nodes in Waku Sandbox ENR tree
const waku = await createLightNode({
libp2p: {
peerDiscovery: [
wakuDnsDiscovery([enrTree["SANDBOX"]], nodeRequirements)
]
peerDiscovery: [wakuDnsDiscovery([enrTree["SANDBOX"]])]
}
});
@ -86,7 +74,7 @@ describe("DNS Node Discovery [live data]", function () {
}
expect(hasTag).to.be.eq(true);
}
expect(dnsPeers).to.eq(maxQuantity);
expect(dnsPeers).to.gte(minQuantityExpected);
});
it(`should retrieve ${maxQuantity} multiaddrs for test.waku.nodes.status.im`, async function () {
@ -96,12 +84,12 @@ describe("DNS Node Discovery [live data]", function () {
// Google's dns server address. Needs to be set explicitly to run in CI
const dnsNodeDiscovery = await DnsNodeDiscovery.dnsOverHttp();
const peers = await dnsNodeDiscovery.getPeers([enrTree["SANDBOX"]], {
relay: maxQuantity,
store: maxQuantity,
filter: maxQuantity,
lightPush: maxQuantity
});
const peers = [];
for await (const peer of dnsNodeDiscovery.getNextPeer([
enrTree["SANDBOX"]
])) {
peers.push(peer);
}
expect(peers.length).to.eq(maxQuantity);
@ -118,24 +106,20 @@ describe("DNS Node Discovery [live data]", function () {
if (process.env.CI) this.skip();
this.timeout(30_000);
const nodesToConnect = 2;
const minQuantityExpected = 2;
const waku = await createLightNode({
libp2p: {
peerDiscovery: [
wakuDnsDiscovery([enrTree["SANDBOX"], enrTree["TEST"]], {
filter: nodesToConnect
})
]
peerDiscovery: [wakuDnsDiscovery([enrTree["SANDBOX"], enrTree["TEST"]])]
}
});
await waku.start();
const allPeers = await waku.libp2p.peerStore.all();
while (allPeers.length < nodesToConnect) {
while (allPeers.length < minQuantityExpected) {
await delay(2000);
}
expect(allPeers.length).to.be.eq(nodesToConnect);
expect(allPeers.length).to.be.gte(minQuantityExpected);
});
});

View File

@ -23,12 +23,10 @@ describe("Peer Exchange", () => {
this.timeout(50_000);
const dns = await DnsNodeDiscovery.dnsOverHttp();
const dnsEnrs = await dns.getPeers(
[enrTree["SANDBOX"], enrTree["TEST"]],
{
lightPush: 1
}
);
const dnsEnrs = [];
for await (const node of dns.getNextPeer([enrTree["SANDBOX"]])) {
dnsEnrs.push(node);
}
const dnsPeerMultiaddrs = dnsEnrs
.flatMap(
(enr) => enr.peerInfo?.multiaddrs.map((ma) => ma.toString()) ?? []

View File

@ -9,17 +9,9 @@ describe("Use static and several ENR trees for bootstrap", function () {
it("", async function () {
this.timeout(10_000);
const NODE_REQUIREMENTS = {
store: 3,
lightPush: 3,
filter: 3
};
waku = await createLightNode({
libp2p: {
peerDiscovery: [
wakuDnsDiscovery([enrTree["SANDBOX"]], NODE_REQUIREMENTS)
]
peerDiscovery: [wakuDnsDiscovery([enrTree["SANDBOX"]])]
}
});
await waku.start();