mirror of
https://github.com/logos-messaging/logos-messaging-js.git
synced 2026-05-17 18:19:27 +00:00
Compare commits
124 Commits
message-en
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0ca94f3c8b | ||
|
|
c8667058a6 | ||
|
|
d4c6c9970d | ||
|
|
7816642fae | ||
|
|
74ad13ba24 | ||
|
|
9a1e9cecc5 | ||
|
|
ab237410f9 | ||
|
|
f2ad23ad43 | ||
|
|
788f7e62c5 | ||
|
|
e5f51d7df1 | ||
|
|
84a6ea69cf | ||
| 049e564e89 | |||
|
|
101ffe8a04 | ||
|
|
5334a7fcc9 | ||
|
|
115cdd28fe | ||
|
|
0daa81d3d7 | ||
| e2c9364053 | |||
|
|
0df18b2a75 | ||
|
|
ff9c43038e | ||
|
|
b8a9d132c1 | ||
|
|
37c6c1e529 | ||
|
|
ad0bed69ba | ||
|
|
d803565b30 | ||
|
|
e92f6a2409 | ||
|
|
c0ecb6abba | ||
|
|
593bc45225 | ||
|
|
bbcfc94879 | ||
|
|
016a25d578 | ||
|
|
79dd001b1f | ||
|
|
11d84ad342 | ||
|
|
cb3af8cd4d | ||
|
|
4d5c152f5b | ||
|
|
8f09f5fa5a | ||
|
|
16253026c6 | ||
|
|
a7f30b1211 | ||
|
|
2fba052b8b | ||
|
|
914beb6531 | ||
|
|
8542d04bf5 | ||
|
|
826aedd558 | ||
|
|
0dfbcf6b6b | ||
|
|
78c856d079 | ||
|
|
836d6b8793 | ||
|
|
c8dfdb1ace | ||
|
|
26de2d11c8 | ||
|
|
dfb2baf004 | ||
|
|
eab8ce81b4 | ||
|
|
95da57a870 | ||
|
|
c161b37d08 | ||
|
|
de972d6694 | ||
|
|
ada265731a | ||
|
|
dc5155056b | ||
|
|
459fe96fe6 | ||
|
|
52e5c34520 | ||
|
|
449797d5c1 | ||
|
|
103f21ef85 | ||
|
|
d7919e8c9b | ||
|
|
f4a2778e02 | ||
|
|
25f884e05b | ||
|
|
0dfe35281c | ||
|
|
c1f9471cd7 | ||
|
|
0be4861c79 | ||
|
|
f8f20db85c | ||
|
|
cd265ba1ae | ||
|
|
39c139158c | ||
|
|
77c694095d | ||
|
|
158f6ecf98 | ||
|
|
d4429702c2 | ||
|
|
1e7e0291fa | ||
|
|
b51d598581 | ||
|
|
ec8ded9c6a | ||
|
|
3bb5a4931f | ||
|
|
0739fd1dd2 | ||
|
|
e5919a6bd9 | ||
|
|
97a26b2373 | ||
|
|
e53717cd08 | ||
|
|
3c930dedfc | ||
|
|
065ef0adb0 | ||
|
|
8d278f8132 | ||
|
|
1bbb129d04 | ||
|
|
3200b19a02 | ||
|
|
d04130feb6 | ||
|
|
b8867dee38 | ||
|
|
b4787e0e87 | ||
|
|
3842d84b55 | ||
|
|
e92cc570f2 | ||
|
|
79f319cc86 | ||
|
|
36f6884d22 | ||
|
|
7fc2895b6a | ||
|
|
c24842a4a5 | ||
|
|
6d55af947e | ||
|
|
188e4bf928 | ||
|
|
ed389ccbc9 | ||
|
|
7f7f772d93 | ||
|
|
35acdf8fa5 | ||
|
|
27292edabc | ||
|
|
14085de3c4 | ||
|
|
94788d0dfc | ||
|
|
9b0c5e8311 | ||
|
|
5d8cfff7eb | ||
|
|
f75634d9c5 | ||
|
|
c7682ea67c | ||
|
|
bfda249aa6 | ||
|
|
058f2ff620 | ||
|
|
981248eedd | ||
|
|
cd1d909de3 | ||
|
|
f55db3eb4b | ||
|
|
fcc6496fef | ||
|
|
fecc026dd7 | ||
|
|
7c8d1073b0 | ||
|
|
a0fc9e05d4 | ||
|
|
8444bc940f | ||
|
|
a2c3b2e6aa | ||
|
|
4997440225 | ||
|
|
49f26d89a8 | ||
|
|
f649f59e64 | ||
|
|
5f63cb5bfb | ||
|
|
2d92191029 | ||
|
|
9f7a15dfb1 | ||
|
|
1905558753 | ||
|
|
16328a3f11 | ||
|
|
6d6e256fec | ||
|
|
5ed35471ca | ||
|
|
3b23bceb9d | ||
|
|
a4dfd3455c |
12
.cspell.json
12
.cspell.json
@ -4,6 +4,7 @@
|
||||
"language": "en",
|
||||
"words": [
|
||||
"abortable",
|
||||
"acks",
|
||||
"Addrs",
|
||||
"ahadns",
|
||||
"Alives",
|
||||
@ -23,9 +24,11 @@
|
||||
"cipherparams",
|
||||
"ciphertext",
|
||||
"circleci",
|
||||
"circom",
|
||||
"codecov",
|
||||
"codegen",
|
||||
"commitlint",
|
||||
"cooldown",
|
||||
"dependabot",
|
||||
"dialable",
|
||||
"dingpu",
|
||||
@ -52,6 +55,7 @@
|
||||
"fontsource",
|
||||
"globby",
|
||||
"gossipsub",
|
||||
"hackathons",
|
||||
"huilong",
|
||||
"iasked",
|
||||
"ihave",
|
||||
@ -59,6 +63,7 @@
|
||||
"ineed",
|
||||
"IPAM",
|
||||
"ipfs",
|
||||
"isready",
|
||||
"iwant",
|
||||
"jdev",
|
||||
"jswaku",
|
||||
@ -99,6 +104,7 @@
|
||||
"reactjs",
|
||||
"recid",
|
||||
"rlnrelay",
|
||||
"rlnv",
|
||||
"roadmap",
|
||||
"sandboxed",
|
||||
"scanf",
|
||||
@ -118,14 +124,18 @@
|
||||
"typedoc",
|
||||
"undialable",
|
||||
"unencrypted",
|
||||
"unhandle",
|
||||
"unmarshal",
|
||||
"unmount",
|
||||
"unmounts",
|
||||
"unsubscription",
|
||||
"untracked",
|
||||
"upgrader",
|
||||
"vacp",
|
||||
"varint",
|
||||
"viem",
|
||||
"vkey",
|
||||
"wagmi",
|
||||
"waku",
|
||||
"wakuconnect",
|
||||
"wakunode",
|
||||
@ -135,6 +145,7 @@
|
||||
"weboko",
|
||||
"websockets",
|
||||
"wifi",
|
||||
"WTNS",
|
||||
"xsalsa20",
|
||||
"zerokit",
|
||||
"Привет",
|
||||
@ -159,6 +170,7 @@
|
||||
"gen",
|
||||
"proto",
|
||||
"*.spec.ts",
|
||||
"*.log",
|
||||
"CHANGELOG.md"
|
||||
],
|
||||
"patterns": [
|
||||
|
||||
24
.dockerignore
Normal file
24
.dockerignore
Normal file
@ -0,0 +1,24 @@
|
||||
**/node_modules
|
||||
**/.git
|
||||
**/.vscode
|
||||
**/dist
|
||||
**/build
|
||||
**/.DS_Store
|
||||
**/.env*
|
||||
**/*.log
|
||||
|
||||
# Exclude all packages except browser-tests and browser-container
|
||||
packages/discovery/
|
||||
packages/tests/
|
||||
packages/utils/
|
||||
packages/sds/
|
||||
packages/sdk/
|
||||
packages/relay/
|
||||
packages/rln/
|
||||
packages/message-hash/
|
||||
packages/proto/
|
||||
packages/enr/
|
||||
packages/interfaces/
|
||||
packages/message-encryption/
|
||||
packages/core/
|
||||
packages/build-utils/
|
||||
@ -5,7 +5,12 @@
|
||||
"project": ["./tsconfig.json"]
|
||||
},
|
||||
"env": { "es6": true },
|
||||
"ignorePatterns": ["node_modules", "build", "coverage", "proto"],
|
||||
"ignorePatterns": [
|
||||
"node_modules",
|
||||
"build",
|
||||
"coverage",
|
||||
"proto"
|
||||
],
|
||||
"plugins": ["import", "eslint-comments", "functional"],
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
|
||||
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
@ -1 +1 @@
|
||||
* @waku-org/js-waku
|
||||
* @logos-messaging/js-waku
|
||||
|
||||
2
.github/workflows/add-action-project.yml
vendored
2
.github/workflows/add-action-project.yml
vendored
@ -11,5 +11,5 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/add-to-project@v0.5.0
|
||||
with:
|
||||
project-url: https://github.com/orgs/waku-org/projects/2
|
||||
project-url: https://github.com/orgs/logos-messaging/projects/2
|
||||
github-token: ${{ secrets.ADD_TO_PROJECT_20240815 }}
|
||||
|
||||
105
.github/workflows/ci.yml
vendored
105
.github/workflows/ci.yml
vendored
@ -15,16 +15,16 @@ on:
|
||||
type: string
|
||||
|
||||
env:
|
||||
NODE_JS: "20"
|
||||
NODE_JS: "24"
|
||||
|
||||
jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
@ -37,8 +37,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
@ -57,13 +57,13 @@ jobs:
|
||||
browser:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: mcr.microsoft.com/playwright:v1.51.1-jammy
|
||||
image: mcr.microsoft.com/playwright:v1.56.1-jammy
|
||||
env:
|
||||
HOME: "/root"
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
@ -71,65 +71,18 @@ jobs:
|
||||
- run: npm run build:esm
|
||||
- run: npm run test:browser
|
||||
|
||||
build_rln_tree:
|
||||
if: false # This condition disables the job
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
- name: Check for existing RLN tree artifact
|
||||
id: check-artifact
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
const artifact = await github.rest.actions.listWorkflowRunArtifacts({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
run_id: context.runId
|
||||
});
|
||||
console.log(artifact);
|
||||
const foundArtifact = artifact.data.artifacts.find(art => art.name === 'rln_tree.tar.gz');
|
||||
if (foundArtifact) {
|
||||
core.setOutput('artifact_id', foundArtifact.id);
|
||||
core.setOutput('artifact_found', 'true');
|
||||
} else {
|
||||
core.setOutput('artifact_found', 'false');
|
||||
}
|
||||
- name: Download RLN tree artifact
|
||||
if: steps.check-artifact.outputs.artifact_found == 'true'
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: rln_tree.tar.gz
|
||||
path: /tmp
|
||||
- uses: ./.github/actions/npm
|
||||
- name: Sync rln tree and save artifact
|
||||
run: |
|
||||
mkdir -p /tmp/rln_tree.db
|
||||
npm run build:esm
|
||||
npm run sync-rln-tree
|
||||
tar -czf rln_tree.tar.gz -C /tmp/rln_tree.db .
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: rln_tree.tar.gz
|
||||
path: rln_tree.tar.gz
|
||||
|
||||
node:
|
||||
uses: ./.github/workflows/test-node.yml
|
||||
secrets: inherit
|
||||
with:
|
||||
nim_wakunode_image: ${{ inputs.nim_wakunode_image || 'wakuorg/nwaku:v0.35.1' }}
|
||||
nim_wakunode_image: ${{ inputs.nim_wakunode_image || 'wakuorg/nwaku:v0.36.0' }}
|
||||
test_type: node
|
||||
allure_reports: true
|
||||
|
||||
node_optional:
|
||||
uses: ./.github/workflows/test-node.yml
|
||||
with:
|
||||
nim_wakunode_image: ${{ inputs.nim_wakunode_image || 'wakuorg/nwaku:v0.35.1' }}
|
||||
nim_wakunode_image: ${{ inputs.nim_wakunode_image || 'wakuorg/nwaku:v0.36.0' }}
|
||||
test_type: node-optional
|
||||
|
||||
node_with_nwaku_master:
|
||||
@ -150,8 +103,8 @@ jobs:
|
||||
token: ${{ secrets.CI_TOKEN }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
@ -160,12 +113,44 @@ jobs:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- run: npm install
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
|
||||
- run: npm run build
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
|
||||
- name: Setup Foundry
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
uses: foundry-rs/foundry-toolchain@v1
|
||||
with:
|
||||
version: nightly
|
||||
|
||||
- name: Generate RLN contract ABIs
|
||||
id: rln-abi
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
run: |
|
||||
npm run setup:contract-abi -w @waku/rln || {
|
||||
echo "::warning::Failed to generate contract ABIs, marking @waku/rln as private to skip publishing"
|
||||
cd packages/rln
|
||||
node -e "const fs = require('fs'); const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); pkg.private = true; fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2));"
|
||||
echo "failed=true" >> $GITHUB_OUTPUT
|
||||
}
|
||||
|
||||
- name: Rebuild with new ABIs
|
||||
if: ${{ steps.release.outputs.releases_created && steps.rln-abi.outputs.failed != 'true' }}
|
||||
run: |
|
||||
npm install -w packages/rln
|
||||
npm run build -w @waku/rln || {
|
||||
echo "::warning::Failed to build @waku/rln, marking as private to skip publishing"
|
||||
cd packages/rln
|
||||
node -e "const fs = require('fs'); const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); pkg.private = true; fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2));"
|
||||
}
|
||||
|
||||
- run: npm run publish
|
||||
if: ${{ steps.release.outputs.releases_created }}
|
||||
env:
|
||||
|
||||
4
.github/workflows/fleet-checker.yml
vendored
4
.github/workflows/fleet-checker.yml
vendored
@ -2,7 +2,7 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
NODE_JS: "20"
|
||||
NODE_JS: "22"
|
||||
|
||||
jobs:
|
||||
pre-release:
|
||||
@ -12,7 +12,7 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
|
||||
10
.github/workflows/playwright.yml
vendored
10
.github/workflows/playwright.yml
vendored
@ -7,10 +7,7 @@ on:
|
||||
branches: [ master ]
|
||||
|
||||
env:
|
||||
NODE_JS: "20"
|
||||
EXAMPLE_TEMPLATE: "web-chat"
|
||||
EXAMPLE_NAME: "example"
|
||||
EXAMPLE_PORT: "8080"
|
||||
NODE_JS: "22"
|
||||
# Firefox in container fails due to $HOME not being owned by user running commands
|
||||
# more details https://github.com/microsoft/playwright/issues/6500
|
||||
HOME: "/root"
|
||||
@ -20,7 +17,7 @@ jobs:
|
||||
timeout-minutes: 60
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: mcr.microsoft.com/playwright:v1.51.1-jammy
|
||||
image: mcr.microsoft.com/playwright:v1.56.1-jammy
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-node@v3
|
||||
@ -29,6 +26,9 @@ jobs:
|
||||
|
||||
- uses: ./.github/actions/npm
|
||||
|
||||
- name: Build entire monorepo
|
||||
run: npm run build
|
||||
|
||||
- name: Run Playwright tests
|
||||
run: npm run test --workspace=@waku/browser-tests
|
||||
|
||||
|
||||
52
.github/workflows/pre-release.yml
vendored
52
.github/workflows/pre-release.yml
vendored
@ -2,7 +2,11 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
NODE_JS: "20"
|
||||
NODE_JS: "24"
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
pre-release:
|
||||
@ -10,19 +14,49 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
ref: ${{ github.ref }}
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- run: npm install
|
||||
|
||||
|
||||
- run: npm run build
|
||||
|
||||
|
||||
- name: Setup Foundry
|
||||
uses: foundry-rs/foundry-toolchain@v1
|
||||
with:
|
||||
version: nightly
|
||||
|
||||
- name: Generate RLN contract ABIs
|
||||
id: rln-abi
|
||||
run: |
|
||||
npm run setup:contract-abi -w @waku/rln || {
|
||||
echo "::warning::Failed to generate contract ABIs, marking @waku/rln as private to skip publishing"
|
||||
cd packages/rln
|
||||
node -e "const fs = require('fs'); const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); pkg.private = true; fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2));"
|
||||
echo "failed=true" >> $GITHUB_OUTPUT
|
||||
}
|
||||
|
||||
- name: Rebuild with new ABIs
|
||||
if: steps.rln-abi.outputs.failed != 'true'
|
||||
run: |
|
||||
npm install -w packages/rln
|
||||
npm run build -w @waku/rln || {
|
||||
echo "::warning::Failed to build @waku/rln, marking as private to skip publishing"
|
||||
cd packages/rln
|
||||
node -e "const fs = require('fs'); const pkg = JSON.parse(fs.readFileSync('package.json', 'utf8')); pkg.private = true; fs.writeFileSync('package.json', JSON.stringify(pkg, null, 2));"
|
||||
}
|
||||
|
||||
- run: npm run publish -- --tag next
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_JS_WAKU_PUBLISH }}
|
||||
|
||||
14
.github/workflows/test-node.yml
vendored
14
.github/workflows/test-node.yml
vendored
@ -24,7 +24,7 @@ on:
|
||||
default: false
|
||||
|
||||
env:
|
||||
NODE_JS: "20"
|
||||
NODE_JS: "24"
|
||||
# Ensure test type conditions remain consistent.
|
||||
WAKU_SERVICE_NODE_PARAMS: ${{ (inputs.test_type == 'go-waku-master') && '--min-relay-peers-to-publish=0' || '' }}
|
||||
DEBUG: ${{ inputs.debug }}
|
||||
@ -42,8 +42,8 @@ jobs:
|
||||
checks: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: waku-org/js-waku
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
|
||||
- name: Remove unwanted software
|
||||
uses: ./.github/actions/prune-vm
|
||||
@ -62,14 +62,14 @@ jobs:
|
||||
|
||||
- name: Merge allure reports
|
||||
if: always() && env.ALLURE_REPORTS == 'true'
|
||||
run: node ci/mergeAllureResults.cjs
|
||||
run: node ci/mergeAllureResults.cjs
|
||||
|
||||
- name: Get allure history
|
||||
if: always() && env.ALLURE_REPORTS == 'true'
|
||||
uses: actions/checkout@v3
|
||||
continue-on-error: true
|
||||
with:
|
||||
repository: waku-org/allure-jswaku
|
||||
repository: logos-messaging/logos-messaging-allure-js
|
||||
ref: gh-pages
|
||||
path: gh-pages
|
||||
token: ${{ env.GITHUB_TOKEN }}
|
||||
@ -89,7 +89,7 @@ jobs:
|
||||
uses: peaceiris/actions-gh-pages@v3
|
||||
with:
|
||||
personal_token: ${{ env.GITHUB_TOKEN }}
|
||||
external_repository: waku-org/allure-jswaku
|
||||
external_repository: logos-messaging/logos-messaging-allure-js
|
||||
publish_branch: gh-pages
|
||||
publish_dir: allure-history
|
||||
|
||||
@ -125,4 +125,4 @@ jobs:
|
||||
echo "## Run Information" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- **NWAKU**: ${{ env.WAKUNODE_IMAGE }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "## Test Results" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Allure report will be available at: https://waku-org.github.io/allure-jswaku/${{ github.run_number }}" >> $GITHUB_STEP_SUMMARY
|
||||
echo "Allure report will be available at: https://logos-messaging.github.io/logos-messaging-allure-js/${{ github.run_number }}" >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
106
.github/workflows/test-reliability.yml
vendored
Normal file
106
.github/workflows/test-reliability.yml
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
name: Run Reliability Test
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test_type:
|
||||
description: 'Type of reliability test to run'
|
||||
required: true
|
||||
default: 'longevity'
|
||||
type: choice
|
||||
options:
|
||||
- longevity
|
||||
- high-throughput
|
||||
- throughput-sizes
|
||||
- network-latency
|
||||
- low-bandwidth
|
||||
- packet-loss
|
||||
- all
|
||||
|
||||
env:
|
||||
NODE_JS: "24"
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
actions: read
|
||||
checks: write
|
||||
strategy:
|
||||
matrix:
|
||||
test_type: [longevity, high-throughput, throughput-sizes, network-latency, low-bandwidth, packet-loss]
|
||||
fail-fast: false
|
||||
if: ${{ github.event.inputs.test_type == 'all' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
|
||||
- name: Remove unwanted software
|
||||
uses: ./.github/actions/prune-vm
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
|
||||
- uses: ./.github/actions/npm
|
||||
|
||||
- run: npm run build:esm
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 150
|
||||
run: |
|
||||
if [ "${{ matrix.test_type }}" = "high-throughput" ]; then
|
||||
npm run test:high-throughput
|
||||
elif [ "${{ matrix.test_type }}" = "throughput-sizes" ]; then
|
||||
npm run test:throughput-sizes
|
||||
elif [ "${{ matrix.test_type }}" = "network-latency" ]; then
|
||||
npm run test:network-latency
|
||||
elif [ "${{ matrix.test_type }}" = "low-bandwidth" ]; then
|
||||
npm run test:low-bandwidth
|
||||
elif [ "${{ matrix.test_type }}" = "packet-loss" ]; then
|
||||
npm run test:packet-loss
|
||||
else
|
||||
npm run test:longevity
|
||||
fi
|
||||
|
||||
single-test:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
actions: read
|
||||
checks: write
|
||||
if: ${{ github.event.inputs.test_type != 'all' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
repository: logos-messaging/logos-messaging-js
|
||||
|
||||
- name: Remove unwanted software
|
||||
uses: ./.github/actions/prune-vm
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODE_JS }}
|
||||
|
||||
- uses: ./.github/actions/npm
|
||||
|
||||
- run: npm run build:esm
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 150
|
||||
run: |
|
||||
if [ "${{ github.event.inputs.test_type }}" = "high-throughput" ]; then
|
||||
npm run test:high-throughput
|
||||
elif [ "${{ github.event.inputs.test_type }}" = "throughput-sizes" ]; then
|
||||
npm run test:throughput-sizes
|
||||
elif [ "${{ github.event.inputs.test_type }}" = "network-latency" ]; then
|
||||
npm run test:network-latency
|
||||
elif [ "${{ github.event.inputs.test_type }}" = "low-bandwidth" ]; then
|
||||
npm run test:low-bandwidth
|
||||
elif [ "${{ github.event.inputs.test_type }}" = "packet-loss" ]; then
|
||||
npm run test:packet-loss
|
||||
else
|
||||
npm run test:longevity
|
||||
fi
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@ -15,4 +15,9 @@ example
|
||||
packages/discovery/mock_local_storage
|
||||
.cursorrules
|
||||
.giga
|
||||
.cursor
|
||||
.cursor
|
||||
.DS_Store
|
||||
CLAUDE.md
|
||||
.env
|
||||
postgres-data/
|
||||
packages/rln/waku-rlnv2-contract/
|
||||
|
||||
@ -1,15 +1,15 @@
|
||||
{
|
||||
"packages/utils": "0.0.23",
|
||||
"packages/proto": "0.0.10",
|
||||
"packages/interfaces": "0.0.30",
|
||||
"packages/message-hash": "0.1.19",
|
||||
"packages/enr": "0.0.29",
|
||||
"packages/core": "0.0.35",
|
||||
"packages/message-encryption": "0.0.33",
|
||||
"packages/relay": "0.0.18",
|
||||
"packages/sdk": "0.0.31",
|
||||
"packages/discovery": "0.0.8",
|
||||
"packages/react-native-polyfills": "0.0.1",
|
||||
"packages/sds": "0.0.3",
|
||||
"packages/rln": "0.1.5"
|
||||
"packages/utils": "0.0.28",
|
||||
"packages/proto": "0.0.15",
|
||||
"packages/interfaces": "0.0.35",
|
||||
"packages/enr": "0.0.34",
|
||||
"packages/core": "0.0.41",
|
||||
"packages/message-encryption": "0.0.39",
|
||||
"packages/relay": "0.0.24",
|
||||
"packages/sdk": "0.0.37",
|
||||
"packages/discovery": "0.0.14",
|
||||
"packages/sds": "0.0.9",
|
||||
"packages/rln": "0.1.11",
|
||||
"packages/react": "0.0.9",
|
||||
"packages/run": "0.0.3"
|
||||
}
|
||||
|
||||
@ -33,9 +33,9 @@ module.exports = [
|
||||
import: "{ wakuPeerExchangeDiscovery }",
|
||||
},
|
||||
{
|
||||
name: "Local Peer Cache Discovery",
|
||||
name: "Peer Cache Discovery",
|
||||
path: "packages/discovery/bundle/index.js",
|
||||
import: "{ wakuLocalPeerCacheDiscovery }",
|
||||
import: "{ wakuPeerCacheDiscovery }",
|
||||
},
|
||||
{
|
||||
name: "Privacy preserving protocols",
|
||||
@ -59,7 +59,7 @@ module.exports = [
|
||||
},
|
||||
{
|
||||
name: "Deterministic Message Hashing",
|
||||
path: "packages/message-hash/bundle/index.js",
|
||||
path: ["packages/core/bundle/index.js"],
|
||||
import: "{ messageHash }",
|
||||
},
|
||||
];
|
||||
|
||||
@ -187,7 +187,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2018 Status Research & Development GmbH
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
14
LICENSE-MIT
14
LICENSE-MIT
@ -1,21 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2021 Status Research & Development GmbH
|
||||
Copyright © 2025-2026 Logos
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
of this software and associated documentation files (the “Software”), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@ -23,6 +23,15 @@ npm install
|
||||
npm run doc
|
||||
```
|
||||
|
||||
# Using Nix shell
|
||||
```shell
|
||||
git clone https://github.com/waku-org/js-waku.git
|
||||
cd js-waku
|
||||
nix develop
|
||||
npm install
|
||||
npm run doc
|
||||
```
|
||||
|
||||
## Bugs, Questions & Features
|
||||
|
||||
If you encounter any bug or would like to propose new features, feel free to [open an issue](https://github.com/waku-org/js-waku/issues/new/).
|
||||
|
||||
30
ci/Jenkinsfile
vendored
30
ci/Jenkinsfile
vendored
@ -1,8 +1,20 @@
|
||||
#!/usr/bin/env groovy
|
||||
library 'status-jenkins-lib@v1.9.27'
|
||||
|
||||
pipeline {
|
||||
agent { label 'linux' }
|
||||
agent {
|
||||
docker {
|
||||
label 'linuxcontainer'
|
||||
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
|
||||
args '--volume=/nix:/nix ' +
|
||||
'--volume=/etc/nix:/etc/nix ' +
|
||||
'--user jenkins'
|
||||
}
|
||||
}
|
||||
|
||||
options {
|
||||
disableConcurrentBuilds()
|
||||
disableRestartFromStage()
|
||||
/* manage how many builds we keep */
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '20',
|
||||
@ -20,19 +32,25 @@ pipeline {
|
||||
stages {
|
||||
stage('Deps') {
|
||||
steps {
|
||||
sh 'npm install'
|
||||
script {
|
||||
nix.develop('npm install', pure: true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Packages') {
|
||||
steps {
|
||||
sh 'npm run build'
|
||||
script {
|
||||
nix.develop('npm run build', pure: true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Build') {
|
||||
steps {
|
||||
sh 'npm run doc'
|
||||
script {
|
||||
nix.develop('npm run doc', pure: true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -40,7 +58,9 @@ pipeline {
|
||||
when { expression { GIT_BRANCH.endsWith('master') } }
|
||||
steps {
|
||||
sshagent(credentials: ['status-im-auto-ssh']) {
|
||||
sh 'npm run deploy'
|
||||
script {
|
||||
nix.develop('npm run deploy', pure: false)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -13,8 +13,8 @@ const Args = process.argv.slice(2);
|
||||
const USE_HTTPS = Args[0] && Args[0].toUpperCase() === "HTTPS";
|
||||
|
||||
const branch = "gh-pages";
|
||||
const org = "waku-org";
|
||||
const repo = "js-waku";
|
||||
const org = "logos-messaging";
|
||||
const repo = "logos-messaging-js";
|
||||
/* use SSH auth by default */
|
||||
let repoUrl = USE_HTTPS
|
||||
? `https://github.com/${org}/${repo}.git`
|
||||
|
||||
26
flake.lock
generated
Normal file
26
flake.lock
generated
Normal file
@ -0,0 +1,26 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1761016216,
|
||||
"narHash": "sha256-G/iC4t/9j/52i/nm+0/4ybBmAF4hzR8CNHC75qEhjHo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "481cf557888e05d3128a76f14c76397b7d7cc869",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-25.05",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
33
flake.nix
Normal file
33
flake.nix
Normal file
@ -0,0 +1,33 @@
|
||||
{
|
||||
description = "Nix flake development shell.";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "nixpkgs/nixos-25.05";
|
||||
};
|
||||
|
||||
outputs =
|
||||
{ self, nixpkgs }:
|
||||
let
|
||||
supportedSystems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"x86_64-darwin"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
forEachSystem = nixpkgs.lib.genAttrs supportedSystems;
|
||||
pkgsFor = forEachSystem (system: import nixpkgs { inherit system; });
|
||||
in
|
||||
rec {
|
||||
formatter = forEachSystem (system: pkgsFor.${system}.nixpkgs-fmt);
|
||||
|
||||
devShells = forEachSystem (system: {
|
||||
default = pkgsFor.${system}.mkShellNoCC {
|
||||
packages = with pkgsFor.${system}.buildPackages; [
|
||||
git # 2.44.1
|
||||
openssh # 9.7p1
|
||||
nodejs_20 # v20.15.1
|
||||
];
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
@ -1,8 +1,15 @@
|
||||
/* eslint-env node */
|
||||
const playwright = require("playwright");
|
||||
const webpack = require("webpack");
|
||||
|
||||
process.env.CHROME_BIN = playwright.chromium.executablePath();
|
||||
process.env.FIREFOX_BIN = playwright.firefox.executablePath();
|
||||
if (!process.env.CHROME_BIN) {
|
||||
process.env.CHROME_BIN = playwright.chromium.executablePath();
|
||||
}
|
||||
console.log("Using CHROME_BIN:", process.env.CHROME_BIN);
|
||||
if (!process.env.FIREFOX_BIN) {
|
||||
process.env.FIREFOX_BIN = playwright.firefox.executablePath();
|
||||
}
|
||||
console.log("Using FIREFOX_BIN:", process.env.FIREFOX_BIN);
|
||||
|
||||
module.exports = function (config) {
|
||||
const configuration = {
|
||||
|
||||
23228
package-lock.json
generated
23228
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
29
package.json
29
package.json
@ -6,19 +6,20 @@
|
||||
"packages/proto",
|
||||
"packages/interfaces",
|
||||
"packages/utils",
|
||||
"packages/message-hash",
|
||||
"packages/enr",
|
||||
"packages/core",
|
||||
"packages/discovery",
|
||||
"packages/message-encryption",
|
||||
"packages/sdk",
|
||||
"packages/relay",
|
||||
"packages/sds",
|
||||
"packages/rln",
|
||||
"packages/sdk",
|
||||
"packages/relay",
|
||||
"packages/run",
|
||||
"packages/tests",
|
||||
"packages/reliability-tests",
|
||||
"packages/browser-tests",
|
||||
"packages/build-utils",
|
||||
"packages/react-native-polyfills"
|
||||
"packages/react"
|
||||
],
|
||||
"scripts": {
|
||||
"prepare": "husky",
|
||||
@ -33,13 +34,18 @@
|
||||
"test": "NODE_ENV=test npm run test --workspaces --if-present",
|
||||
"test:browser": "NODE_ENV=test npm run test:browser --workspaces --if-present",
|
||||
"test:node": "NODE_ENV=test npm run test:node --workspaces --if-present",
|
||||
"test:longevity": "npm --prefix packages/reliability-tests run test:longevity",
|
||||
"test:high-throughput": "npm --prefix packages/reliability-tests run test:high-throughput",
|
||||
"test:throughput-sizes": "npm --prefix packages/reliability-tests run test:throughput-sizes",
|
||||
"test:network-latency": "npm --prefix packages/reliability-tests run test:network-latency",
|
||||
"test:low-bandwidth": "npm --prefix packages/reliability-tests run test:low-bandwidth",
|
||||
"test:packet-loss": "npm --prefix packages/reliability-tests run test:packet-loss",
|
||||
"proto": "npm run proto --workspaces --if-present",
|
||||
"deploy": "node ci/deploy.js",
|
||||
"doc": "run-s doc:*",
|
||||
"doc:html": "typedoc --options typedoc.cjs",
|
||||
"doc:cname": "echo 'js.waku.org' > docs/CNAME",
|
||||
"publish": "node ./ci/publish.js",
|
||||
"sync-rln-tree": "node ./packages/tests/src/sync-rln-tree.js"
|
||||
"publish": "node ./ci/publish.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@size-limit/preset-big-lib": "^11.0.2",
|
||||
@ -62,15 +68,16 @@
|
||||
"lint-staged": "^15.4.3",
|
||||
"playwright": "^1.40.1",
|
||||
"size-limit": "^11.0.1",
|
||||
"ts-loader": "^9.5.1",
|
||||
"ts-node": "^10.9.2",
|
||||
"typedoc": "^0.25.9",
|
||||
"typescript": "^5.3.3",
|
||||
"ts-loader": "9.5.2",
|
||||
"ts-node": "10.9.2",
|
||||
"typedoc": "0.28.5",
|
||||
"typescript": "5.8.3",
|
||||
"wscat": "^6.0.1"
|
||||
},
|
||||
"lint-staged": {
|
||||
"*.{ts,js}": [
|
||||
"eslint --fix"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": ""
|
||||
}
|
||||
|
||||
4
packages/browser-tests/.dockerignore
Normal file
4
packages/browser-tests/.dockerignore
Normal file
@ -0,0 +1,4 @@
|
||||
node_modules
|
||||
build
|
||||
.DS_Store
|
||||
*.log
|
||||
@ -1,3 +1,3 @@
|
||||
EXAMPLE_TEMPLATE="web-chat"
|
||||
EXAMPLE_NAME="example"
|
||||
EXAMPLE_TEMPLATE="headless"
|
||||
EXAMPLE_NAME="headless"
|
||||
EXAMPLE_PORT="8080"
|
||||
|
||||
@ -1,14 +1,45 @@
|
||||
module.exports = {
|
||||
root: true,
|
||||
parserOptions: {
|
||||
tsconfigRootDir: __dirname,
|
||||
project: "./tsconfig.dev.json"
|
||||
ecmaVersion: 2022,
|
||||
sourceType: "module"
|
||||
},
|
||||
env: {
|
||||
node: true,
|
||||
browser: true,
|
||||
es2021: true
|
||||
},
|
||||
plugins: ["import"],
|
||||
extends: ["eslint:recommended"],
|
||||
rules: {
|
||||
"no-unused-vars": ["error", { "argsIgnorePattern": "^_", "ignoreRestSiblings": true }]
|
||||
},
|
||||
rules: {},
|
||||
globals: {
|
||||
process: true
|
||||
}
|
||||
},
|
||||
overrides: [
|
||||
{
|
||||
files: ["*.spec.ts", "**/test_utils/*.ts"],
|
||||
rules: {
|
||||
"@typescript-eslint/no-non-null-assertion": "off",
|
||||
"@typescript-eslint/no-explicit-any": "off",
|
||||
"no-console": "off",
|
||||
"import/no-extraneous-dependencies": ["error", { "devDependencies": true }]
|
||||
}
|
||||
},
|
||||
{
|
||||
files: ["*.ts"],
|
||||
parser: "@typescript-eslint/parser",
|
||||
parserOptions: {
|
||||
tsconfigRootDir: __dirname,
|
||||
project: "./tsconfig.dev.json"
|
||||
}
|
||||
},
|
||||
{
|
||||
files: ["*.d.ts"],
|
||||
rules: {
|
||||
"no-unused-vars": "off"
|
||||
}
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
72
packages/browser-tests/Dockerfile
Normal file
72
packages/browser-tests/Dockerfile
Normal file
@ -0,0 +1,72 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
# Build stage - install all dependencies and build
|
||||
FROM node:22-bullseye AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy package.json and temporarily remove workspace dependencies that can't be resolved
|
||||
COPY package.json package.json.orig
|
||||
RUN sed '/"@waku\/tests": "\*",/d' package.json.orig > package.json
|
||||
RUN npm install --no-audit --no-fund
|
||||
|
||||
COPY src ./src
|
||||
COPY types ./types
|
||||
COPY tsconfig.json ./
|
||||
COPY web ./web
|
||||
|
||||
RUN npm run build
|
||||
|
||||
# Production stage - only runtime dependencies
|
||||
FROM node:22-bullseye
|
||||
|
||||
# Install required system deps for Playwright Chromium
|
||||
RUN apt-get update && apt-get install -y \
|
||||
wget \
|
||||
gnupg \
|
||||
ca-certificates \
|
||||
fonts-liberation \
|
||||
libatk-bridge2.0-0 \
|
||||
libatk1.0-0 \
|
||||
libatspi2.0-0 \
|
||||
libcups2 \
|
||||
libdbus-1-3 \
|
||||
libdrm2 \
|
||||
libgtk-3-0 \
|
||||
libnspr4 \
|
||||
libnss3 \
|
||||
libx11-xcb1 \
|
||||
libxcomposite1 \
|
||||
libxdamage1 \
|
||||
libxfixes3 \
|
||||
libxkbcommon0 \
|
||||
libxrandr2 \
|
||||
xdg-utils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy package files and install only production dependencies
|
||||
COPY package.json package.json.orig
|
||||
RUN sed '/"@waku\/tests": "\*",/d' package.json.orig > package.json
|
||||
RUN npm install --only=production --no-audit --no-fund
|
||||
|
||||
# Copy built application from builder stage
|
||||
COPY --from=builder /app/dist ./dist
|
||||
|
||||
# Install Playwright browsers (Chromium only) at runtime layer
|
||||
RUN npx playwright install --with-deps chromium
|
||||
|
||||
ENV PORT=8080 \
|
||||
NODE_ENV=production
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
# Use a script to handle CLI arguments and environment variables
|
||||
COPY scripts/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
|
||||
CMD ["npm", "run", "start:server"]
|
||||
|
||||
|
||||
174
packages/browser-tests/README.md
Normal file
174
packages/browser-tests/README.md
Normal file
@ -0,0 +1,174 @@
|
||||
# Waku Browser Tests
|
||||
|
||||
This package provides a containerized Waku light node simulation server for testing and development. The server runs a headless browser using Playwright and exposes a REST API similar to the nwaku REST API. A Dockerfile is provided to allow programmatic simulation and "deployment" of js-waku nodes in any Waku orchestration environment that uses Docker (e.g. [10ksim](https://github.com/vacp2p/10ksim) ).
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Build and Run
|
||||
|
||||
```bash
|
||||
# Build the application
|
||||
npm run build
|
||||
|
||||
# Start the server (port 8080)
|
||||
npm run start:server
|
||||
|
||||
# Build and run Docker container
|
||||
npm run docker:build
|
||||
docker run -p 8080:8080 waku-browser-tests:local
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Configure the Waku node using environment variables:
|
||||
|
||||
### Network Configuration
|
||||
- `WAKU_CLUSTER_ID`: Cluster ID (default: 1)
|
||||
- `WAKU_SHARD`: Specific shard number - enables static sharding mode (optional)
|
||||
|
||||
**Sharding Behavior:**
|
||||
- **Auto-sharding** (default): Uses `numShardsInCluster: 8` across cluster 1
|
||||
- **Static sharding**: When `WAKU_SHARD` is set, uses only that specific shard
|
||||
|
||||
### Bootstrap Configuration
|
||||
- `WAKU_ENR_BOOTSTRAP`: Enable ENR bootstrap mode with custom bootstrap peers (comma-separated)
|
||||
- `WAKU_LIGHTPUSH_NODE`: Preferred lightpush node multiaddr (Docker only)
|
||||
|
||||
### ENR Bootstrap Mode
|
||||
|
||||
When `WAKU_ENR_BOOTSTRAP` is set:
|
||||
- Disables default bootstrap (`defaultBootstrap: false`)
|
||||
- Enables DNS discovery using production ENR trees
|
||||
- Enables peer exchange and peer cache
|
||||
- Uses the specified ENR for additional bootstrap peers
|
||||
|
||||
```bash
|
||||
# Example: ENR bootstrap mode
|
||||
WAKU_ENR_BOOTSTRAP="enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSHKCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcnO4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKav-g3VkcIIjKA" npm run start:server
|
||||
```
|
||||
|
||||
## API Endpoints
|
||||
|
||||
The server exposes the following HTTP endpoints:
|
||||
|
||||
### Node Management
|
||||
- `GET /`: Health check - returns server status
|
||||
- `GET /waku/v1/peer-info`: Get node peer information
|
||||
- `POST /waku/v1/wait-for-peers`: Wait for peers with specific protocols
|
||||
|
||||
### Messaging
|
||||
- `POST /lightpush/v3/message`: Send message via lightpush
|
||||
|
||||
### Static Files
|
||||
- `GET /app/index.html`: Web application entry point
|
||||
- `GET /app/*`: Static web application files
|
||||
|
||||
### Examples
|
||||
|
||||
#### Send a Message (Auto-sharding)
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/lightpush/v3/message \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"pubsubTopic": "",
|
||||
"message": {
|
||||
"contentTopic": "/test/1/example/proto",
|
||||
"payload": "SGVsbG8gV2FrdQ==",
|
||||
"version": 1
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
#### Send a Message (Explicit pubsub topic)
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/lightpush/v3/message \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"pubsubTopic": "/waku/2/rs/1/4",
|
||||
"message": {
|
||||
"contentTopic": "/test/1/example/proto",
|
||||
"payload": "SGVsbG8gV2FrdQ==",
|
||||
"version": 1
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
#### Wait for Peers
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/waku/v1/wait-for-peers \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"timeoutMs": 30000,
|
||||
"protocols": ["lightpush", "filter"]
|
||||
}'
|
||||
```
|
||||
|
||||
#### Get Peer Info
|
||||
```bash
|
||||
curl -X GET http://localhost:8080/waku/v1/peer-info
|
||||
```
|
||||
|
||||
## CLI Usage
|
||||
|
||||
Run with CLI arguments:
|
||||
|
||||
```bash
|
||||
# Custom cluster and shard
|
||||
node dist/src/server.js --cluster-id=2 --shard=0
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
The package includes several test suites:
|
||||
|
||||
```bash
|
||||
# Basic server functionality tests (default)
|
||||
npm test
|
||||
|
||||
# Docker testing workflow
|
||||
npm run docker:build
|
||||
npm run test:integration
|
||||
|
||||
# All tests
|
||||
npm run test:all
|
||||
|
||||
# Individual test suites:
|
||||
npm run test:server # Server-only tests
|
||||
npm run test:e2e # End-to-end tests
|
||||
```
|
||||
|
||||
**Test Types:**
|
||||
- `server.spec.ts` - Tests basic server functionality and static file serving
|
||||
- `integration.spec.ts` - Tests Docker container integration with external services
|
||||
- `e2e.spec.ts` - Full end-to-end tests using nwaku nodes
|
||||
|
||||
## Docker Usage
|
||||
|
||||
The package includes Docker support for containerized testing:
|
||||
|
||||
```bash
|
||||
# Build image
|
||||
docker build -t waku-browser-tests:local .
|
||||
|
||||
# Run with ENR bootstrap
|
||||
docker run -p 8080:8080 \
|
||||
-e WAKU_ENR_BOOTSTRAP="enr:-QEnuE..." \
|
||||
-e WAKU_CLUSTER_ID="1" \
|
||||
waku-browser-tests:local
|
||||
|
||||
# Run with specific configuration
|
||||
docker run -p 8080:8080 \
|
||||
-e WAKU_CLUSTER_ID="2" \
|
||||
-e WAKU_SHARD="0" \
|
||||
waku-browser-tests:local
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
The server automatically:
|
||||
- Creates a Waku light node on startup
|
||||
- Configures network settings from environment variables
|
||||
- Enables appropriate protocols (lightpush, filter)
|
||||
- Handles peer discovery and connection management
|
||||
|
||||
All endpoints are CORS-enabled for cross-origin requests.
|
||||
@ -4,16 +4,39 @@
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"start": "run-s start:*",
|
||||
"start:setup": "node ./src/setup-example.js",
|
||||
"start:build": "node ./src/build-example.js",
|
||||
"start:serve": "npx serve -p 8080 --no-port-switching ./example",
|
||||
"test": "npx playwright test"
|
||||
"start": "npm run start:server",
|
||||
"start:server": "PORT=8080 node ./dist/src/server.js",
|
||||
"test": "npx playwright test tests/server.spec.ts --reporter=line",
|
||||
"test:all": "npx playwright test --reporter=line",
|
||||
"test:server": "npx playwright test tests/server.spec.ts --reporter=line",
|
||||
"test:integration": "npx playwright test tests/integration.spec.ts --reporter=line",
|
||||
"test:e2e": "npx playwright test tests/e2e.spec.ts --reporter=line",
|
||||
"build:server": "tsc -p tsconfig.json",
|
||||
"build:web": "esbuild web/index.ts --bundle --format=esm --platform=browser --outdir=dist/web && cp web/index.html dist/web/index.html",
|
||||
"build": "npm-run-all -s build:server build:web",
|
||||
"docker:build": "docker build -t waku-browser-tests:local . && docker tag waku-browser-tests:local waku-browser-tests:latest"
|
||||
},
|
||||
"dependencies": {
|
||||
"@playwright/test": "^1.51.1",
|
||||
"@waku/discovery": "*",
|
||||
"@waku/interfaces": "*",
|
||||
"@waku/sdk": "*",
|
||||
"@waku/utils": "*",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv-flow": "^0.4.0",
|
||||
"express": "^4.21.2",
|
||||
"filter-obj": "^2.0.2",
|
||||
"it-first": "^3.0.9"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.50.0",
|
||||
"@waku/create-app": "^0.1.1-504bcd4",
|
||||
"dotenv-flow": "^4.1.0",
|
||||
"serve": "^14.2.3"
|
||||
"@types/cors": "^2.8.15",
|
||||
"@types/express": "^4.17.21",
|
||||
"@types/node": "^20.10.0",
|
||||
"@waku/tests": "*",
|
||||
"axios": "^1.8.4",
|
||||
"esbuild": "^0.21.5",
|
||||
"npm-run-all": "^4.1.5",
|
||||
"testcontainers": "^10.9.0",
|
||||
"typescript": "5.8.3"
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,80 +1,39 @@
|
||||
import "dotenv-flow/config";
|
||||
import { defineConfig, devices } from "@playwright/test";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
const EXAMPLE_PORT = process.env.EXAMPLE_PORT;
|
||||
// web-chat specific thingy
|
||||
const EXAMPLE_TEMPLATE = process.env.EXAMPLE_TEMPLATE;
|
||||
const BASE_URL = `http://127.0.0.1:${EXAMPLE_PORT}/${EXAMPLE_TEMPLATE}`;
|
||||
const log = new Logger("playwright-config");
|
||||
|
||||
if (!process.env.CI) {
|
||||
try {
|
||||
await import("dotenv-flow/config.js");
|
||||
} catch (e) {
|
||||
log.warn("dotenv-flow not found; skipping env loading");
|
||||
}
|
||||
}
|
||||
|
||||
const EXAMPLE_PORT = process.env.EXAMPLE_PORT || "8080";
|
||||
const BASE_URL = `http://127.0.0.1:${EXAMPLE_PORT}`;
|
||||
const TEST_IGNORE = process.env.CI ? ["tests/e2e.spec.ts"] : [];
|
||||
|
||||
/**
|
||||
* See https://playwright.dev/docs/test-configuration.
|
||||
*/
|
||||
export default defineConfig({
|
||||
testDir: "./tests",
|
||||
/* Run tests in files in parallel */
|
||||
testIgnore: TEST_IGNORE,
|
||||
fullyParallel: true,
|
||||
/* Fail the build on CI if you accidentally left test.only in the source code. */
|
||||
forbidOnly: !!process.env.CI,
|
||||
/* Retry on CI only */
|
||||
retries: process.env.CI ? 2 : 0,
|
||||
/* Opt out of parallel tests on CI. */
|
||||
workers: process.env.CI ? 2 : undefined,
|
||||
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
|
||||
reporter: "html",
|
||||
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
|
||||
use: {
|
||||
/* Base URL to use in actions like `await page.goto('/')`. */
|
||||
baseURL: BASE_URL,
|
||||
|
||||
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
|
||||
trace: "on-first-retry"
|
||||
},
|
||||
|
||||
/* Configure projects for major browsers */
|
||||
projects: [
|
||||
{
|
||||
name: "chromium",
|
||||
use: { ...devices["Desktop Chrome"] }
|
||||
},
|
||||
|
||||
{
|
||||
name: "firefox",
|
||||
use: { ...devices["Desktop Firefox"] }
|
||||
},
|
||||
|
||||
{
|
||||
name: "webkit",
|
||||
use: { ...devices["Desktop Safari"] }
|
||||
}
|
||||
]
|
||||
|
||||
/* Test against mobile viewports. */
|
||||
// {
|
||||
// name: 'Mobile Chrome',
|
||||
// use: { ...devices['Pixel 5'] },
|
||||
// },
|
||||
// {
|
||||
// name: 'Mobile Safari',
|
||||
// use: { ...devices['iPhone 12'] },
|
||||
// },
|
||||
|
||||
/* Test against branded browsers. */
|
||||
// {
|
||||
// name: 'Microsoft Edge',
|
||||
// use: { ...devices['Desktop Edge'], channel: 'msedge' },
|
||||
// },
|
||||
// {
|
||||
// name: 'Google Chrome',
|
||||
// use: { ...devices['Desktop Chrome'], channel: 'chrome' },
|
||||
// },
|
||||
],
|
||||
|
||||
/* Run your local dev server before starting the tests */
|
||||
webServer: {
|
||||
url: BASE_URL,
|
||||
stdout: "pipe",
|
||||
stderr: "pipe",
|
||||
command: "npm start",
|
||||
reuseExistingServer: !process.env.CI,
|
||||
timeout: 5 * 60 * 1000 // five minutes for bootstrapping an example
|
||||
}
|
||||
});
|
||||
|
||||
54
packages/browser-tests/scripts/docker-entrypoint.sh
Normal file
54
packages/browser-tests/scripts/docker-entrypoint.sh
Normal file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Docker entrypoint script for waku-browser-tests
|
||||
# Handles CLI arguments and converts them to environment variables
|
||||
# Supports reading discovered addresses from /etc/addrs/addrs.env (10k sim pattern)
|
||||
echo "docker-entrypoint.sh"
|
||||
echo "Using address: $addrs1"
|
||||
# Only set WAKU_LIGHTPUSH_NODE if it's not already set and addrs1 is available
|
||||
if [ -z "$WAKU_LIGHTPUSH_NODE" ] && [ -n "$addrs1" ]; then
|
||||
export WAKU_LIGHTPUSH_NODE="$addrs1"
|
||||
fi
|
||||
echo "Num Args: $#"
|
||||
echo "Args: $@"
|
||||
|
||||
echo "WAKU_LIGHTPUSH_NODE=$WAKU_LIGHTPUSH_NODE"
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--cluster-id=*)
|
||||
export WAKU_CLUSTER_ID="${1#*=}"
|
||||
echo "Setting WAKU_CLUSTER_ID=${WAKU_CLUSTER_ID}"
|
||||
shift
|
||||
;;
|
||||
--shard=*)
|
||||
export WAKU_SHARD="${1#*=}"
|
||||
echo "Setting WAKU_SHARD=${WAKU_SHARD}"
|
||||
shift
|
||||
;;
|
||||
--lightpushnode=*)
|
||||
export WAKU_LIGHTPUSH_NODE="${1#*=}"
|
||||
echo "Setting WAKU_LIGHTPUSH_NODE=${WAKU_LIGHTPUSH_NODE}"
|
||||
shift
|
||||
;;
|
||||
--enr-bootstrap=*)
|
||||
export WAKU_ENR_BOOTSTRAP="${1#*=}"
|
||||
echo "Setting WAKU_ENR_BOOTSTRAP=${WAKU_ENR_BOOTSTRAP}"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
# Unknown argument, notify user and keep it for the main command
|
||||
echo "Warning: Unknown argument '$1' will be passed to the main command"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# If no specific command is provided, use the default CMD
|
||||
if [ $# -eq 0 ]; then
|
||||
set -- "npm" "run" "start:server"
|
||||
fi
|
||||
|
||||
# Execute the main command
|
||||
exec "$@"
|
||||
67
packages/browser-tests/src/browser/index.ts
Normal file
67
packages/browser-tests/src/browser/index.ts
Normal file
@ -0,0 +1,67 @@
|
||||
import { Browser, chromium, Page } from "@playwright/test";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
const log = new Logger("browser-test");
|
||||
|
||||
let browser: Browser | undefined;
|
||||
let page: Page | undefined;
|
||||
|
||||
export async function initBrowser(appPort: number): Promise<void> {
|
||||
try {
|
||||
const launchArgs = ["--no-sandbox", "--disable-setuid-sandbox"];
|
||||
|
||||
browser = await chromium.launch({
|
||||
headless: true,
|
||||
args: launchArgs
|
||||
});
|
||||
|
||||
if (!browser) {
|
||||
throw new Error("Failed to initialize browser");
|
||||
}
|
||||
|
||||
page = await browser.newPage();
|
||||
|
||||
// Forward browser console to server logs
|
||||
page.on('console', msg => {
|
||||
const type = msg.type();
|
||||
const text = msg.text();
|
||||
log.info(`[Browser Console ${type.toUpperCase()}] ${text}`);
|
||||
});
|
||||
|
||||
page.on('pageerror', error => {
|
||||
log.error('[Browser Page Error]', error.message);
|
||||
});
|
||||
|
||||
await page.goto(`http://localhost:${appPort}/app/index.html`, {
|
||||
waitUntil: "networkidle",
|
||||
});
|
||||
|
||||
await page.waitForFunction(
|
||||
() => {
|
||||
return window.wakuApi && typeof window.wakuApi.createWakuNode === "function";
|
||||
},
|
||||
{ timeout: 30000 }
|
||||
);
|
||||
|
||||
log.info("Browser initialized successfully with wakuApi");
|
||||
} catch (error) {
|
||||
log.error("Error initializing browser:", error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
export function getPage(): Page | undefined {
|
||||
return page;
|
||||
}
|
||||
|
||||
export function setPage(pageInstance: Page | undefined): void {
|
||||
page = pageInstance;
|
||||
}
|
||||
|
||||
export async function closeBrowser(): Promise<void> {
|
||||
if (browser) {
|
||||
await browser.close();
|
||||
browser = undefined;
|
||||
page = undefined;
|
||||
}
|
||||
}
|
||||
@ -1,55 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
import "dotenv-flow/config";
|
||||
import { execSync } from "child_process";
|
||||
import path from "path";
|
||||
|
||||
import { __dirname } from "./utils.js";
|
||||
|
||||
const EXAMPLE_NAME = process.env.EXAMPLE_NAME;
|
||||
const EXAMPLE_PATH = path.resolve(__dirname, "..", EXAMPLE_NAME);
|
||||
|
||||
const BUILD_FOLDER = "build";
|
||||
const BUILD_PATH = path.resolve(EXAMPLE_PATH, BUILD_FOLDER);
|
||||
|
||||
// required by web-chat example
|
||||
const WEB_CHAT_BUILD_PATH = path.resolve(EXAMPLE_PATH, "web-chat");
|
||||
|
||||
run();
|
||||
|
||||
function run() {
|
||||
cleanPrevBuildIfExists();
|
||||
buildExample();
|
||||
renameBuildFolderForWebChat();
|
||||
}
|
||||
|
||||
function cleanPrevBuildIfExists() {
|
||||
try {
|
||||
console.log("Cleaning previous build if exists.");
|
||||
execSync(`rm -rf ${BUILD_PATH}`, { stdio: "ignore" });
|
||||
} catch (error) {
|
||||
console.error(`Failed to clean previous build: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function buildExample() {
|
||||
try {
|
||||
console.log("Building example at", EXAMPLE_PATH);
|
||||
execSync(`cd ${EXAMPLE_PATH} && npm run build`, { stdio: "pipe" });
|
||||
} catch (error) {
|
||||
console.error(`Failed to build example: ${error.message}`);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
function renameBuildFolderForWebChat() {
|
||||
try {
|
||||
console.log("Renaming example's build folder.");
|
||||
execSync(`mv ${BUILD_PATH} ${WEB_CHAT_BUILD_PATH}`, { stdio: "ignore" });
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Failed to rename build folder for web-chat: ${error.message}`
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
87
packages/browser-tests/src/routes/waku.ts
Normal file
87
packages/browser-tests/src/routes/waku.ts
Normal file
@ -0,0 +1,87 @@
|
||||
import { Router } from "express";
|
||||
import { Logger } from "@waku/utils";
|
||||
import {
|
||||
createEndpointHandler,
|
||||
validators,
|
||||
errorHandlers,
|
||||
} from "../utils/endpoint-handler.js";
|
||||
|
||||
interface LightPushResult {
|
||||
successes: string[];
|
||||
failures: Array<{ error: string; peerId?: string }>;
|
||||
}
|
||||
|
||||
const log = new Logger("routes:waku");
|
||||
const router = Router();
|
||||
|
||||
const corsEndpoints = [
|
||||
"/waku/v1/wait-for-peers",
|
||||
"/waku/v1/peer-info",
|
||||
"/lightpush/v3/message",
|
||||
];
|
||||
|
||||
corsEndpoints.forEach((endpoint) => {
|
||||
router.head(endpoint, (_req, res) => {
|
||||
res.status(200).end();
|
||||
});
|
||||
});
|
||||
|
||||
router.post(
|
||||
"/waku/v1/wait-for-peers",
|
||||
createEndpointHandler({
|
||||
methodName: "waitForPeers",
|
||||
validateInput: (body: unknown) => {
|
||||
const bodyObj = body as { timeoutMs?: number; protocols?: string[] };
|
||||
return [
|
||||
bodyObj.timeoutMs || 10000,
|
||||
bodyObj.protocols || ["lightpush", "filter"],
|
||||
];
|
||||
},
|
||||
transformResult: () => ({
|
||||
success: true,
|
||||
message: "Successfully connected to peers",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
router.get(
|
||||
"/waku/v1/peer-info",
|
||||
createEndpointHandler({
|
||||
methodName: "getPeerInfo",
|
||||
validateInput: validators.noInput,
|
||||
}),
|
||||
);
|
||||
|
||||
router.post(
|
||||
"/lightpush/v3/message",
|
||||
createEndpointHandler({
|
||||
methodName: "pushMessageV3",
|
||||
validateInput: (body: unknown): [string, string, string] => {
|
||||
const validatedRequest = validators.requireLightpushV3(body);
|
||||
|
||||
return [
|
||||
validatedRequest.message.contentTopic,
|
||||
validatedRequest.message.payload,
|
||||
validatedRequest.pubsubTopic,
|
||||
];
|
||||
},
|
||||
handleError: errorHandlers.lightpushError,
|
||||
transformResult: (result: unknown) => {
|
||||
const lightPushResult = result as LightPushResult;
|
||||
if (lightPushResult && lightPushResult.successes && lightPushResult.successes.length > 0) {
|
||||
log.info("[Server] Message successfully sent via v3 lightpush!");
|
||||
return {
|
||||
success: true,
|
||||
result: lightPushResult,
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
success: false,
|
||||
error: "Could not publish message: no suitable peers",
|
||||
};
|
||||
}
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
export default router;
|
||||
244
packages/browser-tests/src/server.ts
Normal file
244
packages/browser-tests/src/server.ts
Normal file
@ -0,0 +1,244 @@
|
||||
import { fileURLToPath } from "url";
|
||||
import * as path from "path";
|
||||
|
||||
import cors from "cors";
|
||||
import express, { Request, Response } from "express";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import wakuRouter from "./routes/waku.js";
|
||||
import { initBrowser, getPage, closeBrowser } from "./browser/index.js";
|
||||
import {
|
||||
DEFAULT_CLUSTER_ID,
|
||||
DEFAULT_NUM_SHARDS,
|
||||
Protocols,
|
||||
AutoSharding,
|
||||
StaticSharding,
|
||||
} from "@waku/interfaces";
|
||||
import { CreateNodeOptions } from "@waku/sdk";
|
||||
import type { WindowNetworkConfig } from "../types/global.js";
|
||||
|
||||
interface NodeError extends Error {
|
||||
code?: string;
|
||||
}
|
||||
|
||||
const log = new Logger("server");
|
||||
const app = express();
|
||||
|
||||
app.use(cors());
|
||||
app.use(express.json());
|
||||
|
||||
import * as fs from "fs";
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
const distRoot = path.resolve(__dirname, "..");
|
||||
const webDir = path.resolve(distRoot, "web");
|
||||
|
||||
app.get("/app/index.html", (_req: Request, res: Response) => {
|
||||
try {
|
||||
const htmlPath = path.join(webDir, "index.html");
|
||||
let htmlContent = fs.readFileSync(htmlPath, "utf8");
|
||||
|
||||
const networkConfig: WindowNetworkConfig = {};
|
||||
if (process.env.WAKU_CLUSTER_ID) {
|
||||
networkConfig.clusterId = parseInt(process.env.WAKU_CLUSTER_ID, 10);
|
||||
}
|
||||
if (process.env.WAKU_SHARD) {
|
||||
networkConfig.shards = [parseInt(process.env.WAKU_SHARD, 10)];
|
||||
log.info("Using static shard:", networkConfig.shards);
|
||||
}
|
||||
|
||||
const lightpushNode = process.env.WAKU_LIGHTPUSH_NODE || null;
|
||||
const enrBootstrap = process.env.WAKU_ENR_BOOTSTRAP || null;
|
||||
|
||||
log.info("Network config on server start, pre headless:", networkConfig);
|
||||
|
||||
const configScript = ` <script>
|
||||
window.__WAKU_NETWORK_CONFIG = ${JSON.stringify(networkConfig)};
|
||||
window.__WAKU_LIGHTPUSH_NODE = ${JSON.stringify(lightpushNode)};
|
||||
window.__WAKU_ENR_BOOTSTRAP = ${JSON.stringify(enrBootstrap)};
|
||||
</script>`;
|
||||
const originalPattern =
|
||||
' <script type="module" src="./index.js"></script>';
|
||||
const replacement = `${configScript}\n <script type="module" src="./index.js"></script>`;
|
||||
|
||||
htmlContent = htmlContent.replace(originalPattern, replacement);
|
||||
|
||||
res.setHeader("Content-Type", "text/html");
|
||||
res.send(htmlContent);
|
||||
} catch (error) {
|
||||
log.error("Error serving dynamic index.html:", error);
|
||||
res.status(500).send("Error loading page");
|
||||
}
|
||||
});
|
||||
|
||||
app.use("/app", express.static(webDir, { index: false }));
|
||||
|
||||
app.use(wakuRouter);
|
||||
|
||||
async function startAPI(requestedPort: number): Promise<number> {
|
||||
try {
|
||||
app.get("/", (_req: Request, res: Response) => {
|
||||
res.json({ status: "Waku simulation server is running" });
|
||||
});
|
||||
|
||||
app
|
||||
.listen(requestedPort, () => {
|
||||
log.info(`API server running on http://localhost:${requestedPort}`);
|
||||
})
|
||||
.on("error", (error: NodeError) => {
|
||||
if (error.code === "EADDRINUSE") {
|
||||
log.error(
|
||||
`Port ${requestedPort} is already in use. Please close the application using this port and try again.`,
|
||||
);
|
||||
} else {
|
||||
log.error("Error starting server:", error);
|
||||
}
|
||||
throw error;
|
||||
});
|
||||
|
||||
return requestedPort;
|
||||
} catch (error) {
|
||||
log.error("Error starting server:", error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function startServer(port: number = 3000): Promise<void> {
|
||||
try {
|
||||
const actualPort = await startAPI(port);
|
||||
await initBrowser(actualPort);
|
||||
|
||||
try {
|
||||
log.info("Auto-starting node with CLI configuration...");
|
||||
|
||||
const hasEnrBootstrap = Boolean(process.env.WAKU_ENR_BOOTSTRAP);
|
||||
|
||||
const networkConfig: AutoSharding | StaticSharding = process.env.WAKU_SHARD
|
||||
? ({
|
||||
clusterId: process.env.WAKU_CLUSTER_ID
|
||||
? parseInt(process.env.WAKU_CLUSTER_ID, 10)
|
||||
: DEFAULT_CLUSTER_ID,
|
||||
shards: [parseInt(process.env.WAKU_SHARD, 10)],
|
||||
} as StaticSharding)
|
||||
: ({
|
||||
clusterId: process.env.WAKU_CLUSTER_ID
|
||||
? parseInt(process.env.WAKU_CLUSTER_ID, 10)
|
||||
: DEFAULT_CLUSTER_ID,
|
||||
numShardsInCluster: DEFAULT_NUM_SHARDS,
|
||||
} as AutoSharding);
|
||||
|
||||
const createOptions: CreateNodeOptions = {
|
||||
defaultBootstrap: false,
|
||||
...(hasEnrBootstrap && {
|
||||
discovery: {
|
||||
dns: true,
|
||||
peerExchange: true,
|
||||
peerCache: true,
|
||||
},
|
||||
}),
|
||||
networkConfig,
|
||||
};
|
||||
|
||||
log.info(
|
||||
`Bootstrap mode: ${hasEnrBootstrap ? "ENR-only (defaultBootstrap=false)" : "default bootstrap (defaultBootstrap=true)"}`,
|
||||
);
|
||||
if (hasEnrBootstrap) {
|
||||
log.info(`ENR bootstrap peers: ${process.env.WAKU_ENR_BOOTSTRAP}`);
|
||||
}
|
||||
|
||||
log.info(
|
||||
`Network config: ${JSON.stringify(networkConfig)}`,
|
||||
);
|
||||
|
||||
await getPage()?.evaluate((config) => {
|
||||
return window.wakuApi.createWakuNode(config);
|
||||
}, createOptions);
|
||||
await getPage()?.evaluate(() => window.wakuApi.startNode());
|
||||
|
||||
try {
|
||||
await getPage()?.evaluate(() =>
|
||||
window.wakuApi.waitForPeers?.(5000, [Protocols.LightPush]),
|
||||
);
|
||||
log.info("Auto-start completed with bootstrap peers");
|
||||
} catch (peerError) {
|
||||
log.info(
|
||||
"Auto-start completed (no bootstrap peers found - may be expected with test ENRs)",
|
||||
);
|
||||
}
|
||||
} catch (e) {
|
||||
log.warn("Auto-start failed:", e);
|
||||
}
|
||||
} catch (error) {
|
||||
log.error("Error starting server:", error);
|
||||
}
|
||||
}
|
||||
|
||||
process.on("uncaughtException", (error) => {
|
||||
log.error("Uncaught Exception:", error);
|
||||
if (process.env.NODE_ENV !== "production") {
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
process.on("unhandledRejection", (reason, promise) => {
|
||||
log.error("Unhandled Rejection at:", promise, "reason:", reason);
|
||||
if (process.env.NODE_ENV !== "production") {
|
||||
process.exit(1);
|
||||
}
|
||||
});
|
||||
|
||||
const gracefulShutdown = async (signal: string) => {
|
||||
log.info(`Received ${signal}, gracefully shutting down...`);
|
||||
try {
|
||||
await closeBrowser();
|
||||
} catch (e) {
|
||||
log.warn("Error closing browser:", e);
|
||||
}
|
||||
process.exit(0);
|
||||
};
|
||||
|
||||
process.on("SIGINT", () => gracefulShutdown("SIGINT"));
|
||||
process.on("SIGTERM", () => gracefulShutdown("SIGTERM"));
|
||||
|
||||
function parseCliArgs() {
|
||||
const args = process.argv.slice(2);
|
||||
let clusterId: number | undefined;
|
||||
let shard: number | undefined;
|
||||
|
||||
for (const arg of args) {
|
||||
if (arg.startsWith("--cluster-id=")) {
|
||||
clusterId = parseInt(arg.split("=")[1], 10);
|
||||
if (isNaN(clusterId)) {
|
||||
log.error("Invalid cluster-id value. Must be a number.");
|
||||
process.exit(1);
|
||||
}
|
||||
} else if (arg.startsWith("--shard=")) {
|
||||
shard = parseInt(arg.split("=")[1], 10);
|
||||
if (isNaN(shard)) {
|
||||
log.error("Invalid shard value. Must be a number.");
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { clusterId, shard };
|
||||
}
|
||||
|
||||
const isMainModule = process.argv[1] === fileURLToPath(import.meta.url);
|
||||
|
||||
if (isMainModule) {
|
||||
const port = process.env.PORT ? parseInt(process.env.PORT, 10) : 3000;
|
||||
const cliArgs = parseCliArgs();
|
||||
|
||||
if (cliArgs.clusterId !== undefined) {
|
||||
process.env.WAKU_CLUSTER_ID = cliArgs.clusterId.toString();
|
||||
log.info(`Using CLI cluster ID: ${cliArgs.clusterId}`);
|
||||
}
|
||||
if (cliArgs.shard !== undefined) {
|
||||
process.env.WAKU_SHARD = cliArgs.shard.toString();
|
||||
log.info(`Using CLI shard: ${cliArgs.shard}`);
|
||||
}
|
||||
|
||||
void startServer(port);
|
||||
}
|
||||
@ -1,93 +0,0 @@
|
||||
#!/usr/bin/env node
import "dotenv-flow/config";
import { execSync } from "child_process";
import path from "path";

import { __dirname, readJSON } from "./utils.js";

// Monorepo root (this script lives three levels below it).
const ROOT_PATH = path.resolve(__dirname, "../../../");
const JS_WAKU_PACKAGES = readWorkspaces();

const EXAMPLE_NAME = process.env.EXAMPLE_NAME;
const EXAMPLE_TEMPLATE = process.env.EXAMPLE_TEMPLATE;

// Fail fast with a clear message instead of crashing inside path.resolve /
// the bootstrap command when the required env variables are missing.
if (!EXAMPLE_NAME || !EXAMPLE_TEMPLATE) {
  console.error(
    "EXAMPLE_NAME and EXAMPLE_TEMPLATE environment variables must be set."
  );
  process.exit(1);
}

const EXAMPLE_PATH = path.resolve(__dirname, "..", EXAMPLE_NAME);

run();
|
||||
|
||||
// Orchestrates example setup: remove any previous copy, scaffold a fresh one
// from the template, then npm-link the local workspace packages into it.
function run() {
  cleanExampleIfExists();
  bootstrapExample();
  linkPackages();
}
|
||||
|
||||
// Removes a previously bootstrapped example directory, if any.
// Rethrows on failure so the bootstrap run aborts.
function cleanExampleIfExists() {
  try {
    console.log("Cleaning previous example if exists.");
    // Quote the path so the shell command survives spaces/metacharacters
    // in EXAMPLE_NAME or the repository location.
    execSync(`rm -rf "${EXAMPLE_PATH}"`, { stdio: "ignore" });
  } catch (error) {
    console.error(`Failed to clean previous example: ${error.message}`);
    throw error;
  }
}
|
||||
|
||||
// Scaffolds the example via @waku/create-app from the configured template.
// Rethrows on failure so the bootstrap run aborts.
function bootstrapExample() {
  try {
    console.log("Bootstrapping example.");
    // Quote env-derived values so the shell command cannot be broken (or
    // abused) by spaces or metacharacters in EXAMPLE_TEMPLATE/EXAMPLE_NAME.
    execSync(
      `npx @waku/create-app --template "${EXAMPLE_TEMPLATE}" "${EXAMPLE_NAME}"`,
      { stdio: "ignore" }
    );
  } catch (error) {
    console.error(`Failed to bootstrap example: ${error.message}`);
    throw error;
  }
}
|
||||
|
||||
// npm-links every local js-waku package referenced by the example's
// package.json (runtime or dev dependency).
function linkPackages() {
  const examplePackage = readJSON(`${EXAMPLE_PATH}/package.json`);

  // Merging dev and runtime dependencies collapses duplicates on key.
  const combined = {
    ...examplePackage.dependencies,
    ...examplePackage.devDependencies
  };
  const wakuDependencies = filterWakuDependencies(combined);

  for (const name of Object.keys(wakuDependencies)) {
    linkDependency(name);
  }
}
|
||||
|
||||
// Keeps only the dependencies that are local js-waku workspace packages.
function filterWakuDependencies(dependencies) {
  const wakuEntries = Object.entries(dependencies).filter(([name]) =>
    JS_WAKU_PACKAGES.includes(name)
  );
  return Object.fromEntries(wakuEntries);
}
|
||||
|
||||
// npm-links a single workspace package folder into the example.
// Rethrows on failure so the bootstrap run aborts.
function linkDependency(dependency) {
  try {
    console.log(`Linking dependency to example: ${dependency}`);
    const dependencyPath = path.resolve(ROOT_PATH, toFolderName(dependency));
    execSync(`npm link ${dependencyPath}`, { stdio: "ignore" });
  } catch (error) {
    console.error(
      `Failed to npm link dependency ${dependency} in example: ${error.message}`
    );
    throw error;
  }
}
|
||||
|
||||
// Reads the monorepo root package.json and maps each workspace folder to
// its published package name.
function readWorkspaces() {
  const rootPackageJsonPath = path.resolve(ROOT_PATH, "package.json");
  const { workspaces } = readJSON(rootPackageJsonPath);
  return workspaces.map(toPackageName);
}
|
||||
|
||||
// Maps a workspace folder to its published package name, e.g.
// "packages/core" -> "@waku/core".
// Assumption: the published name always mirrors the folder layout.
function toPackageName(str) {
  // A regex without the global flag replaces only the first occurrence,
  // i.e. the leading "packages" segment.
  return str.replace(/packages/, "@waku");
}
|
||||
|
||||
// Inverse of toPackageName: "@waku/core" -> "packages/core".
function toFolderName(str) {
  return str.replace(/@waku/, "packages");
}
|
||||
@ -1,8 +0,0 @@
|
||||
import { readFileSync } from "fs";
|
||||
import { dirname } from "path";
|
||||
import { fileURLToPath } from "url";
|
||||
|
||||
// ESM replacements for the CommonJS __filename/__dirname globals.
const __filename = fileURLToPath(import.meta.url);
export const __dirname = dirname(__filename);

// Reads a file synchronously and parses it as UTF-8 JSON.
export const readJSON = (path) => JSON.parse(readFileSync(path, "utf-8"));
|
||||
197
packages/browser-tests/src/utils/endpoint-handler.ts
Normal file
197
packages/browser-tests/src/utils/endpoint-handler.ts
Normal file
@ -0,0 +1,197 @@
|
||||
import { Request, Response } from "express";
|
||||
import { Logger } from "@waku/utils";
|
||||
import { getPage } from "../browser/index.js";
|
||||
import type { ITestBrowser } from "../../types/global.js";
|
||||
|
||||
const log = new Logger("endpoint-handler");
|
||||
|
||||
/** Request body accepted by the lightpush v3 endpoint. */
export interface LightpushV3Request {
  pubsubTopic: string;
  message: {
    payload: string; // base64-encoded message payload
    contentTopic: string;
    version: number;
  };
}

/** Response payload returned by the lightpush v3 endpoint. */
export interface LightpushV3Response {
  success?: boolean;
  error?: string;
  result?: {
    // NOTE(review): presumably peer IDs the push succeeded/failed against —
    // confirm against the SDK's lightpush result shape.
    successes: string[];
    failures: Array<{
      error: string;
      peerId?: string;
    }>;
  };
}
|
||||
|
||||
/** Declarative configuration for one proxied `window.wakuApi` endpoint. */
export interface EndpointConfig<TInput = unknown, TOutput = unknown> {
  /** Name of the method invoked on `window.wakuApi` inside the page. */
  methodName: string;
  /** Parses/validates the request body; throwing yields a 400 response. */
  validateInput?: (_requestBody: unknown) => TInput;
  /** Maps the raw in-page result to the HTTP response payload. */
  transformResult?: (_sdkResult: unknown) => TOutput;
  /** Maps a thrown error to an HTTP status + message (fallback: generic 500). */
  handleError?: (_caughtError: Error) => { code: number; message: string };
  /** Runs before the page call; throwing yields a 503 response. */
  preCheck?: () => Promise<void> | void;
  /** Set to false to skip logging the raw result (logged by default). */
  logResult?: boolean;
}
|
||||
|
||||
/**
 * Builds an Express handler that proxies a request to a method on
 * `window.wakuApi` inside the headless browser page.
 *
 * Flow: validate input (400 on failure) -> optional pre-check (503 on
 * failure) -> require an initialized page (503) -> evaluate the named method
 * in the page -> optionally transform, then respond 200. Thrown errors go
 * through `config.handleError` when provided, otherwise a generic 500.
 */
export function createEndpointHandler<TInput = unknown, TOutput = unknown>(
  config: EndpointConfig<TInput, TOutput>,
) {
  return async (req: Request, res: Response) => {
    try {
      let input: TInput;
      try {
        // Without a validator the raw body is forwarded as-is.
        input = config.validateInput
          ? config.validateInput(req.body)
          : req.body;
      } catch (validationError) {
        return res.status(400).json({
          code: 400,
          message: `Invalid input: ${validationError instanceof Error ? validationError.message : String(validationError)}`,
        });
      }

      // Optional readiness gate (e.g. node started) before touching the page.
      if (config.preCheck) {
        try {
          await config.preCheck();
        } catch (checkError) {
          return res.status(503).json({
            code: 503,
            message: checkError instanceof Error ? checkError.message : String(checkError),
          });
        }
      }

      const page = getPage();
      if (!page) {
        return res.status(503).json({
          code: 503,
          message: "Browser not initialized",
        });
      }

      // This callback runs inside the browser context: only serializable
      // values cross the boundary, so the method is looked up by name here
      // rather than passed in as a function.
      const result = await page.evaluate(
        ({ methodName, params }) => {
          const testWindow = window as ITestBrowser;
          if (!testWindow.wakuApi) {
            throw new Error("window.wakuApi is not available");
          }

          const wakuApi = testWindow.wakuApi as unknown as Record<string, unknown>;
          const method = wakuApi[methodName];
          if (typeof method !== "function") {
            throw new Error(`window.wakuApi.${methodName} is not a function`);
          }

          // Calling convention: null/undefined -> zero-arg call; an array
          // spreads as positional args; anything else is one argument.
          if (params === null || params === undefined) {
            return method.call(testWindow.wakuApi);
          } else if (Array.isArray(params)) {
            return method.apply(testWindow.wakuApi, params);
          } else {
            return method.call(testWindow.wakuApi, params);
          }
        },
        { methodName: config.methodName, params: input },
      );

      // Logged by default; endpoints with large/noisy results can opt out.
      if (config.logResult !== false) {
        log.info(
          `[${config.methodName}] Result:`,
          JSON.stringify(result, null, 2),
        );
      }

      const finalResult = config.transformResult
        ? config.transformResult(result)
        : result;

      res.status(200).json(finalResult);
    } catch (error) {
      // Endpoint-specific error mapping takes precedence over the generic 500.
      if (config.handleError) {
        const errorResponse = config.handleError(error as Error);
        return res.status(errorResponse.code).json({
          code: errorResponse.code,
          message: errorResponse.message,
        });
      }

      log.error(`[${config.methodName}] Error:`, error);
      res.status(500).json({
        code: 500,
        message: `Could not execute ${config.methodName}: ${error instanceof Error ? error.message : String(error)}`,
      });
    }
  };
}
|
||||
|
||||
export const validators = {
|
||||
requireLightpushV3: (body: unknown): LightpushV3Request => {
|
||||
// Type guard to check if body is an object
|
||||
if (!body || typeof body !== "object") {
|
||||
throw new Error("Request body must be an object");
|
||||
}
|
||||
|
||||
const bodyObj = body as Record<string, unknown>;
|
||||
|
||||
if (
|
||||
bodyObj.pubsubTopic !== undefined &&
|
||||
typeof bodyObj.pubsubTopic !== "string"
|
||||
) {
|
||||
throw new Error("pubsubTopic must be a string if provided");
|
||||
}
|
||||
if (!bodyObj.message || typeof bodyObj.message !== "object") {
|
||||
throw new Error("message is required and must be an object");
|
||||
}
|
||||
|
||||
const message = bodyObj.message as Record<string, unknown>;
|
||||
|
||||
if (
|
||||
!message.contentTopic ||
|
||||
typeof message.contentTopic !== "string"
|
||||
) {
|
||||
throw new Error("message.contentTopic is required and must be a string");
|
||||
}
|
||||
if (!message.payload || typeof message.payload !== "string") {
|
||||
throw new Error(
|
||||
"message.payload is required and must be a string (base64 encoded)",
|
||||
);
|
||||
}
|
||||
if (
|
||||
message.version !== undefined &&
|
||||
typeof message.version !== "number"
|
||||
) {
|
||||
throw new Error("message.version must be a number if provided");
|
||||
}
|
||||
|
||||
return {
|
||||
pubsubTopic: (bodyObj.pubsubTopic as string) || "",
|
||||
message: {
|
||||
payload: message.payload as string,
|
||||
contentTopic: message.contentTopic as string,
|
||||
version: (message.version as number) || 1,
|
||||
},
|
||||
};
|
||||
},
|
||||
|
||||
noInput: () => null,
|
||||
};
|
||||
|
||||
export const errorHandlers = {
|
||||
lightpushError: (error: Error) => {
|
||||
if (
|
||||
error.message.includes("size exceeds") ||
|
||||
error.message.includes("stream reset")
|
||||
) {
|
||||
return {
|
||||
code: 503,
|
||||
message:
|
||||
"Could not publish message: message size exceeds gossipsub max message size",
|
||||
};
|
||||
}
|
||||
return {
|
||||
code: 500,
|
||||
message: `Could not publish message: ${error.message}`,
|
||||
};
|
||||
},
|
||||
};
|
||||
117
packages/browser-tests/tests/e2e.spec.ts
Normal file
117
packages/browser-tests/tests/e2e.spec.ts
Normal file
@ -0,0 +1,117 @@
|
||||
import { test, expect } from "@playwright/test";
|
||||
import axios from "axios";
|
||||
import { StartedTestContainer } from "testcontainers";
|
||||
import { DefaultTestRoutingInfo } from "@waku/tests";
|
||||
import {
|
||||
startBrowserTestsContainer,
|
||||
stopContainer
|
||||
} from "./utils/container-helpers.js";
|
||||
import {
|
||||
createTwoNodeNetwork,
|
||||
getDockerAccessibleMultiaddr,
|
||||
stopNwakuNodes,
|
||||
TwoNodeNetwork
|
||||
} from "./utils/nwaku-helpers.js";
|
||||
import {
|
||||
ENV_BUILDERS,
|
||||
TEST_CONFIG,
|
||||
ASSERTIONS
|
||||
} from "./utils/test-config.js";
|
||||
|
||||
// Container and nwaku nodes are shared across tests — do not run in parallel.
test.describe.configure({ mode: "serial" });

let container: StartedTestContainer;
let nwakuNodes: TwoNodeNetwork;
let baseUrl: string;

test.beforeAll(async () => {
  // Two local nwaku nodes: node 0 serves lightpush, node 1 is a relay peer.
  nwakuNodes = await createTwoNodeNetwork();

  // The browser-tests container joins the "waku" Docker network, so it needs
  // a multiaddr reachable from inside that network (not a localhost address).
  const lightPushPeerAddr = await getDockerAccessibleMultiaddr(nwakuNodes.nodes[0]);

  const result = await startBrowserTestsContainer({
    environment: {
      ...ENV_BUILDERS.withLocalLightPush(lightPushPeerAddr),
      DEBUG: "waku:*",
      WAKU_LIGHTPUSH_NODE: lightPushPeerAddr,
    },
    networkMode: "waku",
  });

  container = result.container;
  baseUrl = result.baseUrl;
});

test.afterAll(async () => {
  // Tear down container and nodes in parallel; both helpers swallow errors.
  await Promise.all([
    stopContainer(container),
    stopNwakuNodes(nwakuNodes?.nodes || []),
  ]);
});

test("WakuHeadless can discover nwaku peer and use it for light push", async () => {
  test.setTimeout(TEST_CONFIG.DEFAULT_TEST_TIMEOUT);

  const contentTopic = TEST_CONFIG.DEFAULT_CONTENT_TOPIC;
  const testMessage = TEST_CONFIG.DEFAULT_TEST_MESSAGE;

  // Give the in-page Waku node time to initialize before probing it.
  await new Promise((r) => setTimeout(r, TEST_CONFIG.WAKU_INIT_DELAY));

  const healthResponse = await axios.get(`${baseUrl}/`, { timeout: 5000 });
  ASSERTIONS.serverHealth(healthResponse);

  // Best effort: ask the server to wait for a lightpush peer, but don't fail
  // here — actual peer state is asserted via peer-info below.
  try {
    await axios.post(`${baseUrl}/waku/v1/wait-for-peers`, {
      timeoutMs: 10000,
      protocols: ["lightpush"],
    }, { timeout: 15000 });
  } catch {
    // Ignore errors
  }

  const peerInfoResponse = await axios.get(`${baseUrl}/waku/v1/peer-info`);
  ASSERTIONS.peerInfo(peerInfoResponse);

  const routingInfo = DefaultTestRoutingInfo;

  // Both nwaku relays must be subscribed to the pubsub topic to see the push.
  const subscriptionResults = await Promise.all([
    nwakuNodes.nodes[0].ensureSubscriptions([routingInfo.pubsubTopic]),
    nwakuNodes.nodes[1].ensureSubscriptions([routingInfo.pubsubTopic])
  ]);

  expect(subscriptionResults[0]).toBe(true);
  expect(subscriptionResults[1]).toBe(true);

  await new Promise((r) => setTimeout(r, TEST_CONFIG.SUBSCRIPTION_DELAY));

  // The REST API expects a base64-encoded payload.
  const base64Payload = btoa(testMessage);

  const pushResponse = await axios.post(`${baseUrl}/lightpush/v3/message`, {
    pubsubTopic: routingInfo.pubsubTopic,
    message: {
      contentTopic,
      payload: base64Payload,
      version: 1,
    },
  });

  ASSERTIONS.lightPushV3Success(pushResponse);

  // Let the message propagate over relay before querying the nodes.
  await new Promise((r) => setTimeout(r, TEST_CONFIG.MESSAGE_PROPAGATION_DELAY));

  const [node1Messages, node2Messages] = await Promise.all([
    nwakuNodes.nodes[0].messages(contentTopic),
    nwakuNodes.nodes[1].messages(contentTopic)
  ]);

  // At least one of the two nodes must have received the pushed message.
  const totalMessages = node1Messages.length + node2Messages.length;
  expect(totalMessages).toBeGreaterThanOrEqual(1);

  const receivedMessages = [...node1Messages, ...node2Messages];
  expect(receivedMessages.length).toBeGreaterThan(0);

  const receivedMessage = receivedMessages[0];
  ASSERTIONS.messageContent(receivedMessage, testMessage, contentTopic);
});
|
||||
134
packages/browser-tests/tests/integration.spec.ts
Normal file
134
packages/browser-tests/tests/integration.spec.ts
Normal file
@ -0,0 +1,134 @@
|
||||
import { test, expect } from "@playwright/test";
|
||||
import axios from "axios";
|
||||
import { StartedTestContainer } from "testcontainers";
|
||||
import {
|
||||
createLightNode,
|
||||
LightNode,
|
||||
Protocols,
|
||||
IDecodedMessage,
|
||||
} from "@waku/sdk";
|
||||
import { DEFAULT_CLUSTER_ID, DEFAULT_NUM_SHARDS } from "@waku/interfaces";
|
||||
import { startBrowserTestsContainer, stopContainer } from "./utils/container-helpers.js";
|
||||
import { ENV_BUILDERS, TEST_CONFIG } from "./utils/test-config.js";
|
||||
|
||||
// Container and the SDK node are shared state — do not run tests in parallel.
test.describe.configure({ mode: "serial" });

let container: StartedTestContainer;
let baseUrl: string;
let wakuNode: LightNode;

test.beforeAll(async () => {
  // The container bootstraps against the production test fleet via ENR.
  const result = await startBrowserTestsContainer({
    environment: {
      ...ENV_BUILDERS.withProductionEnr(),
      DEBUG: "waku:*",
    },
  });

  container = result.container;
  baseUrl = result.baseUrl;
});

test.afterAll(async () => {
  if (wakuNode) {
    try {
      await wakuNode.stop();
    } catch {
      // Ignore errors
    }
  }

  await stopContainer(container);
});

test("cross-network message delivery: SDK light node receives server lightpush", async () => {
  test.setTimeout(TEST_CONFIG.DEFAULT_TEST_TIMEOUT);

  const contentTopic = TEST_CONFIG.DEFAULT_CONTENT_TOPIC;
  const testMessage = TEST_CONFIG.DEFAULT_TEST_MESSAGE;

  // Local SDK light node joining the same (default) network as the container.
  wakuNode = await createLightNode({
    defaultBootstrap: true,
    discovery: {
      dns: true,
      peerExchange: true,
      peerCache: true,
    },
    networkConfig: {
      clusterId: DEFAULT_CLUSTER_ID,
      numShardsInCluster: DEFAULT_NUM_SHARDS,
    },
    libp2p: {
      filterMultiaddrs: false,
    },
  });

  await wakuNode.start();

  await wakuNode.waitForPeers(
    [Protocols.Filter, Protocols.LightPush],
    30000,
  );

  const messages: IDecodedMessage[] = [];
  const decoder = wakuNode.createDecoder({ contentTopic });

  if (
    !(await wakuNode.filter.subscribe([decoder], (message) => {
      messages.push(message);
    }))
  ) {
    throw new Error("Failed to subscribe to Filter");
  }

  // Settle time after subscribing before the push happens.
  await new Promise((r) => setTimeout(r, 2000));

  // Polls the shared `messages` array every 100ms; resolves once any new
  // message arrives. Raced against a 45s timeout below.
  const messagePromise = new Promise<void>((resolve) => {
    const originalLength = messages.length;
    const checkForMessage = () => {
      if (messages.length > originalLength) {
        resolve();
      } else {
        setTimeout(checkForMessage, 100);
      }
    };
    checkForMessage();
  });

  // Make the server side also wait until it has lightpush+filter peers.
  await axios.post(`${baseUrl}/waku/v1/wait-for-peers`, {
    timeoutMs: 30000, // Increased timeout
    protocols: ["lightpush", "filter"],
  });

  await new Promise((r) => setTimeout(r, 10000));

  const base64Payload = btoa(testMessage);

  const pushResponse = await axios.post(`${baseUrl}/lightpush/v3/message`, {
    pubsubTopic: decoder.pubsubTopic,
    message: {
      contentTopic,
      payload: base64Payload,
      version: 1,
    },
  });

  expect(pushResponse.status).toBe(200);
  expect(pushResponse.data.success).toBe(true);

  await Promise.race([
    messagePromise,
    new Promise((_, reject) =>
      setTimeout(() => {
        reject(new Error("Timeout waiting for message"));
      }, 45000),
    ),
  ]);

  // NOTE(review): asserts exactly one message — assumes no duplicate delivery
  // over filter within the window; confirm this holds on the shared fleet.
  expect(messages).toHaveLength(1);
  const receivedMessage = messages[0];
  expect(receivedMessage.contentTopic).toBe(contentTopic);

  const receivedPayload = new TextDecoder().decode(receivedMessage.payload);
  expect(receivedPayload).toBe(testMessage);
});
|
||||
82
packages/browser-tests/tests/server.spec.ts
Normal file
82
packages/browser-tests/tests/server.spec.ts
Normal file
@ -0,0 +1,82 @@
|
||||
import { test, expect } from "@playwright/test";
|
||||
import axios from "axios";
|
||||
import { spawn, ChildProcess } from "child_process";
|
||||
import { fileURLToPath } from "url";
|
||||
import { dirname, join } from "path";
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
// One spawned server process shared by all tests — run serially.
test.describe.configure({ mode: "serial" });

test.describe("Server Tests", () => {
  let serverProcess: ChildProcess;
  let baseUrl = "http://localhost:3000";

  test.beforeAll(async () => {
    // Runs the compiled server (requires a prior build into dist/).
    const serverPath = join(__dirname, "..", "dist", "src", "server.js");

    serverProcess = spawn("node", [serverPath], {
      stdio: "pipe",
      env: { ...process.env, PORT: "3000" }
    });

    // Drain stdout/stderr so the child's pipes never fill and block it.
    serverProcess.stdout?.on("data", (_data: Buffer) => {
    });

    serverProcess.stderr?.on("data", (_data: Buffer) => {
    });

    await new Promise((resolve) => setTimeout(resolve, 3000));

    // Poll the health endpoint for up to ~30s until the server answers.
    let serverReady = false;
    for (let i = 0; i < 30; i++) {
      try {
        const res = await axios.get(`${baseUrl}/`, { timeout: 2000 });
        if (res.status === 200) {
          serverReady = true;
          break;
        }
      } catch {
        // Ignore errors
      }
      await new Promise((r) => setTimeout(r, 1000));
    }

    expect(serverReady).toBe(true);
  });

  test.afterAll(async () => {
    if (serverProcess) {
      serverProcess.kill("SIGTERM");
      // Brief grace period for the process to exit.
      await new Promise((resolve) => setTimeout(resolve, 1000));
    }
  });

  test("server health endpoint", async () => {
    const res = await axios.get(`${baseUrl}/`);
    expect(res.status).toBe(200);
    expect(res.data.status).toBe("Waku simulation server is running");
  });

  test("static files are served", async () => {
    const htmlRes = await axios.get(`${baseUrl}/app/index.html`);
    expect(htmlRes.status).toBe(200);
    expect(htmlRes.data).toContain("Waku Test Environment");

    const jsRes = await axios.get(`${baseUrl}/app/index.js`);
    expect(jsRes.status).toBe(200);
    expect(jsRes.data).toContain("WakuHeadless");
  });

  test("Waku node auto-started", async () => {
    // Either the node is up (peer info available) or the endpoint reports 400.
    try {
      const infoRes = await axios.get(`${baseUrl}/waku/v1/peer-info`);
      expect(infoRes.status).toBe(200);
      expect(infoRes.data.peerId).toBeDefined();
      expect(infoRes.data.multiaddrs).toBeDefined();
    } catch (error) {
      // NOTE(review): `error` is untyped here; assumes an AxiosError with a
      // `response` — confirm if strict catch-variable typing is enabled.
      expect(error.response?.status).toBe(400);
    }
  });
});
|
||||
128
packages/browser-tests/tests/utils/container-helpers.ts
Normal file
128
packages/browser-tests/tests/utils/container-helpers.ts
Normal file
@ -0,0 +1,128 @@
|
||||
import axios from "axios";
|
||||
import { GenericContainer, StartedTestContainer } from "testcontainers";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
const log = new Logger("container-helpers");
|
||||
|
||||
/** Options for starting the waku-browser-tests container. */
export interface ContainerSetupOptions {
  environment?: Record<string, string>; // env vars passed into the container
  networkMode?: string; // Docker network mode (defaults to "bridge")
  timeout?: number; // per-request timeout (ms) for readiness probes
  maxAttempts?: number; // number of readiness probes, 1s apart
}

/** A started container plus the host-mapped base URL of its HTTP server. */
export interface ContainerSetupResult {
  container: StartedTestContainer;
  baseUrl: string;
}
|
||||
|
||||
/**
|
||||
* Starts a waku-browser-tests Docker container with proper health checking.
|
||||
* Follows patterns from @waku/tests package for retry logic and cleanup.
|
||||
*/
|
||||
export async function startBrowserTestsContainer(
|
||||
options: ContainerSetupOptions = {}
|
||||
): Promise<ContainerSetupResult> {
|
||||
const {
|
||||
environment = {},
|
||||
networkMode = "bridge",
|
||||
timeout = 2000,
|
||||
maxAttempts = 60
|
||||
} = options;
|
||||
|
||||
log.info("Starting waku-browser-tests container...");
|
||||
|
||||
let generic = new GenericContainer("waku-browser-tests:local")
|
||||
.withExposedPorts(8080)
|
||||
.withNetworkMode(networkMode);
|
||||
|
||||
// Apply environment variables
|
||||
for (const [key, value] of Object.entries(environment)) {
|
||||
generic = generic.withEnvironment({ [key]: value });
|
||||
}
|
||||
|
||||
const container = await generic.start();
|
||||
|
||||
// Set up container logging - stream all output from the start
|
||||
const logs = await container.logs();
|
||||
logs.on("data", (b) => process.stdout.write("[container] " + b.toString()));
|
||||
logs.on("error", (err) => log.error("[container log error]", err));
|
||||
|
||||
// Give container time to initialize
|
||||
await new Promise((r) => setTimeout(r, 5000));
|
||||
|
||||
const mappedPort = container.getMappedPort(8080);
|
||||
const baseUrl = `http://127.0.0.1:${mappedPort}`;
|
||||
|
||||
// Wait for server readiness with retry logic (following waku/tests patterns)
|
||||
const serverReady = await waitForServerReady(baseUrl, maxAttempts, timeout);
|
||||
|
||||
if (!serverReady) {
|
||||
await logFinalContainerState(container);
|
||||
throw new Error("Container failed to become ready");
|
||||
}
|
||||
|
||||
log.info("✅ Browser tests container ready");
|
||||
await new Promise((r) => setTimeout(r, 500)); // Final settling time
|
||||
|
||||
return { container, baseUrl };
|
||||
}
|
||||
|
||||
/**
|
||||
* Waits for server to become ready with exponential backoff and detailed logging.
|
||||
* Follows retry patterns from @waku/tests ServiceNode.
|
||||
*/
|
||||
async function waitForServerReady(
|
||||
baseUrl: string,
|
||||
maxAttempts: number,
|
||||
timeout: number
|
||||
): Promise<boolean> {
|
||||
for (let i = 0; i < maxAttempts; i++) {
|
||||
try {
|
||||
const res = await axios.get(`${baseUrl}/`, { timeout });
|
||||
if (res.status === 200) {
|
||||
log.info(`Server is ready after ${i + 1} attempts`);
|
||||
return true;
|
||||
}
|
||||
} catch (error) {
|
||||
if (i % 10 === 0) {
|
||||
log.info(`Attempt ${i + 1}/${maxAttempts} failed:`, error.code || error.message);
|
||||
}
|
||||
}
|
||||
await new Promise((r) => setTimeout(r, 1000));
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Logs final container state for debugging, following waku/tests error handling patterns.
|
||||
*/
|
||||
async function logFinalContainerState(container: StartedTestContainer): Promise<void> {
|
||||
try {
|
||||
const finalLogs = await container.logs({ tail: 50 });
|
||||
log.info("=== Final Container Logs ===");
|
||||
finalLogs.on("data", (b) => log.info(b.toString()));
|
||||
await new Promise((r) => setTimeout(r, 1000));
|
||||
} catch (logError) {
|
||||
log.error("Failed to get container logs:", logError);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gracefully stops containers with retry logic, following teardown patterns from waku/tests.
|
||||
*/
|
||||
export async function stopContainer(container: StartedTestContainer): Promise<void> {
|
||||
if (!container) return;
|
||||
|
||||
log.info("Stopping container gracefully...");
|
||||
try {
|
||||
await container.stop({ timeout: 10000 });
|
||||
log.info("Container stopped successfully");
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
log.warn(
|
||||
"Container stop had issues (expected):",
|
||||
message
|
||||
);
|
||||
}
|
||||
}
|
||||
8
packages/browser-tests/tests/utils/index.ts
Normal file
8
packages/browser-tests/tests/utils/index.ts
Normal file
@ -0,0 +1,8 @@
|
||||
/**
|
||||
* Shared test utilities for browser-tests package.
|
||||
* Follows patterns established in @waku/tests package.
|
||||
*/
|
||||
|
||||
export * from "./container-helpers.js";
|
||||
export * from "./nwaku-helpers.js";
|
||||
export * from "./test-config.js";
|
||||
141
packages/browser-tests/tests/utils/nwaku-helpers.ts
Normal file
141
packages/browser-tests/tests/utils/nwaku-helpers.ts
Normal file
@ -0,0 +1,141 @@
|
||||
import { ServiceNode } from "@waku/tests";
|
||||
import { DefaultTestRoutingInfo } from "@waku/tests";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
const log = new Logger("nwaku-helpers");
|
||||
|
||||
/**
 * Handle to a two-node nwaku network as built by createTwoNodeNetwork:
 * nodes[0] is the relay+lightpush node, nodes[1] the relay-only node.
 */
export interface TwoNodeNetwork {
  nodes: ServiceNode[];
}
|
||||
|
||||
/**
 * Creates a two-node nwaku network following waku/tests patterns.
 * Node 1: Relay + Light Push (service provider)
 * Node 2: Relay only (network peer)
 *
 * @throws if either node fails to start after 3 retries.
 */
export async function createTwoNodeNetwork(): Promise<TwoNodeNetwork> {
  log.info("Creating nwaku node 1 (Relay + Light Push)...");
  // Random suffix avoids container-name collisions across test runs.
  const lightPushNode = new ServiceNode(
    "lightpush-node-" + Math.random().toString(36).substring(7),
  );

  const lightPushArgs = {
    relay: true,
    lightpush: true,
    filter: false,
    store: false,
    clusterId: DefaultTestRoutingInfo.clusterId,
    numShardsInNetwork: DefaultTestRoutingInfo.networkConfig.numShardsInCluster,
    contentTopic: [DefaultTestRoutingInfo.contentTopic],
  };

  await lightPushNode.start(lightPushArgs, { retries: 3 });

  log.info("Creating nwaku node 2 (Relay only)...");
  const relayNode = new ServiceNode(
    "relay-node-" + Math.random().toString(36).substring(7),
  );

  // Connect second node to first node (following ServiceNodesFleet pattern)
  const firstNodeAddr = await lightPushNode.getExternalMultiaddr();
  const relayArgs = {
    relay: true,
    lightpush: false,
    filter: false,
    store: false,
    // staticnode makes node 2 dial node 1 at startup, forming the mesh.
    staticnode: firstNodeAddr,
    clusterId: DefaultTestRoutingInfo.clusterId,
    numShardsInNetwork: DefaultTestRoutingInfo.networkConfig.numShardsInCluster,
    contentTopic: [DefaultTestRoutingInfo.contentTopic],
  };

  await relayNode.start(relayArgs, { retries: 3 });

  // Wait for network formation (following waku/tests timing patterns)
  log.info("Waiting for nwaku network formation...");
  await new Promise((r) => setTimeout(r, 5000));

  // Verify connectivity (optional, for debugging) — logs but never throws.
  await verifyNetworkFormation([lightPushNode, relayNode]);

  return {
    nodes: [lightPushNode, relayNode],
  };
}
|
||||
|
||||
/**
|
||||
* Verifies that nwaku nodes have formed connections.
|
||||
* Follows error handling patterns from waku/tests.
|
||||
*/
|
||||
async function verifyNetworkFormation(nodes: ServiceNode[]): Promise<void> {
|
||||
try {
|
||||
const peerCounts = await Promise.all(
|
||||
nodes.map(async (node, index) => {
|
||||
const peers = await node.peers();
|
||||
log.info(`Node ${index + 1} has ${peers.length} peer(s)`);
|
||||
return peers.length;
|
||||
}),
|
||||
);
|
||||
|
||||
if (peerCounts.every((count) => count === 0)) {
|
||||
log.warn("⚠️ Nodes may not be properly connected yet");
|
||||
}
|
||||
} catch (error) {
|
||||
log.warn("Could not verify peer connections:", error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Extracts Docker-accessible multiaddr from nwaku node.
 * Returns multiaddr using container's internal IP for Docker network communication.
 *
 * @throws when the TCP port or the container IP cannot be determined.
 */
export async function getDockerAccessibleMultiaddr(
  node: ServiceNode,
): Promise<string> {
  // Get multiaddr with localhost and extract components
  const localhostMultiaddr = await node.getMultiaddrWithId();
  const peerId = await node.getPeerId();

  // Extract port from multiaddr string
  const multiaddrStr = localhostMultiaddr.toString();
  const portMatch = multiaddrStr.match(/\/tcp\/(\d+)/);
  const port = portMatch ? portMatch[1] : null;

  if (!port) {
    throw new Error("Could not extract port from multiaddr: " + multiaddrStr);
  }

  // Get Docker container IP (accessing internal field)
  // Note: This accesses an internal implementation detail of ServiceNode
  // and may break if ServiceNode's internals change.
  const nodeWithDocker = node as ServiceNode & {
    docker?: { containerIp?: string };
  };
  const containerIp = nodeWithDocker.docker?.containerIp;
  if (!containerIp) {
    throw new Error("Could not get container IP from node");
  }

  // Build Docker network accessible multiaddr.
  // NOTE(review): hard-codes a `/ws` transport on the extracted TCP port —
  // assumes the node serves websocket there; confirm against node config.
  const dockerMultiaddr = `/ip4/${containerIp}/tcp/${port}/ws/p2p/${peerId}`;

  log.info("Original multiaddr:", multiaddrStr);
  log.info("Docker accessible multiaddr:", dockerMultiaddr);

  return dockerMultiaddr;
}
|
||||
|
||||
/**
|
||||
* Stops nwaku nodes with retry logic, following teardown patterns from waku/tests.
|
||||
*/
|
||||
export async function stopNwakuNodes(nodes: ServiceNode[]): Promise<void> {
|
||||
if (!nodes || nodes.length === 0) return;
|
||||
|
||||
log.info("Stopping nwaku nodes...");
|
||||
try {
|
||||
await Promise.all(nodes.map((node) => node.stop()));
|
||||
log.info("Nwaku nodes stopped successfully");
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
log.warn("Nwaku nodes stop had issues:", message);
|
||||
}
|
||||
}
|
||||
127
packages/browser-tests/tests/utils/test-config.ts
Normal file
127
packages/browser-tests/tests/utils/test-config.ts
Normal file
@ -0,0 +1,127 @@
|
||||
import { expect } from "@playwright/test";
|
||||
import { DefaultTestRoutingInfo } from "@waku/tests";
|
||||
import { AxiosResponse } from "axios";
|
||||
|
||||
/**
 * Response type definitions for API endpoints exercised by the test
 * ASSERTIONS helpers below.
 */
interface ServerHealthResponse {
  status: string; // human-readable readiness string from GET /
}

/** Shape of GET /waku/v1/peer-info. */
interface PeerInfoResponse {
  peerId: string;
  multiaddrs: string[];
  peers: string[];
}

/** Per-peer outcome of a lightpush v3 publish. */
interface LightPushV3Result {
  successes: string[];
  failures: Array<{ error: string; peerId?: string }>;
}

/** Shape of POST /lightpush/v3/message responses. */
interface LightPushV3Response {
  success: boolean;
  result: LightPushV3Result;
  error?: string;
}

/** Message as returned by an nwaku REST query. */
interface MessageResponse {
  contentTopic: string;
  payload: string; // base64-encoded payload
  version: number;
  timestamp?: bigint | number;
}
|
||||
|
||||
/**
|
||||
* Common test configuration constants following waku/tests patterns.
|
||||
*/
|
||||
export const TEST_CONFIG = {
|
||||
// Test timeouts (following waku/tests timeout patterns)
|
||||
DEFAULT_TEST_TIMEOUT: 120000, // 2 minutes
|
||||
CONTAINER_READY_TIMEOUT: 60000, // 1 minute
|
||||
NETWORK_FORMATION_DELAY: 5000, // 5 seconds
|
||||
SUBSCRIPTION_DELAY: 3000, // 3 seconds
|
||||
MESSAGE_PROPAGATION_DELAY: 5000, // 5 seconds
|
||||
WAKU_INIT_DELAY: 8000, // 8 seconds
|
||||
|
||||
// Network configuration
|
||||
DEFAULT_CLUSTER_ID: DefaultTestRoutingInfo.clusterId.toString(),
|
||||
DEFAULT_CONTENT_TOPIC: "/test/1/browser-tests/proto",
|
||||
|
||||
// Test messages
|
||||
DEFAULT_TEST_MESSAGE: "Hello from browser tests",
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Environment variable builders for different test scenarios.
|
||||
*/
|
||||
export const ENV_BUILDERS = {
|
||||
/**
|
||||
* Environment for production ENR bootstrap (integration test pattern).
|
||||
*/
|
||||
withProductionEnr: () => ({
|
||||
WAKU_ENR_BOOTSTRAP: "enr:-QEnuEBEAyErHEfhiQxAVQoWowGTCuEF9fKZtXSd7H_PymHFhGJA3rGAYDVSHKCyJDGRLBGsloNbS8AZF33IVuefjOO6BIJpZIJ2NIJpcIQS39tkim11bHRpYWRkcnO4lgAvNihub2RlLTAxLmRvLWFtczMud2FrdXYyLnRlc3Quc3RhdHVzaW0ubmV0BgG73gMAODcxbm9kZS0wMS5hYy1jbi1ob25na29uZy1jLndha3V2Mi50ZXN0LnN0YXR1c2ltLm5ldAYBu94DACm9A62t7AQL4Ef5ZYZosRpQTzFVAB8jGjf1TER2wH-0zBOe1-MDBNLeA4lzZWNwMjU2azGhAzfsxbxyCkgCqq8WwYsVWH7YkpMLnU2Bw5xJSimxKav-g3VkcIIjKA",
|
||||
WAKU_CLUSTER_ID: "1",
|
||||
}),
|
||||
|
||||
/**
|
||||
* Environment for local nwaku node connection (e2e test pattern).
|
||||
*/
|
||||
withLocalLightPush: (lightpushMultiaddr: string) => ({
|
||||
WAKU_LIGHTPUSH_NODE: lightpushMultiaddr,
|
||||
WAKU_CLUSTER_ID: TEST_CONFIG.DEFAULT_CLUSTER_ID,
|
||||
}),
|
||||
};
|
||||
|
||||
/**
|
||||
* Test assertion helpers following waku/tests verification patterns.
|
||||
*/
|
||||
export const ASSERTIONS = {
|
||||
/**
|
||||
* Verifies server health response structure.
|
||||
*/
|
||||
serverHealth: (response: AxiosResponse<ServerHealthResponse>) => {
|
||||
expect(response.status).toBe(200);
|
||||
expect(response.data.status).toBe("Waku simulation server is running");
|
||||
},
|
||||
|
||||
/**
|
||||
* Verifies peer info response structure.
|
||||
*/
|
||||
peerInfo: (response: AxiosResponse<PeerInfoResponse>) => {
|
||||
expect(response.status).toBe(200);
|
||||
expect(response.data.peerId).toBeDefined();
|
||||
expect(typeof response.data.peerId).toBe("string");
|
||||
},
|
||||
|
||||
/**
|
||||
* Verifies lightpush response structure (v3 format).
|
||||
*/
|
||||
lightPushV3Success: (response: AxiosResponse<LightPushV3Response>) => {
|
||||
expect(response.status).toBe(200);
|
||||
expect(response.data).toHaveProperty('success', true);
|
||||
expect(response.data).toHaveProperty('result');
|
||||
expect(response.data.result).toHaveProperty('successes');
|
||||
expect(Array.isArray(response.data.result.successes)).toBe(true);
|
||||
expect(response.data.result.successes.length).toBeGreaterThan(0);
|
||||
},
|
||||
|
||||
/**
|
||||
* Verifies message content and structure.
|
||||
*/
|
||||
messageContent: (message: MessageResponse, expectedContent: string, expectedTopic: string) => {
|
||||
expect(message).toHaveProperty('contentTopic', expectedTopic);
|
||||
expect(message).toHaveProperty('payload');
|
||||
expect(typeof message.payload).toBe('string');
|
||||
|
||||
const receivedPayload = Buffer.from(message.payload, 'base64').toString();
|
||||
expect(receivedPayload).toBe(expectedContent);
|
||||
|
||||
// Optional fields
|
||||
expect(message).toHaveProperty('version');
|
||||
if (message.timestamp) {
|
||||
expect(['bigint', 'number']).toContain(typeof message.timestamp);
|
||||
}
|
||||
},
|
||||
};
|
||||
@ -1,6 +0,0 @@
|
||||
import { expect, test } from "@playwright/test";
|
||||
|
||||
test("has title Web Chat title", async ({ page }) => {
|
||||
await page.goto("");
|
||||
await expect(page).toHaveTitle("Waku v2 chat app");
|
||||
});
|
||||
19
packages/browser-tests/tsconfig.json
Normal file
19
packages/browser-tests/tsconfig.json
Normal file
@ -0,0 +1,19 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"module": "NodeNext",
|
||||
"moduleResolution": "NodeNext",
|
||||
"esModuleInterop": true,
|
||||
"strict": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"outDir": "dist",
|
||||
"declaration": true,
|
||||
"sourceMap": true,
|
||||
"resolveJsonModule": true,
|
||||
"lib": ["ES2020", "DOM"],
|
||||
"typeRoots": ["./node_modules/@types", "./types"]
|
||||
},
|
||||
"include": ["src/server.ts", "types/**/*.d.ts"],
|
||||
"exclude": ["node_modules", "dist", "web"]
|
||||
}
|
||||
2
packages/browser-tests/types/dotenv-flow.d.ts
vendored
Normal file
2
packages/browser-tests/types/dotenv-flow.d.ts
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
declare module "dotenv-flow/config";
|
||||
declare module "dotenv-flow/config.js";
|
||||
19
packages/browser-tests/types/global.d.ts
vendored
Normal file
19
packages/browser-tests/types/global.d.ts
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
import type { WakuHeadless } from "../web/index.js";
|
||||
|
||||
export interface WindowNetworkConfig {
|
||||
clusterId?: number;
|
||||
shards?: number[];
|
||||
}
|
||||
|
||||
export interface ITestBrowser extends Window {
|
||||
wakuApi: WakuHeadless;
|
||||
__WAKU_NETWORK_CONFIG?: WindowNetworkConfig;
|
||||
__WAKU_LIGHTPUSH_NODE?: string | null;
|
||||
__WAKU_ENR_BOOTSTRAP?: string | null;
|
||||
}
|
||||
|
||||
declare global {
|
||||
interface Window {
|
||||
wakuApi: WakuHeadless;
|
||||
}
|
||||
}
|
||||
9
packages/browser-tests/types/serve.d.ts
vendored
Normal file
9
packages/browser-tests/types/serve.d.ts
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
declare module "serve" {
|
||||
import type { Server } from "http";
|
||||
|
||||
function serve(
|
||||
folder: string,
|
||||
options: { port: number; single: boolean; listen: boolean },
|
||||
): Promise<Server>;
|
||||
export default serve;
|
||||
}
|
||||
14
packages/browser-tests/web/index.html
Normal file
14
packages/browser-tests/web/index.html
Normal file
@ -0,0 +1,14 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<title>Waku Test Environment</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Waku Test Environment</h1>
|
||||
<script type="module" src="./index.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
||||
431
packages/browser-tests/web/index.ts
Normal file
431
packages/browser-tests/web/index.ts
Normal file
@ -0,0 +1,431 @@
|
||||
import {
|
||||
createLightNode,
|
||||
LightNode,
|
||||
Protocols,
|
||||
NetworkConfig,
|
||||
CreateNodeOptions,
|
||||
} from "@waku/sdk";
|
||||
import {
|
||||
AutoSharding,
|
||||
DEFAULT_CLUSTER_ID,
|
||||
DEFAULT_NUM_SHARDS,
|
||||
ShardId,
|
||||
StaticSharding,
|
||||
ShardInfo,
|
||||
CreateLibp2pOptions,
|
||||
IEncoder,
|
||||
ILightPush,
|
||||
SDKProtocolResult,
|
||||
Failure,
|
||||
} from "@waku/interfaces";
|
||||
import { bootstrap } from "@libp2p/bootstrap";
|
||||
import { EnrDecoder, TransportProtocol } from "@waku/enr";
|
||||
import type { Multiaddr } from "@multiformats/multiaddr";
|
||||
import type { ITestBrowser } from "../types/global.js";
|
||||
import { Logger, StaticShardingRoutingInfo } from "@waku/utils";
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
|
||||
const log = new Logger("waku-headless");
|
||||
|
||||
export interface SerializableSDKProtocolResult {
|
||||
successes: string[];
|
||||
failures: Array<{
|
||||
error: string;
|
||||
peerId?: string;
|
||||
}>;
|
||||
myPeerId?: string;
|
||||
}
|
||||
|
||||
function makeSerializable(result: SDKProtocolResult): SerializableSDKProtocolResult {
|
||||
return {
|
||||
...result,
|
||||
successes: result.successes.map((peerId: PeerId) => peerId.toString()),
|
||||
failures: result.failures.map((failure: Failure) => ({
|
||||
error: failure.error || failure.toString(),
|
||||
peerId: failure.peerId ? failure.peerId.toString() : undefined,
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
async function convertEnrToMultiaddrs(enrString: string): Promise<string[]> {
|
||||
try {
|
||||
const enr = await EnrDecoder.fromString(enrString);
|
||||
const allMultiaddrs = enr.getAllLocationMultiaddrs();
|
||||
const multiaddrs: string[] = [];
|
||||
|
||||
for (const multiaddr of allMultiaddrs) {
|
||||
const maStr = multiaddr.toString();
|
||||
multiaddrs.push(maStr);
|
||||
}
|
||||
if (multiaddrs.length === 0) {
|
||||
const tcpMultiaddr = enr.getFullMultiaddr(TransportProtocol.TCP);
|
||||
if (tcpMultiaddr) {
|
||||
const tcpStr = tcpMultiaddr.toString();
|
||||
multiaddrs.push(tcpStr);
|
||||
}
|
||||
const udpMultiaddr = enr.getFullMultiaddr(TransportProtocol.UDP);
|
||||
if (udpMultiaddr) {
|
||||
const udpStr = udpMultiaddr.toString();
|
||||
multiaddrs.push(udpStr);
|
||||
}
|
||||
}
|
||||
|
||||
return multiaddrs;
|
||||
} catch (error) {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
export class WakuHeadless {
|
||||
waku: LightNode | null;
|
||||
networkConfig: NetworkConfig;
|
||||
lightpushNode: string | null;
|
||||
enrBootstrap: string | null;
|
||||
constructor(
|
||||
networkConfig?: Partial<NetworkConfig>,
|
||||
lightpushNode?: string | null,
|
||||
enrBootstrap?: string | null,
|
||||
) {
|
||||
this.waku = null;
|
||||
this.networkConfig = this.buildNetworkConfig(networkConfig);
|
||||
log.info("Network config on construction:", this.networkConfig);
|
||||
this.lightpushNode = lightpushNode || null;
|
||||
this.enrBootstrap = enrBootstrap || null;
|
||||
|
||||
if (this.lightpushNode) {
|
||||
log.info(`Configured preferred lightpush node: ${this.lightpushNode}`);
|
||||
}
|
||||
if (this.enrBootstrap) {
|
||||
log.info(`Configured ENR bootstrap: ${this.enrBootstrap}`);
|
||||
}
|
||||
}
|
||||
|
||||
private shouldUseCustomBootstrap(options: CreateNodeOptions): boolean {
|
||||
const hasEnr = Boolean(this.enrBootstrap);
|
||||
const isDefaultBootstrap = Boolean(options.defaultBootstrap);
|
||||
|
||||
return hasEnr && !isDefaultBootstrap;
|
||||
}
|
||||
|
||||
private async getBootstrapMultiaddrs(): Promise<string[]> {
|
||||
if (!this.enrBootstrap) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const enrList = this.enrBootstrap.split(",").map((enr) => enr.trim());
|
||||
const allMultiaddrs: string[] = [];
|
||||
|
||||
for (const enr of enrList) {
|
||||
const multiaddrs = await convertEnrToMultiaddrs(enr);
|
||||
if (multiaddrs.length > 0) {
|
||||
allMultiaddrs.push(...multiaddrs);
|
||||
}
|
||||
}
|
||||
|
||||
return allMultiaddrs;
|
||||
}
|
||||
|
||||
private buildNetworkConfig(
|
||||
providedConfig?: Partial<NetworkConfig> | Partial<ShardInfo>,
|
||||
): NetworkConfig {
|
||||
const clusterId = providedConfig?.clusterId ?? DEFAULT_CLUSTER_ID;
|
||||
|
||||
const staticShards = (providedConfig as Partial<ShardInfo>)?.shards;
|
||||
if (
|
||||
staticShards &&
|
||||
Array.isArray(staticShards) &&
|
||||
staticShards.length > 0
|
||||
) {
|
||||
log.info("Using static sharding with shards:", staticShards);
|
||||
return {
|
||||
clusterId,
|
||||
} as StaticSharding;
|
||||
}
|
||||
|
||||
const numShardsInCluster =
|
||||
(providedConfig as Partial<AutoSharding>)?.numShardsInCluster ?? DEFAULT_NUM_SHARDS;
|
||||
log.info(
|
||||
"Using auto sharding with num shards in cluster:",
|
||||
numShardsInCluster,
|
||||
);
|
||||
return {
|
||||
clusterId,
|
||||
numShardsInCluster,
|
||||
} as AutoSharding;
|
||||
}
|
||||
|
||||
private async send(
|
||||
lightPush: ILightPush,
|
||||
encoder: IEncoder,
|
||||
payload: Uint8Array,
|
||||
): Promise<SDKProtocolResult> {
|
||||
return lightPush.send(encoder, {
|
||||
payload,
|
||||
timestamp: new Date(),
|
||||
});
|
||||
}
|
||||
|
||||
async pushMessageV3(
|
||||
contentTopic: string,
|
||||
payload: string,
|
||||
pubsubTopic: string,
|
||||
): Promise<SerializableSDKProtocolResult> {
|
||||
if (!this.waku) {
|
||||
throw new Error("Waku node not started");
|
||||
}
|
||||
log.info(
|
||||
"Pushing message via v3 lightpush:",
|
||||
contentTopic,
|
||||
payload,
|
||||
pubsubTopic,
|
||||
);
|
||||
log.info("Waku node:", this.waku);
|
||||
log.info("Network config:", this.networkConfig);
|
||||
|
||||
let processedPayload: Uint8Array;
|
||||
try {
|
||||
const binaryString = atob(payload);
|
||||
const bytes = new Uint8Array(binaryString.length);
|
||||
for (let i = 0; i < binaryString.length; i++) {
|
||||
bytes[i] = binaryString.charCodeAt(i);
|
||||
}
|
||||
processedPayload = bytes;
|
||||
} catch (e) {
|
||||
processedPayload = new TextEncoder().encode(payload);
|
||||
}
|
||||
|
||||
try {
|
||||
const lightPush = this.waku.lightPush;
|
||||
if (!lightPush) {
|
||||
throw new Error("Lightpush service not available");
|
||||
}
|
||||
|
||||
let shardId: ShardId | undefined;
|
||||
if (pubsubTopic) {
|
||||
const staticShardingRoutingInfo =
|
||||
StaticShardingRoutingInfo.fromPubsubTopic(
|
||||
pubsubTopic,
|
||||
this.networkConfig as StaticSharding,
|
||||
);
|
||||
shardId = staticShardingRoutingInfo?.shardId;
|
||||
}
|
||||
|
||||
const encoder = this.waku.createEncoder({
|
||||
contentTopic,
|
||||
shardId,
|
||||
});
|
||||
log.info("Encoder:", encoder);
|
||||
log.info("Pubsub topic:", pubsubTopic);
|
||||
log.info("Encoder pubsub topic:", encoder.pubsubTopic);
|
||||
|
||||
if (pubsubTopic && pubsubTopic !== encoder.pubsubTopic) {
|
||||
log.warn(
|
||||
`Explicit pubsubTopic ${pubsubTopic} provided, but auto-sharding determined ${encoder.pubsubTopic}. Using auto-sharding.`,
|
||||
);
|
||||
}
|
||||
|
||||
let result;
|
||||
if (this.lightpushNode) {
|
||||
try {
|
||||
const preferredPeerId = this.getPeerIdFromMultiaddr(
|
||||
this.lightpushNode,
|
||||
);
|
||||
if (preferredPeerId) {
|
||||
result = await this.send(lightPush, encoder, processedPayload);
|
||||
log.info("✅ Message sent via preferred lightpush node");
|
||||
} else {
|
||||
throw new Error(
|
||||
"Could not extract peer ID from preferred node address",
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(
|
||||
"Couldn't send message via preferred lightpush node:",
|
||||
error,
|
||||
);
|
||||
result = await this.send(lightPush, encoder, processedPayload);
|
||||
}
|
||||
} else {
|
||||
result = await this.send(lightPush, encoder, processedPayload);
|
||||
}
|
||||
|
||||
const serializableResult = makeSerializable(result);
|
||||
|
||||
return serializableResult;
|
||||
} catch (error) {
|
||||
log.error("Error sending message via v3 lightpush:", error);
|
||||
throw new Error(
|
||||
`Failed to send v3 message: ${error instanceof Error ? error.message : String(error)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async waitForPeers(
|
||||
timeoutMs: number = 10000,
|
||||
protocols: Protocols[] = [Protocols.LightPush, Protocols.Filter],
|
||||
) {
|
||||
if (!this.waku) {
|
||||
throw new Error("Waku node not started");
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
await this.waku.waitForPeers(protocols, timeoutMs);
|
||||
const elapsed = Date.now() - startTime;
|
||||
|
||||
const peers = this.waku.libp2p.getPeers();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
peersFound: peers.length,
|
||||
protocolsRequested: protocols,
|
||||
timeElapsed: elapsed,
|
||||
};
|
||||
} catch (error) {
|
||||
const elapsed = Date.now() - startTime;
|
||||
log.error(`Failed to find peers after ${elapsed}ms:`, error);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async createWakuNode(options: CreateNodeOptions) {
|
||||
try {
|
||||
if (this.waku) {
|
||||
await this.waku.stop();
|
||||
}
|
||||
} catch (e) {
|
||||
log.warn("ignore previous waku stop error");
|
||||
}
|
||||
|
||||
let libp2pConfig: CreateLibp2pOptions = {
|
||||
...options.libp2p,
|
||||
filterMultiaddrs: false,
|
||||
};
|
||||
|
||||
if (this.enrBootstrap) {
|
||||
const multiaddrs = await this.getBootstrapMultiaddrs();
|
||||
|
||||
if (multiaddrs.length > 0) {
|
||||
libp2pConfig.peerDiscovery = [
|
||||
bootstrap({ list: multiaddrs }),
|
||||
...(options.libp2p?.peerDiscovery || []),
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
const createOptions = {
|
||||
...options,
|
||||
networkConfig: this.networkConfig,
|
||||
libp2p: libp2pConfig,
|
||||
};
|
||||
|
||||
this.waku = await createLightNode(createOptions);
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
async startNode() {
|
||||
if (!this.waku) {
|
||||
throw new Error("Waku node not created");
|
||||
}
|
||||
await this.waku.start();
|
||||
|
||||
if (this.lightpushNode) {
|
||||
await this.dialPreferredLightpushNode();
|
||||
}
|
||||
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
private async dialPreferredLightpushNode() {
|
||||
if (!this.waku || !this.lightpushNode) {
|
||||
log.info("Skipping dial: waku or lightpushNode not set");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
log.info("Attempting to dial preferred lightpush node:", this.lightpushNode);
|
||||
await this.waku.dial(this.lightpushNode);
|
||||
log.info("Successfully dialed preferred lightpush node:", this.lightpushNode);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
log.error(
|
||||
"Failed to dial preferred lightpush node:",
|
||||
this.lightpushNode,
|
||||
message
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private getPeerIdFromMultiaddr(multiaddr: string): string | null {
|
||||
const parts = multiaddr.split("/");
|
||||
const p2pIndex = parts.indexOf("p2p");
|
||||
return p2pIndex !== -1 && p2pIndex + 1 < parts.length
|
||||
? parts[p2pIndex + 1]
|
||||
: null;
|
||||
}
|
||||
|
||||
async stopNode() {
|
||||
if (!this.waku) {
|
||||
throw new Error("Waku node not created");
|
||||
}
|
||||
await this.waku.stop();
|
||||
return { success: true };
|
||||
}
|
||||
|
||||
getPeerInfo() {
|
||||
if (!this.waku) {
|
||||
throw new Error("Waku node not started");
|
||||
}
|
||||
|
||||
const addrs = this.waku.libp2p.getMultiaddrs();
|
||||
return {
|
||||
peerId: this.waku.libp2p.peerId.toString(),
|
||||
multiaddrs: addrs.map((a: Multiaddr) => a.toString()),
|
||||
peers: [],
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
(() => {
|
||||
try {
|
||||
log.info("Initializing WakuHeadless...");
|
||||
|
||||
const testWindow = window as ITestBrowser;
|
||||
const globalNetworkConfig = testWindow.__WAKU_NETWORK_CONFIG;
|
||||
const globalLightpushNode = testWindow.__WAKU_LIGHTPUSH_NODE;
|
||||
const globalEnrBootstrap = testWindow.__WAKU_ENR_BOOTSTRAP;
|
||||
|
||||
log.info("Global config from window:", {
|
||||
networkConfig: globalNetworkConfig,
|
||||
lightpushNode: globalLightpushNode,
|
||||
enrBootstrap: globalEnrBootstrap
|
||||
});
|
||||
|
||||
const instance = new WakuHeadless(
|
||||
globalNetworkConfig,
|
||||
globalLightpushNode,
|
||||
globalEnrBootstrap,
|
||||
);
|
||||
|
||||
testWindow.wakuApi = instance;
|
||||
log.info("WakuHeadless initialized successfully:", !!testWindow.wakuApi);
|
||||
} catch (error) {
|
||||
log.error("Error initializing WakuHeadless:", error);
|
||||
const testWindow = window as ITestBrowser;
|
||||
// Create a stub wakuApi that will reject all method calls
|
||||
testWindow.wakuApi = {
|
||||
waku: null,
|
||||
networkConfig: { clusterId: 0, numShardsInCluster: 0 },
|
||||
lightpushNode: null,
|
||||
enrBootstrap: null,
|
||||
error,
|
||||
createWakuNode: () => Promise.reject(new Error("WakuHeadless failed to initialize")),
|
||||
startNode: () => Promise.reject(new Error("WakuHeadless failed to initialize")),
|
||||
stopNode: () => Promise.reject(new Error("WakuHeadless failed to initialize")),
|
||||
pushMessageV3: () => Promise.reject(new Error("WakuHeadless failed to initialize")),
|
||||
waitForPeers: () => Promise.reject(new Error("WakuHeadless failed to initialize")),
|
||||
getPeerInfo: () => { throw new Error("WakuHeadless failed to initialize"); },
|
||||
} as unknown as WakuHeadless;
|
||||
}
|
||||
})();
|
||||
@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "@waku/build-utils",
|
||||
"version": "1.0.0",
|
||||
"description": "Build utilities for js-waku",
|
||||
"description": "Build utilities for logos-messaging-js",
|
||||
"main": "index.js",
|
||||
"module": "index.js",
|
||||
"type": "module",
|
||||
@ -14,12 +14,12 @@
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/waku-org/js-waku.git"
|
||||
"url": "git+https://github.com/logos-messaging/logos-messaging-js.git"
|
||||
},
|
||||
"author": "Waku Team",
|
||||
"license": "MIT OR Apache-2.0",
|
||||
"bugs": {
|
||||
"url": "https://github.com/waku-org/js-waku/issues"
|
||||
"url": "https://github.com/logos-messaging/logos-messaging-js/issues"
|
||||
},
|
||||
"homepage": "https://github.com/waku-org/js-waku#readme"
|
||||
"homepage": "https://github.com/logos-messaging/logos-messaging-js#readme"
|
||||
}
|
||||
|
||||
@ -5,6 +5,159 @@ All notable changes to this project will be documented in this file.
|
||||
The file is maintained by [Release Please](https://github.com/googleapis/release-please) based on [Conventional Commits](https://www.conventionalcommits.org) specification,
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [0.0.41](https://github.com/logos-messaging/logos-messaging-js/compare/core-v0.0.40...core-v0.0.41) (2026-01-16)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Add dialTimeout, change dialingQueue to Map ([#2773](https://github.com/logos-messaging/logos-messaging-js/issues/2773)) ([7816642](https://github.com/logos-messaging/logos-messaging-js/commit/7816642fae3eba4f87c196b9571246456a1525e7))
|
||||
* Reliable Channel: Status Sync, overflow protection, stop TODOs ([#2729](https://github.com/logos-messaging/logos-messaging-js/issues/2729)) ([e5f51d7](https://github.com/logos-messaging/logos-messaging-js/commit/e5f51d7df101020a1a6d0787ce68fab4f28922f5))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Cleanup routines on reliable channel and core protocols ([#2733](https://github.com/logos-messaging/logos-messaging-js/issues/2733)) ([84a6ea6](https://github.com/logos-messaging/logos-messaging-js/commit/84a6ea69cf8630dacea0cafd58dd8c605ee8dc48))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/enr bumped from ^0.0.33 to ^0.0.34
|
||||
* @waku/interfaces bumped from 0.0.34 to 0.0.35
|
||||
* @waku/utils bumped from 0.0.27 to 0.0.28
|
||||
|
||||
## [0.0.40](https://github.com/waku-org/js-waku/compare/core-v0.0.39...core-v0.0.40) (2025-10-31)
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/proto bumped from 0.0.14 to 0.0.15
|
||||
|
||||
## [0.0.39](https://github.com/waku-org/js-waku/compare/core-v0.0.38...core-v0.0.39) (2025-09-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Add start/stop to filter ([#2592](https://github.com/waku-org/js-waku/issues/2592)) ([2fba052](https://github.com/waku-org/js-waku/commit/2fba052b8b98cb64f6383de95d01b33beb771448))
|
||||
* Expose message hash from IDecodedMessage ([#2578](https://github.com/waku-org/js-waku/issues/2578)) ([836d6b8](https://github.com/waku-org/js-waku/commit/836d6b8793a5124747684f6ea76b6dd47c73048b))
|
||||
* Implement lp-v3 error codes with backwards compatibility ([#2501](https://github.com/waku-org/js-waku/issues/2501)) ([1625302](https://github.com/waku-org/js-waku/commit/16253026c6e30052d87d9975b58480951de469d8))
|
||||
* Implement peer-store re-bootstrapping ([#2641](https://github.com/waku-org/js-waku/issues/2641)) ([11d84ad](https://github.com/waku-org/js-waku/commit/11d84ad342fe45158ef0734f9ca070f14704503f))
|
||||
* StoreConnect events ([#2601](https://github.com/waku-org/js-waku/issues/2601)) ([0dfbcf6](https://github.com/waku-org/js-waku/commit/0dfbcf6b6bd9225dcb0dec540aeb1eb2703c8397))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/enr bumped from ^0.0.32 to ^0.0.33
|
||||
* @waku/interfaces bumped from 0.0.33 to 0.0.34
|
||||
* @waku/proto bumped from 0.0.13 to 0.0.14
|
||||
* @waku/utils bumped from 0.0.26 to 0.0.27
|
||||
|
||||
## [0.0.38](https://github.com/waku-org/js-waku/compare/core-v0.0.37...core-v0.0.38) (2025-08-14)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* local peer discovery improvements ([#2557](https://github.com/waku-org/js-waku/issues/2557))
|
||||
* Introduce routing info concept
|
||||
|
||||
### Features
|
||||
|
||||
* Introduce routing info concept ([3842d84](https://github.com/waku-org/js-waku/commit/3842d84b55eb96728f6b05b9307ff823fac58a54))
|
||||
* Local peer discovery improvements ([#2557](https://github.com/waku-org/js-waku/issues/2557)) ([eab8ce8](https://github.com/waku-org/js-waku/commit/eab8ce81b431b11d79dcbec31aea759319853336))
|
||||
* Peer exchange discovery improvements ([#2537](https://github.com/waku-org/js-waku/issues/2537)) ([95da57a](https://github.com/waku-org/js-waku/commit/95da57a8705fa195529ef52a6c908642da5e120c))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Improve error handling for stream manager ([#2546](https://github.com/waku-org/js-waku/issues/2546)) ([ada2657](https://github.com/waku-org/js-waku/commit/ada265731acfeddc2bfe2e8e963bc2be37f13900))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/enr bumped from ^0.0.31 to ^0.0.32
|
||||
* @waku/interfaces bumped from 0.0.32 to 0.0.33
|
||||
* @waku/proto bumped from 0.0.12 to 0.0.13
|
||||
* @waku/utils bumped from 0.0.25 to 0.0.26
|
||||
|
||||
## [0.0.37](https://github.com/waku-org/js-waku/compare/core-v0.0.36...core-v0.0.37) (2025-07-18)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* remove node level pubsub topic concept
|
||||
* unify events under one source ([#2473](https://github.com/waku-org/js-waku/issues/2473))
|
||||
* re-architect connection manager ([#2445](https://github.com/waku-org/js-waku/issues/2445))
|
||||
|
||||
### Features
|
||||
|
||||
* Add recovery and connection maintenance ([#2496](https://github.com/waku-org/js-waku/issues/2496)) ([ed389cc](https://github.com/waku-org/js-waku/commit/ed389ccbc970c8e41761c5c427d151bcf9f72725))
|
||||
* Don't dial peers that failed before, make dialer use dial queue ([#2478](https://github.com/waku-org/js-waku/issues/2478)) ([35acdf8](https://github.com/waku-org/js-waku/commit/35acdf8fa5a8f8d79b75cc0361302628178b5193))
|
||||
* Implement store query chunking ([#2511](https://github.com/waku-org/js-waku/issues/2511)) ([36f6884](https://github.com/waku-org/js-waku/commit/36f6884d220eab42d4ce25cc4b2cc12cf36e5468))
|
||||
* Make peer manager aware of codec and shard, fix retry manager and shut down subscriptions ([#2425](https://github.com/waku-org/js-waku/issues/2425)) ([058f2ff](https://github.com/waku-org/js-waku/commit/058f2ff620a4a9a6f465a1498c05fb85df369941))
|
||||
* Re-architect connection manager ([#2445](https://github.com/waku-org/js-waku/issues/2445)) ([c7682ea](https://github.com/waku-org/js-waku/commit/c7682ea67c54d2c26a68ce96208003fb1ffc915c))
|
||||
* Unify events under one source ([#2473](https://github.com/waku-org/js-waku/issues/2473)) ([27292ed](https://github.com/waku-org/js-waku/commit/27292edabce801a5d2296437ca3e6198da018a24))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Remove node level pubsub topic concept ([6d55af9](https://github.com/waku-org/js-waku/commit/6d55af947e8e57bf0d33fc6a5a67f61594e83ff1))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/enr bumped from ^0.0.30 to ^0.0.31
|
||||
* @waku/interfaces bumped from 0.0.31 to 0.0.32
|
||||
* @waku/proto bumped from 0.0.11 to 0.0.12
|
||||
* @waku/utils bumped from 0.0.24 to 0.0.25
|
||||
|
||||
## [0.0.36](https://github.com/waku-org/js-waku/compare/core-v0.0.35...core-v0.0.36) (2025-06-23)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* upgrade libp2p, nodejs and typescript ([#2401](https://github.com/waku-org/js-waku/issues/2401))
|
||||
* remove IBaseProtocol and improve interface on PeerExchange ([#2422](https://github.com/waku-org/js-waku/issues/2422))
|
||||
* re-work messaging parts and sharding ([#2399](https://github.com/waku-org/js-waku/issues/2399))
|
||||
|
||||
### Features
|
||||
|
||||
* Event based approach to Filter ([#2300](https://github.com/waku-org/js-waku/issues/2300)) ([a4dfd34](https://github.com/waku-org/js-waku/commit/a4dfd3455c88db6ff60531c15a58128afb25db05))
|
||||
* Re-work messaging parts and sharding ([#2399](https://github.com/waku-org/js-waku/issues/2399)) ([1905558](https://github.com/waku-org/js-waku/commit/1905558753a7bf61c3dd27d6892d0f561d4c57c6))
|
||||
* Shard retrieval for store and store peers selection ([#2417](https://github.com/waku-org/js-waku/issues/2417)) ([f55db3e](https://github.com/waku-org/js-waku/commit/f55db3eb4bbbbbdf454e420b7238ede0642d996f))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Rebase package lock ([f649f59](https://github.com/waku-org/js-waku/commit/f649f59e64cd1d9bd2fcc01509f6725790534a7b))
|
||||
* Remove unnecessary comments from store validation ([5f63cb5](https://github.com/waku-org/js-waku/commit/5f63cb5bfbb8c7926597490a867c11ab90af8159))
|
||||
* **store:** Update store query validation logic to support msg hash q… ([49f26d8](https://github.com/waku-org/js-waku/commit/49f26d89a8032386ee5b3ba629713b4f84891d5d))
|
||||
* **store:** Update store query validation logic to support msg hash queries ([9f7a15d](https://github.com/waku-org/js-waku/commit/9f7a15dfb19e765c4cbfa43f5d4f9323d2804f50))
|
||||
|
||||
|
||||
### Miscellaneous Chores
|
||||
|
||||
* Remove IBaseProtocol and improve interface on PeerExchange ([#2422](https://github.com/waku-org/js-waku/issues/2422)) ([7c8d107](https://github.com/waku-org/js-waku/commit/7c8d1073b0d076117fb33ce05452a88871259782))
|
||||
* Upgrade libp2p, nodejs and typescript ([#2401](https://github.com/waku-org/js-waku/issues/2401)) ([fcc6496](https://github.com/waku-org/js-waku/commit/fcc6496fef914c56f6a4d2d17c494c8b94caea3c))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/enr bumped from ^0.0.29 to ^0.0.30
|
||||
* @waku/interfaces bumped from 0.0.30 to 0.0.31
|
||||
* @waku/proto bumped from 0.0.10 to 0.0.11
|
||||
* @waku/utils bumped from 0.0.23 to 0.0.24
|
||||
|
||||
## [0.0.35](https://github.com/waku-org/js-waku/compare/core-v0.0.34...core-v0.0.35) (2025-04-23)
|
||||
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@waku/core",
|
||||
"version": "0.0.35",
|
||||
"version": "0.0.41",
|
||||
"description": "TypeScript implementation of the Waku v2 protocol",
|
||||
"types": "./dist/index.d.ts",
|
||||
"module": "./dist/index.js",
|
||||
@ -12,10 +12,6 @@
|
||||
"./lib/message/version_0": {
|
||||
"types": "./dist/lib/message/version_0.d.ts",
|
||||
"import": "./dist/lib/message/version_0.js"
|
||||
},
|
||||
"./lib/base_protocol": {
|
||||
"types": "./dist/lib/base_protocol.d.ts",
|
||||
"import": "./dist/lib/base_protocol.js"
|
||||
}
|
||||
},
|
||||
"typesVersions": {
|
||||
@ -29,17 +25,18 @@
|
||||
}
|
||||
},
|
||||
"type": "module",
|
||||
"homepage": "https://github.com/waku-org/js-waku/tree/master/packages/core#readme",
|
||||
"homepage": "https://github.com/logos-messaging/logos-messaging-js/tree/master/packages/core#readme",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/waku-org/js-waku.git"
|
||||
"url": "git+https://github.com/logos-messaging/logos-messaging-js.git"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/waku-org/js-waku/issues"
|
||||
"url": "https://github.com/logos-messaging/logos-messaging-js/issues"
|
||||
},
|
||||
"license": "MIT OR Apache-2.0",
|
||||
"keywords": [
|
||||
"waku",
|
||||
"logos-messaging",
|
||||
"decentralised",
|
||||
"communication",
|
||||
"web3",
|
||||
@ -65,15 +62,16 @@
|
||||
"reset-hard": "git clean -dfx -e .idea && git reset --hard && npm i && npm run build"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
"node": ">=22"
|
||||
},
|
||||
"dependencies": {
|
||||
"@waku/enr": "^0.0.29",
|
||||
"@waku/interfaces": "0.0.30",
|
||||
"@libp2p/ping": "2.0.1",
|
||||
"@waku/proto": "0.0.10",
|
||||
"@waku/utils": "0.0.23",
|
||||
"@waku/enr": "^0.0.34",
|
||||
"@waku/interfaces": "0.0.35",
|
||||
"@libp2p/ping": "2.0.35",
|
||||
"@waku/proto": "0.0.15",
|
||||
"@waku/utils": "0.0.28",
|
||||
"debug": "^4.3.4",
|
||||
"@noble/hashes": "^1.3.2",
|
||||
"it-all": "^3.0.4",
|
||||
"it-length-prefixed": "^9.0.4",
|
||||
"it-pipe": "^3.0.1",
|
||||
@ -81,8 +79,8 @@
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@libp2p/peer-id": "^5.0.1",
|
||||
"@libp2p/interface": "^2.1.3",
|
||||
"@libp2p/peer-id": "5.1.7",
|
||||
"@libp2p/interface": "2.10.4",
|
||||
"@multiformats/multiaddr": "^12.0.0",
|
||||
"@rollup/plugin-commonjs": "^25.0.7",
|
||||
"@rollup/plugin-json": "^6.0.0",
|
||||
@ -105,7 +103,7 @@
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@multiformats/multiaddr": "^12.0.0",
|
||||
"libp2p": "2.1.8"
|
||||
"libp2p": "2.8.11"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@multiformats/multiaddr": {
|
||||
|
||||
@ -3,7 +3,7 @@ import json from "@rollup/plugin-json";
|
||||
import { nodeResolve } from "@rollup/plugin-node-resolve";
|
||||
import { extractExports } from "@waku/build-utils";
|
||||
|
||||
import * as packageJson from "./package.json" assert { type: "json" };
|
||||
import * as packageJson from "./package.json" with { type: "json" };
|
||||
|
||||
const input = extractExports(packageJson);
|
||||
|
||||
|
||||
@ -10,7 +10,11 @@ export * as waku_filter from "./lib/filter/index.js";
|
||||
export { FilterCore, FilterCodecs } from "./lib/filter/index.js";
|
||||
|
||||
export * as waku_light_push from "./lib/light_push/index.js";
|
||||
export { LightPushCodec, LightPushCore } from "./lib/light_push/index.js";
|
||||
export {
|
||||
LightPushCore,
|
||||
LightPushCodec,
|
||||
LightPushCodecV2
|
||||
} from "./lib/light_push/index.js";
|
||||
|
||||
export * as waku_store from "./lib/store/index.js";
|
||||
export { StoreCore, StoreCodec } from "./lib/store/index.js";
|
||||
@ -20,3 +24,5 @@ export { ConnectionManager } from "./lib/connection_manager/index.js";
|
||||
export { StreamManager } from "./lib/stream_manager/index.js";
|
||||
|
||||
export { MetadataCodec, wakuMetadata } from "./lib/metadata/index.js";
|
||||
|
||||
export { messageHash, messageHashStr } from "./lib/message_hash/index.js";
|
||||
|
||||
@ -1,44 +0,0 @@
|
||||
import type { Libp2p } from "@libp2p/interface";
|
||||
import type { PeerId, Stream } from "@libp2p/interface";
|
||||
import type {
|
||||
IBaseProtocolCore,
|
||||
Libp2pComponents,
|
||||
PubsubTopic
|
||||
} from "@waku/interfaces";
|
||||
|
||||
import { StreamManager } from "./stream_manager/index.js";
|
||||
|
||||
/**
|
||||
* A class with predefined helpers, to be used as a base to implement Waku
|
||||
* Protocols.
|
||||
*/
|
||||
export class BaseProtocol implements IBaseProtocolCore {
|
||||
public readonly addLibp2pEventListener: Libp2p["addEventListener"];
|
||||
public readonly removeLibp2pEventListener: Libp2p["removeEventListener"];
|
||||
protected streamManager: StreamManager;
|
||||
|
||||
protected constructor(
|
||||
public multicodec: string,
|
||||
protected components: Libp2pComponents,
|
||||
public readonly pubsubTopics: PubsubTopic[]
|
||||
) {
|
||||
this.addLibp2pEventListener = components.events.addEventListener.bind(
|
||||
components.events
|
||||
);
|
||||
this.removeLibp2pEventListener = components.events.removeEventListener.bind(
|
||||
components.events
|
||||
);
|
||||
|
||||
this.streamManager = new StreamManager(
|
||||
multicodec,
|
||||
components.connectionManager.getConnections.bind(
|
||||
components.connectionManager
|
||||
),
|
||||
this.addLibp2pEventListener
|
||||
);
|
||||
}
|
||||
|
||||
protected async getStream(peerId: PeerId): Promise<Stream> {
|
||||
return this.streamManager.getStream(peerId);
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,540 @@
|
||||
import { type Connection, type Peer, type PeerId } from "@libp2p/interface";
|
||||
import { multiaddr } from "@multiformats/multiaddr";
|
||||
import {
|
||||
CONNECTION_LOCKED_TAG,
|
||||
IWakuEventEmitter,
|
||||
Tags,
|
||||
WakuEvent
|
||||
} from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { ConnectionLimiter } from "./connection_limiter.js";
|
||||
import { Dialer } from "./dialer.js";
|
||||
import { NetworkMonitor } from "./network_monitor.js";
|
||||
|
||||
describe("ConnectionLimiter", () => {
|
||||
let libp2p: any;
|
||||
let events: IWakuEventEmitter;
|
||||
let dialer: sinon.SinonStubbedInstance<Dialer>;
|
||||
let networkMonitor: sinon.SinonStubbedInstance<NetworkMonitor>;
|
||||
let connectionLimiter: ConnectionLimiter;
|
||||
let mockPeerId: PeerId;
|
||||
|
||||
let mockConnection: Connection;
|
||||
|
||||
let mockPeer: Peer;
|
||||
let mockPeer2: Peer;
|
||||
|
||||
const createMockPeerId = (id: string): PeerId =>
|
||||
({
|
||||
toString: () => id,
|
||||
equals: function (other: PeerId) {
|
||||
return (
|
||||
other &&
|
||||
typeof other.toString === "function" &&
|
||||
other.toString() === id
|
||||
);
|
||||
}
|
||||
}) as PeerId;
|
||||
|
||||
const createMockPeer = (id: string, tags: string[] = []): Peer =>
|
||||
({
|
||||
id: createMockPeerId(id),
|
||||
tags: new Map(tags.map((tag) => [tag, { value: 0 }])),
|
||||
addresses: [],
|
||||
protocols: [],
|
||||
metadata: new Map(),
|
||||
toString: () => id
|
||||
}) as unknown as Peer;
|
||||
|
||||
const createMockConnection = (
|
||||
peerId: PeerId,
|
||||
tags: string[] = []
|
||||
): Connection =>
|
||||
({
|
||||
remotePeer: peerId,
|
||||
tags: tags || []
|
||||
}) as Connection;
|
||||
|
||||
const defaultOptions = {
|
||||
maxConnections: 5,
|
||||
maxBootstrapPeers: 2,
|
||||
pingKeepAlive: 300,
|
||||
relayKeepAlive: 300,
|
||||
enableAutoRecovery: true,
|
||||
maxDialingPeers: 3,
|
||||
failedDialCooldown: 60,
|
||||
dialCooldown: 10,
|
||||
dialTimeout: 30
|
||||
};
|
||||
|
||||
function createLimiter(
|
||||
opts: Partial<typeof defaultOptions> = {}
|
||||
): ConnectionLimiter {
|
||||
return new ConnectionLimiter({
|
||||
libp2p,
|
||||
events,
|
||||
dialer,
|
||||
networkMonitor,
|
||||
options: { ...defaultOptions, ...opts }
|
||||
});
|
||||
}
|
||||
|
||||
  // Fresh stubs and fixtures for every test; restored in afterEach.
  beforeEach(() => {
    mockPeerId = createMockPeerId("12D3KooWTest1");

    mockPeer = createMockPeer("12D3KooWTest1", [Tags.BOOTSTRAP]);
    mockPeer2 = createMockPeer("12D3KooWTest2", [Tags.BOOTSTRAP]); // Ensure mockPeer2 is prioritized and dialed
    mockConnection = createMockConnection(mockPeerId, [Tags.BOOTSTRAP]);

    // Dialer stub: dial() resolves immediately so tests only count calls.
    dialer = {
      start: sinon.stub(),
      stop: sinon.stub(),
      dial: sinon.stub().resolves()
    } as unknown as sinon.SinonStubbedInstance<Dialer>;

    // Minimal libp2p stub: no connections by default; peerStore.get resolves
    // to mockPeer for any id.
    libp2p = {
      addEventListener: sinon.stub(),
      removeEventListener: sinon.stub(),
      dial: sinon.stub().resolves(),
      hangUp: sinon.stub().resolves(),
      getConnections: sinon.stub().returns([]),
      peerStore: {
        all: sinon.stub().resolves([]),
        get: sinon.stub().resolves(mockPeer),
        merge: sinon.stub().resolves()
      },
      components: {
        components: {}
      }
    };

    // Waku event bus stub.
    events = {
      addEventListener: sinon.stub(),
      removeEventListener: sinon.stub(),
      dispatchEvent: sinon.stub()
    } as any;

    // Network monitor stub: everything reported as connected by default.
    networkMonitor = {
      start: sinon.stub(),
      stop: sinon.stub(),
      isBrowserConnected: sinon.stub().returns(true),
      isConnected: sinon.stub().returns(true),
      isP2PConnected: sinon.stub().returns(true)
    } as unknown as sinon.SinonStubbedInstance<NetworkMonitor>;

    // Mock the libp2p components needed by isAddressesSupported
    // (a transport whose dialFilter accepts the test multiaddrs).
    libp2p.components = {
      components: {},
      transportManager: {
        getTransports: sinon.stub().returns([
          {
            dialFilter: sinon
              .stub()
              .returns([multiaddr("/dns4/test/tcp/443/wss")])
          }
        ])
      }
    };
  });

  // Stop the limiter (clears listeners/intervals) and reset sinon state.
  afterEach(() => {
    if (connectionLimiter) {
      connectionLimiter.stop();
    }
    sinon.restore();
  });
|
||||
|
||||
  // Lifecycle: start() must dial known peers and register both listeners.
  describe("start", () => {
    beforeEach(() => {
      connectionLimiter = createLimiter();
    });

    it("should dial peers from store on start", async () => {
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );

      connectionLimiter.start();

      expect(dialPeersStub.calledOnce).to.be.true;
    });

    it("should add event listeners for waku:connection and peer:disconnect", () => {
      connectionLimiter.start();

      expect((events.addEventListener as sinon.SinonStub).calledOnce).to.be
        .true;
      expect(
        (events.addEventListener as sinon.SinonStub).calledWith(
          WakuEvent.Connection,
          sinon.match.func
        )
      ).to.be.true;

      expect(libp2p.addEventListener.calledOnce).to.be.true;
      expect(
        libp2p.addEventListener.calledWith("peer:disconnect", sinon.match.func)
      ).to.be.true;
    });

    // NOTE: double start re-registers listeners (documented behavior: 2 calls).
    it("should be safe to call multiple times", () => {
      connectionLimiter.start();
      connectionLimiter.start();

      expect((events.addEventListener as sinon.SinonStub).callCount).to.equal(
        2
      );
      expect(libp2p.addEventListener.callCount).to.equal(2);
    });
  });

  // Lifecycle: stop() must mirror start() by removing both listeners.
  describe("stop", () => {
    beforeEach(() => {
      connectionLimiter = createLimiter();
      connectionLimiter.start();
    });

    it("should remove event listeners", () => {
      connectionLimiter.stop();

      expect((events.removeEventListener as sinon.SinonStub).calledOnce).to.be
        .true;
      expect(
        (events.removeEventListener as sinon.SinonStub).calledWith(
          WakuEvent.Connection,
          sinon.match.func
        )
      ).to.be.true;

      expect(libp2p.removeEventListener.calledOnce).to.be.true;
      expect(
        libp2p.removeEventListener.calledWith(
          "peer:disconnect",
          sinon.match.func
        )
      ).to.be.true;
    });

    it("should be safe to call multiple times", () => {
      connectionLimiter.stop();
      connectionLimiter.stop();

      expect(
        (events.removeEventListener as sinon.SinonStub).callCount
      ).to.equal(2);
      expect(libp2p.removeEventListener.callCount).to.equal(2);
    });
  });
|
||||
|
||||
  // Behavior of the handler registered for WakuEvent.Connection.
  describe("onWakuConnectionEvent", () => {
    let eventHandler: () => void;

    beforeEach(() => {
      connectionLimiter = createLimiter();
      connectionLimiter.start();

      // Capture the handler that start() registered on the waku event bus.
      const addEventListenerStub = events.addEventListener as sinon.SinonStub;
      eventHandler = addEventListenerStub.getCall(0).args[1];
    });

    it("should dial peers from store when browser is connected", () => {
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      networkMonitor.isBrowserConnected.returns(true);

      eventHandler();

      expect(dialPeersStub.calledOnce).to.be.true;
    });

    it("should not dial peers from store when browser is not connected", () => {
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      networkMonitor.isBrowserConnected.returns(false);

      eventHandler();

      expect(dialPeersStub.called).to.be.false;
    });
  });

  // Behavior of the handler registered for libp2p "peer:disconnect".
  describe("onDisconnectedEvent", () => {
    let eventHandler: () => Promise<void>;

    beforeEach(() => {
      connectionLimiter = createLimiter();
      connectionLimiter.start();

      // Capture the handler that start() registered on libp2p.
      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      eventHandler = addEventListenerStub.getCall(0).args[1];
    });

    it("should dial peers from store when no connections remain", async () => {
      libp2p.getConnections.returns([]);
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      await eventHandler();
      expect(dialPeersStub.calledOnce).to.be.true;
    });

    it("should do nothing when connections still exist", async () => {
      libp2p.getConnections.returns([mockConnection]);
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      await eventHandler();
      expect(dialPeersStub.called).to.be.false;
    });
  });
|
||||
|
||||
  // dialPeersFromStore: dials every known, not-yet-connected peer.
  describe("dialPeersFromStore", () => {
    beforeEach(() => {
      libp2p.hangUp = sinon.stub().resolves();
      connectionLimiter = createLimiter();
      // Give the mock peers dialable addresses so isAddressesSupported passes.
      mockPeer.addresses = [
        {
          multiaddr: multiaddr("/dns4/mockpeer/tcp/443/wss"),
          isCertified: false
        }
      ];
      mockPeer2.addresses = [
        {
          multiaddr: multiaddr("/dns4/mockpeer2/tcp/443/wss"),
          isCertified: false
        }
      ];
    });

    it("should get all peers from store", async () => {
      libp2p.peerStore.all.resolves([mockPeer, mockPeer2]);
      libp2p.getConnections.returns([]);
      await (connectionLimiter as any).dialPeersFromStore();
      expect(libp2p.peerStore.all.calledOnce).to.be.true;
    });

    it("should filter out already connected peers", async () => {
      dialer.dial.resetHistory();
      libp2p.hangUp.resetHistory();
      libp2p.peerStore.all.resolves([mockPeer, mockPeer2]);
      // mockPeer is connected, so only mockPeer2 should be dialed.
      libp2p.getConnections.returns([createMockConnection(mockPeer.id, [])]);
      await (connectionLimiter as any).dialPeersFromStore();
      expect(dialer.dial.calledOnce).to.be.true;
      expect(dialer.dial.calledWith(mockPeer2.id)).to.be.true;
      expect(dialer.dial.calledWith(mockPeer.id)).to.be.false;
    });

    it("should dial all remaining peers", async () => {
      dialer.dial.resetHistory();
      libp2p.hangUp.resetHistory();
      libp2p.peerStore.all.resolves([mockPeer, mockPeer2]);
      libp2p.getConnections.returns([]);
      await (connectionLimiter as any).dialPeersFromStore();
      expect(dialer.dial.callCount).to.equal(2);
      expect(dialer.dial.calledWith(mockPeer.id)).to.be.true;
      expect(dialer.dial.calledWith(mockPeer2.id)).to.be.true;
    });

    // A rejected dial must not propagate out of dialPeersFromStore.
    it("should handle dial errors gracefully", async () => {
      libp2p.peerStore.all.resolves([mockPeer]);
      libp2p.getConnections.returns([]);
      dialer.dial.rejects(new Error("Dial failed"));
      await (connectionLimiter as any).dialPeersFromStore();
      expect(dialer.dial.calledOnce).to.be.true;
    });

    it("should handle case with no peers in store", async () => {
      libp2p.peerStore.all.resolves([]);
      libp2p.getConnections.returns([]);
      await (connectionLimiter as any).dialPeersFromStore();
      expect(dialer.dial.called).to.be.false;
    });

    it("should handle case with all peers already connected", async () => {
      libp2p.peerStore.all.resolves([mockPeer]);
      libp2p.getConnections.returns([createMockConnection(mockPeer.id)]);
      await (connectionLimiter as any).dialPeersFromStore();
      expect(dialer.dial.called).to.be.false;
    });
  });
|
||||
|
||||
  // getPeer: peer store lookup that converts failures into null.
  describe("getPeer", () => {
    beforeEach(() => {
      connectionLimiter = createLimiter();
    });

    it("should return peer for existing peer", async () => {
      const peer = await (connectionLimiter as any).getPeer(mockPeerId);

      expect(libp2p.peerStore.get.calledWith(mockPeerId)).to.be.true;
      expect(peer).to.equal(mockPeer);
    });

    it("should return null for non-existent peer", async () => {
      libp2p.peerStore.get.rejects(new Error("Peer not found"));

      const peer = await (connectionLimiter as any).getPeer(mockPeerId);

      expect(peer).to.be.null;
    });

    it("should handle peer store errors gracefully", async () => {
      libp2p.peerStore.get.rejects(new Error("Database error"));

      const peer = await (connectionLimiter as any).getPeer(mockPeerId);

      expect(peer).to.be.null;
    });
  });

  // enableAutoRecovery only gates the monitor interval and the
  // waku:connection reaction — start() always performs the initial dial.
  describe("autoRecovery flag", () => {
    it("should not dial on waku:connection if enableAutoRecovery is false, but should dial on start", () => {
      connectionLimiter = createLimiter({ enableAutoRecovery: false });
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      connectionLimiter.start();
      expect(connectionLimiter["connectionMonitorInterval"]).to.be.null;
      connectionLimiter["onWakuConnectionEvent"]();
      // Only the start() dial happened; the event handler skipped.
      expect(dialPeersStub.calledOnce).to.be.true;
    });

    it("should start connection monitor interval and dial on waku:connection if enableAutoRecovery is true", () => {
      connectionLimiter = createLimiter({ enableAutoRecovery: true });
      const dialPeersStub = sinon.stub(
        connectionLimiter as any,
        "dialPeersFromStore"
      );
      connectionLimiter.start();
      expect(connectionLimiter["connectionMonitorInterval"]).to.not.be.null;
      connectionLimiter["onWakuConnectionEvent"]();
      // One dial from start() plus one from the event handler.
      expect(dialPeersStub.calledTwice).to.be.true;
    });
  });
|
||||
|
||||
  // maintainConnectionsCount: fills free slots or drops surplus connections.
  describe("maintainConnectionsCount", () => {
    beforeEach(() => {
      libp2p.hangUp = sinon.stub().resolves();
      connectionLimiter = createLimiter({ maxConnections: 2 });
      // Give the mock peers dialable addresses so isAddressesSupported passes.
      mockPeer.addresses = [
        {
          multiaddr: multiaddr("/dns4/mockpeer/tcp/443/wss"),
          isCertified: false
        }
      ];
      mockPeer2.addresses = [
        {
          multiaddr: multiaddr("/dns4/mockpeer2/tcp/443/wss"),
          isCertified: false
        }
      ];
    });

    it("should dial more peers if under maxConnections", async () => {
      libp2p.getConnections.returns([]);
      sinon
        .stub(connectionLimiter as any, "getPrioritizedPeers")
        .resolves([mockPeer, mockPeer2]);
      await (connectionLimiter as any).maintainConnectionsCount();
      expect(dialer.dial.calledTwice).to.be.true;
    });

    // Locked connections must never be hung up when trimming.
    it("should drop only non-locked connections when over maxConnections", async () => {
      dialer.dial.resetHistory();
      libp2p.hangUp.resetHistory();
      const lockedConn = createMockConnection(mockPeerId, [
        CONNECTION_LOCKED_TAG
      ]);
      const normalConn1 = createMockConnection(createMockPeerId("p2"), []);
      const normalConn2 = createMockConnection(createMockPeerId("p3"), []);
      const normalConn3 = createMockConnection(createMockPeerId("p4"), []);
      const connections = [lockedConn, normalConn1, normalConn2, normalConn3];
      libp2p.getConnections.returns(connections);
      sinon.stub(connectionLimiter as any, "getPrioritizedPeers").resolves([]);
      await (connectionLimiter as any).maintainConnectionsCount();

      // With maxConnections=2 the limiter keeps the first two non-locked
      // connections and the locked one; only the last non-locked is dropped.
      expect(libp2p.hangUp.callCount).to.equal(1);
      expect(libp2p.hangUp.calledWith(normalConn3.remotePeer)).to.be.true;
      expect(libp2p.hangUp.calledWith(normalConn1.remotePeer)).to.be.false;
      expect(libp2p.hangUp.calledWith(normalConn2.remotePeer)).to.be.false;
      expect(libp2p.hangUp.calledWith(lockedConn.remotePeer)).to.be.false;
    });

    it("should do nothing if no non-locked connections to drop", async () => {
      const lockedConn1 = createMockConnection(createMockPeerId("p1"), [
        CONNECTION_LOCKED_TAG
      ]);
      const lockedConn2 = createMockConnection(createMockPeerId("p2"), [
        CONNECTION_LOCKED_TAG
      ]);
      libp2p.getConnections.returns([lockedConn1, lockedConn2]);
      sinon.stub(connectionLimiter as any, "getPrioritizedPeers").resolves([]);
      await (connectionLimiter as any).maintainConnectionsCount();
      expect(libp2p.hangUp.called).to.be.false;
    });
  });

  // maintainBootstrapConnections: caps the number of bootstrap peers.
  describe("maintainBootstrapConnections", () => {
    beforeEach(() => {
      connectionLimiter = createLimiter({ maxBootstrapPeers: 2 });
    });

    it("should do nothing if at or below maxBootstrapPeers", async () => {
      sinon
        .stub(connectionLimiter as any, "getBootstrapPeers")
        .resolves([mockPeer, mockPeer2]);
      await (connectionLimiter as any).maintainBootstrapConnections();
      expect(libp2p.hangUp.called).to.be.false;
    });

    it("should drop excess bootstrap peers if over maxBootstrapPeers", async () => {
      const p1 = createMockPeer("p1", [Tags.BOOTSTRAP]);
      const p2 = createMockPeer("p2", [Tags.BOOTSTRAP]);
      const p3 = createMockPeer("p3", [Tags.BOOTSTRAP]);
      sinon
        .stub(connectionLimiter as any, "getBootstrapPeers")
        .resolves([p1, p2, p3]);
      await (connectionLimiter as any).maintainBootstrapConnections();
      // Only the peer past the cap (p3) is hung up.
      expect(libp2p.hangUp.calledOnce).to.be.true;
      expect(libp2p.hangUp.calledWith(p3.id)).to.be.true;
    });
  });

  // getPrioritizedPeers ordering: bootstrap > peer exchange > peer cache.
  describe("dialPeersFromStore prioritization", () => {
    beforeEach(() => {
      connectionLimiter = createLimiter();
    });

    it("should prioritize bootstrap, then peer exchange, then local peers", async () => {
      const bootstrapPeer = createMockPeer("b", [Tags.BOOTSTRAP]);
      bootstrapPeer.addresses = [
        { multiaddr: multiaddr("/dns4/b/tcp/443/wss"), isCertified: false }
      ];
      const pxPeer = createMockPeer("px", [Tags.PEER_EXCHANGE]);
      pxPeer.addresses = [
        { multiaddr: multiaddr("/dns4/px/tcp/443/wss"), isCertified: false }
      ];
      const localPeer = createMockPeer("l", [Tags.PEER_CACHE]);
      localPeer.addresses = [
        { multiaddr: multiaddr("/dns4/l/tcp/443/wss"), isCertified: false }
      ];
      libp2p.peerStore.all.resolves([bootstrapPeer, pxPeer, localPeer]);
      libp2p.getConnections.returns([]);
      connectionLimiter = createLimiter();
      const peers = await (connectionLimiter as any).getPrioritizedPeers();
      expect(peers[0].id.toString()).to.equal("b");
      expect(peers[1].id.toString()).to.equal("px");
      expect(peers[2].id.toString()).to.equal("l");
    });
  });
|
||||
});
|
||||
349
packages/core/src/lib/connection_manager/connection_limiter.ts
Normal file
349
packages/core/src/lib/connection_manager/connection_limiter.ts
Normal file
@ -0,0 +1,349 @@
|
||||
import { Peer, PeerId } from "@libp2p/interface";
|
||||
import {
|
||||
CONNECTION_LOCKED_TAG,
|
||||
ConnectionManagerOptions,
|
||||
IWakuEventEmitter,
|
||||
Libp2p,
|
||||
Libp2pEventHandler,
|
||||
Tags,
|
||||
WakuEvent
|
||||
} from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import { Dialer } from "./dialer.js";
|
||||
import { NetworkMonitor } from "./network_monitor.js";
|
||||
import { isAddressesSupported } from "./utils.js";
|
||||
|
||||
const log = new Logger("connection-limiter");

// How often (ms) the periodic connection-maintenance pass runs.
const DEFAULT_CONNECTION_MONITOR_INTERVAL = 5 * 1_000;

// Collaborators injected into ConnectionLimiter.
type ConnectionLimiterConstructorOptions = {
  libp2p: Libp2p;
  events: IWakuEventEmitter;
  dialer: Dialer;
  networkMonitor: NetworkMonitor;
  options: ConnectionManagerOptions;
};

// Minimal lifecycle contract implemented by ConnectionLimiter.
interface IConnectionLimiter {
  start(): void;
  stop(): void;
}
|
||||
|
||||
/**
 * This class is responsible for limiting the number of connections to peers.
 * It also dials all known peers because libp2p might have emitted `peer:discovery` before initialization
 * and listen to `peer:connect` and `peer:disconnect` events to manage connections.
 */
export class ConnectionLimiter implements IConnectionLimiter {
  private readonly libp2p: Libp2p;
  private readonly events: IWakuEventEmitter;
  private readonly networkMonitor: NetworkMonitor;
  private readonly dialer: Dialer;

  // Timer handle for the periodic maintenance pass; null when not running.
  private connectionMonitorInterval: NodeJS.Timeout | null = null;
  private readonly options: ConnectionManagerOptions;
|
||||
|
||||
public constructor(options: ConnectionLimiterConstructorOptions) {
|
||||
this.libp2p = options.libp2p;
|
||||
this.events = options.events;
|
||||
this.networkMonitor = options.networkMonitor;
|
||||
this.dialer = options.dialer;
|
||||
|
||||
this.options = options.options;
|
||||
|
||||
this.onWakuConnectionEvent = this.onWakuConnectionEvent.bind(this);
|
||||
this.onDisconnectedEvent = this.onDisconnectedEvent.bind(this);
|
||||
}
|
||||
|
||||
  /**
   * Begins connection management:
   * - immediately dials peers already known to the peer store;
   * - if auto-recovery is enabled, starts the periodic maintenance timer;
   * - subscribes to the waku connection event and libp2p `peer:disconnect`.
   *
   * NOTE: calling start() again registers the listeners a second time.
   */
  public start(): void {
    // dial all known peers because libp2p might have emitted `peer:discovery` before initialization
    void this.dialPeersFromStore();

    if (
      this.options.enableAutoRecovery &&
      this.connectionMonitorInterval === null
    ) {
      // Periodically enforce maxConnections / maxBootstrapPeers.
      this.connectionMonitorInterval = setInterval(
        () => void this.maintainConnections(),
        DEFAULT_CONNECTION_MONITOR_INTERVAL
      );
    }

    this.events.addEventListener(
      WakuEvent.Connection,
      this.onWakuConnectionEvent
    );

    /**
     * NOTE: Event is not being emitted on closing nor losing a connection.
     * @see https://github.com/libp2p/js-libp2p/issues/939
     * @see https://github.com/logos-messaging/logos-messaging-js/issues/252
     *
     * >This event will be triggered anytime we are disconnected from another peer,
     * >regardless of the circumstances of that disconnection.
     * >If we happen to have multiple connections to a peer,
     * >this event will **only** be triggered when the last connection is closed.
     * @see https://github.com/libp2p/js-libp2p/blob/bad9e8c0ff58d60a78314077720c82ae331cc55b/doc/API.md?plain=1#L2100
     */
    this.libp2p.addEventListener(
      "peer:disconnect",
      this.onDisconnectedEvent as Libp2pEventHandler<PeerId>
    );
  }
|
||||
|
||||
public stop(): void {
|
||||
this.events.removeEventListener(
|
||||
WakuEvent.Connection,
|
||||
this.onWakuConnectionEvent
|
||||
);
|
||||
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:disconnect",
|
||||
this.onDisconnectedEvent as Libp2pEventHandler<PeerId>
|
||||
);
|
||||
|
||||
if (this.connectionMonitorInterval) {
|
||||
clearInterval(this.connectionMonitorInterval);
|
||||
this.connectionMonitorInterval = null;
|
||||
}
|
||||
}
|
||||
|
||||
private onWakuConnectionEvent(): void {
|
||||
if (!this.options.enableAutoRecovery) {
|
||||
log.info(`Auto recovery is disabled, skipping`);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.networkMonitor.isBrowserConnected()) {
|
||||
void this.dialPeersFromStore();
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Single maintenance pass, run on the timer started in start():
   * first enforces the global connection cap, then the bootstrap-peer cap
   * (run sequentially, in that order).
   */
  private async maintainConnections(): Promise<void> {
    await this.maintainConnectionsCount();
    await this.maintainBootstrapConnections();
  }
|
||||
|
||||
private async onDisconnectedEvent(): Promise<void> {
|
||||
if (this.libp2p.getConnections().length === 0) {
|
||||
log.info(`No connections, dialing peers from store`);
|
||||
await this.dialPeersFromStore();
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Enforces `maxConnections`:
   * - at or below the cap: dials prioritized peers to fill the free slots,
   *   or triggers bootstrap discovery when no candidates are known;
   * - above the cap: hangs up surplus connections, never touching
   *   connections tagged with CONNECTION_LOCKED_TAG.
   */
  private async maintainConnectionsCount(): Promise<void> {
    log.info(`Maintaining connections count`);

    const connections = this.libp2p.getConnections();

    if (connections.length <= this.options.maxConnections) {
      log.info(
        `Node has less than max connections ${this.options.maxConnections}, trying to dial more peers`
      );

      const peers = await this.getPrioritizedPeers();

      if (peers.length === 0) {
        log.info(`No peers to dial, skipping`);
        await this.triggerBootstrap();
        return;
      }

      // Only dial as many peers as there are free slots.
      const promises = peers
        .slice(0, this.options.maxConnections - connections.length)
        .map((p) => this.dialer.dial(p.id));

      await Promise.all(promises);

      return;
    }

    log.info(
      `Node has more than max connections ${this.options.maxConnections}, dropping connections`
    );

    try {
      // NOTE(review): `.slice(maxConnections)` is applied to the list that
      // already excludes locked connections, so while locked connections are
      // present the total can stay above maxConnections — confirm intended
      // (the unit test pins this exact behavior).
      const connectionsToDrop = connections
        .filter((c) => !c.tags.includes(CONNECTION_LOCKED_TAG))
        .slice(this.options.maxConnections);

      if (connectionsToDrop.length === 0) {
        log.info(`No connections to drop, skipping`);
        return;
      }

      const promises = connectionsToDrop.map((c) =>
        this.libp2p.hangUp(c.remotePeer)
      );
      await Promise.all(promises);

      log.info(`Dropped ${connectionsToDrop.length} connections`);
    } catch (error) {
      log.error(`Unexpected error while maintaining connections`, error);
    }
  }
|
||||
|
||||
private async maintainBootstrapConnections(): Promise<void> {
|
||||
log.info(`Maintaining bootstrap connections`);
|
||||
|
||||
const bootstrapPeers = await this.getBootstrapPeers();
|
||||
|
||||
if (bootstrapPeers.length <= this.options.maxBootstrapPeers) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const peersToDrop = bootstrapPeers.slice(this.options.maxBootstrapPeers);
|
||||
|
||||
log.info(
|
||||
`Dropping ${peersToDrop.length} bootstrap connections because node has more than max bootstrap connections ${this.options.maxBootstrapPeers}`
|
||||
);
|
||||
|
||||
const promises = peersToDrop.map((p) => this.libp2p.hangUp(p.id));
|
||||
await Promise.all(promises);
|
||||
|
||||
log.info(`Dropped ${peersToDrop.length} bootstrap connections`);
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Unexpected error while maintaining bootstrap connections`,
|
||||
error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private async dialPeersFromStore(): Promise<void> {
|
||||
log.info(`Dialing peers from store`);
|
||||
|
||||
try {
|
||||
const peers = await this.getPrioritizedPeers();
|
||||
|
||||
if (peers.length === 0) {
|
||||
log.info(`No peers to dial, skipping`);
|
||||
await this.triggerBootstrap();
|
||||
return;
|
||||
}
|
||||
|
||||
const promises = peers.map((p) => this.dialer.dial(p.id));
|
||||
|
||||
log.info(`Dialing ${peers.length} peers from store`);
|
||||
await Promise.all(promises);
|
||||
log.info(`Dialed ${promises.length} peers from store`);
|
||||
} catch (error) {
|
||||
log.error(`Unexpected error while dialing peer store peers`, error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a list of peers ordered by priority:
|
||||
* - bootstrap peers
|
||||
* - peers from peer exchange
|
||||
* - peers from peer cache (last because we are not sure that locally stored information is up to date)
|
||||
*/
|
||||
private async getPrioritizedPeers(): Promise<Peer[]> {
|
||||
const allPeers = await this.libp2p.peerStore.all();
|
||||
const allConnections = this.libp2p.getConnections();
|
||||
const allConnectionsSet = new Set(
|
||||
allConnections.map((c) => c.remotePeer.toString())
|
||||
);
|
||||
|
||||
log.info(
|
||||
`Found ${allPeers.length} peers in store, and found ${allConnections.length} connections`
|
||||
);
|
||||
|
||||
const notConnectedPeers = allPeers.filter(
|
||||
(p) =>
|
||||
!allConnectionsSet.has(p.id.toString()) &&
|
||||
isAddressesSupported(
|
||||
this.libp2p,
|
||||
p.addresses.map((a) => a.multiaddr)
|
||||
)
|
||||
);
|
||||
|
||||
const bootstrapPeers = notConnectedPeers.filter((p) =>
|
||||
p.tags.has(Tags.BOOTSTRAP)
|
||||
);
|
||||
|
||||
const peerExchangePeers = notConnectedPeers.filter((p) =>
|
||||
p.tags.has(Tags.PEER_EXCHANGE)
|
||||
);
|
||||
|
||||
const localStorePeers = notConnectedPeers.filter((p) =>
|
||||
p.tags.has(Tags.PEER_CACHE)
|
||||
);
|
||||
|
||||
const restPeers = notConnectedPeers.filter(
|
||||
(p) =>
|
||||
!p.tags.has(Tags.BOOTSTRAP) &&
|
||||
!p.tags.has(Tags.PEER_EXCHANGE) &&
|
||||
!p.tags.has(Tags.PEER_CACHE)
|
||||
);
|
||||
|
||||
return [
|
||||
...bootstrapPeers,
|
||||
...peerExchangePeers,
|
||||
...localStorePeers,
|
||||
...restPeers
|
||||
];
|
||||
}
|
||||
|
||||
private async getBootstrapPeers(): Promise<Peer[]> {
|
||||
const peers = await Promise.all(
|
||||
this.libp2p
|
||||
.getConnections()
|
||||
.map((conn) => conn.remotePeer)
|
||||
.map((id) => this.getPeer(id))
|
||||
);
|
||||
|
||||
return peers.filter(
|
||||
(peer) => peer && peer.tags.has(Tags.BOOTSTRAP)
|
||||
) as Peer[];
|
||||
}
|
||||
|
||||
private async getPeer(peerId: PeerId): Promise<Peer | null> {
|
||||
try {
|
||||
return await this.libp2p.peerStore.get(peerId);
|
||||
} catch (error) {
|
||||
log.error(`Failed to get peer ${peerId}, error: ${error}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Triggers the bootstrap or peer cache discovery if they are mounted.
|
||||
* @returns void
|
||||
*/
|
||||
private async triggerBootstrap(): Promise<void> {
|
||||
log.info("Triggering bootstrap discovery");
|
||||
|
||||
const bootstrapComponents = Object.values(this.libp2p.components.components)
|
||||
.filter((c) => !!c)
|
||||
.filter((c: unknown) =>
|
||||
[`@waku/${Tags.BOOTSTRAP}`, `@waku/${Tags.PEER_CACHE}`].includes(
|
||||
(c as { [Symbol.toStringTag]: string })?.[Symbol.toStringTag]
|
||||
)
|
||||
);
|
||||
|
||||
if (bootstrapComponents.length === 0) {
|
||||
log.warn("No bootstrap components found to trigger");
|
||||
return;
|
||||
}
|
||||
|
||||
log.info(
|
||||
`Found ${bootstrapComponents.length} bootstrap components, starting them`
|
||||
);
|
||||
|
||||
const promises = bootstrapComponents.map(async (component) => {
|
||||
try {
|
||||
await (component as { stop: () => Promise<void> })?.stop?.();
|
||||
await (component as { start: () => Promise<void> })?.start?.();
|
||||
log.info("Successfully started bootstrap component");
|
||||
} catch (error) {
|
||||
log.error("Failed to start bootstrap component", error);
|
||||
}
|
||||
});
|
||||
|
||||
await Promise.all(promises);
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,587 @@
|
||||
import { type Peer, type PeerId, type Stream } from "@libp2p/interface";
|
||||
import { peerIdFromString } from "@libp2p/peer-id";
|
||||
import { multiaddr, MultiaddrInput } from "@multiformats/multiaddr";
|
||||
import {
|
||||
IWakuEventEmitter,
|
||||
Libp2p,
|
||||
NetworkConfig,
|
||||
PubsubTopic
|
||||
} from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { ConnectionLimiter } from "./connection_limiter.js";
|
||||
import { ConnectionManager } from "./connection_manager.js";
|
||||
import { DiscoveryDialer } from "./discovery_dialer.js";
|
||||
import { KeepAliveManager } from "./keep_alive_manager.js";
|
||||
import { NetworkMonitor } from "./network_monitor.js";
|
||||
import { IShardReader, ShardReader } from "./shard_reader.js";
|
||||
|
||||
describe("ConnectionManager", () => {
|
||||
let libp2p: Libp2p;
|
||||
let events: IWakuEventEmitter;
|
||||
let networkConfig: NetworkConfig;
|
||||
let pubsubTopics: PubsubTopic[];
|
||||
let relay: any;
|
||||
let connectionManager: ConnectionManager;
|
||||
let mockPeerId: PeerId;
|
||||
let mockMultiaddr: MultiaddrInput;
|
||||
let mockStream: Stream;
|
||||
// Mock internal components
|
||||
let mockKeepAliveManager: sinon.SinonStubbedInstance<KeepAliveManager>;
|
||||
let mockDiscoveryDialer: sinon.SinonStubbedInstance<DiscoveryDialer>;
|
||||
let mockShardReader: sinon.SinonStubbedInstance<IShardReader>;
|
||||
let mockNetworkMonitor: sinon.SinonStubbedInstance<NetworkMonitor>;
|
||||
let mockConnectionLimiter: sinon.SinonStubbedInstance<ConnectionLimiter>;
|
||||
|
||||
const createMockPeer = (
|
||||
id: string,
|
||||
protocols: string[] = [],
|
||||
ping = 100
|
||||
): Peer =>
|
||||
({
|
||||
id: peerIdFromString(id),
|
||||
protocols,
|
||||
metadata: new Map([["ping", new TextEncoder().encode(ping.toString())]]),
|
||||
toString: () => id
|
||||
}) as Peer;
|
||||
|
||||
beforeEach(() => {
|
||||
// Create mock dependencies
|
||||
libp2p = {
|
||||
dialProtocol: sinon.stub().resolves({} as Stream),
|
||||
hangUp: sinon.stub().resolves(),
|
||||
getPeers: sinon.stub().returns([]),
|
||||
getConnections: sinon.stub().returns([]),
|
||||
addEventListener: sinon.stub(),
|
||||
removeEventListener: sinon.stub(),
|
||||
components: {
|
||||
components: {}
|
||||
},
|
||||
peerStore: {
|
||||
get: sinon.stub().resolves(null),
|
||||
merge: sinon.stub().resolves()
|
||||
}
|
||||
} as unknown as Libp2p;
|
||||
|
||||
events = {
|
||||
dispatchEvent: sinon.stub()
|
||||
} as unknown as IWakuEventEmitter;
|
||||
|
||||
networkConfig = {
|
||||
clusterId: 2,
|
||||
shards: [0, 1]
|
||||
} as NetworkConfig;
|
||||
|
||||
pubsubTopics = ["/waku/2/rs/1/0", "/waku/2/rs/1/1"];
|
||||
|
||||
relay = {
|
||||
pubsubTopics,
|
||||
getMeshPeers: sinon.stub().returns([])
|
||||
};
|
||||
|
||||
// Create mock internal components
|
||||
mockKeepAliveManager = {
|
||||
start: sinon.stub(),
|
||||
stop: sinon.stub()
|
||||
} as unknown as sinon.SinonStubbedInstance<KeepAliveManager>;
|
||||
|
||||
mockDiscoveryDialer = {
|
||||
start: sinon.stub(),
|
||||
stop: sinon.stub()
|
||||
} as unknown as sinon.SinonStubbedInstance<DiscoveryDialer>;
|
||||
|
||||
mockShardReader = {
|
||||
isPeerOnTopic: sinon.stub().resolves(true)
|
||||
} as unknown as sinon.SinonStubbedInstance<IShardReader>;
|
||||
|
||||
mockNetworkMonitor = {
|
||||
start: sinon.stub(),
|
||||
stop: sinon.stub(),
|
||||
isConnected: sinon.stub().returns(true)
|
||||
} as unknown as sinon.SinonStubbedInstance<NetworkMonitor>;
|
||||
|
||||
mockConnectionLimiter = {
|
||||
start: sinon.stub(),
|
||||
stop: sinon.stub()
|
||||
} as unknown as sinon.SinonStubbedInstance<ConnectionLimiter>;
|
||||
|
||||
// Create test data
|
||||
mockPeerId = peerIdFromString(
|
||||
"12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE"
|
||||
);
|
||||
mockMultiaddr = multiaddr(
|
||||
"/ip4/127.0.0.1/tcp/60000/p2p/12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE"
|
||||
);
|
||||
mockStream = {} as Stream;
|
||||
|
||||
// Mock the internal component prototype methods
|
||||
sinon
|
||||
.stub(KeepAliveManager.prototype, "start")
|
||||
.callsFake(() => mockKeepAliveManager.start());
|
||||
sinon
|
||||
.stub(KeepAliveManager.prototype, "stop")
|
||||
.callsFake(() => mockKeepAliveManager.stop());
|
||||
|
||||
sinon
|
||||
.stub(DiscoveryDialer.prototype, "start")
|
||||
.callsFake(() => mockDiscoveryDialer.start());
|
||||
sinon
|
||||
.stub(DiscoveryDialer.prototype, "stop")
|
||||
.callsFake(() => mockDiscoveryDialer.stop());
|
||||
|
||||
sinon
|
||||
.stub(ShardReader.prototype, "isPeerOnTopic")
|
||||
.callsFake((peerId: PeerId, topic: string) =>
|
||||
mockShardReader.isPeerOnTopic(peerId, topic)
|
||||
);
|
||||
|
||||
sinon
|
||||
.stub(NetworkMonitor.prototype, "start")
|
||||
.callsFake(() => mockNetworkMonitor.start());
|
||||
sinon
|
||||
.stub(NetworkMonitor.prototype, "stop")
|
||||
.callsFake(() => mockNetworkMonitor.stop());
|
||||
sinon
|
||||
.stub(NetworkMonitor.prototype, "isConnected")
|
||||
.callsFake(() => mockNetworkMonitor.isConnected());
|
||||
|
||||
sinon
|
||||
.stub(ConnectionLimiter.prototype, "start")
|
||||
.callsFake(() => mockConnectionLimiter.start());
|
||||
sinon
|
||||
.stub(ConnectionLimiter.prototype, "stop")
|
||||
.callsFake(() => mockConnectionLimiter.stop());
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should create ConnectionManager with required options", () => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
|
||||
expect(connectionManager).to.be.instanceOf(ConnectionManager);
|
||||
});
|
||||
|
||||
it("should create ConnectionManager with relay", () => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig,
|
||||
relay
|
||||
});
|
||||
|
||||
expect(connectionManager).to.be.instanceOf(ConnectionManager);
|
||||
});
|
||||
|
||||
it("should set default options when no config provided", () => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
|
||||
expect(connectionManager).to.be.instanceOf(ConnectionManager);
|
||||
// Default options are set internally and tested through behavior
|
||||
});
|
||||
|
||||
it("should merge provided config with defaults", () => {
|
||||
const customConfig = {
|
||||
maxBootstrapPeers: 5,
|
||||
pingKeepAlive: 120
|
||||
};
|
||||
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig,
|
||||
config: customConfig
|
||||
});
|
||||
|
||||
expect(connectionManager).to.be.instanceOf(ConnectionManager);
|
||||
});
|
||||
|
||||
it("should create all internal components", () => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig,
|
||||
relay
|
||||
});
|
||||
|
||||
expect(connectionManager).to.be.instanceOf(ConnectionManager);
|
||||
// Internal components are created and tested through their behavior
|
||||
});
|
||||
});
|
||||
|
||||
describe("start", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig,
|
||||
relay
|
||||
});
|
||||
});
|
||||
|
||||
it("should start all internal components", () => {
|
||||
connectionManager.start();
|
||||
|
||||
expect(mockNetworkMonitor.start.calledOnce).to.be.true;
|
||||
expect(mockDiscoveryDialer.start.calledOnce).to.be.true;
|
||||
expect(mockKeepAliveManager.start.calledOnce).to.be.true;
|
||||
expect(mockConnectionLimiter.start.calledOnce).to.be.true;
|
||||
});
|
||||
|
||||
it("should be safe to call multiple times", () => {
|
||||
connectionManager.start();
|
||||
connectionManager.start();
|
||||
|
||||
expect(mockNetworkMonitor.start.calledTwice).to.be.true;
|
||||
expect(mockDiscoveryDialer.start.calledTwice).to.be.true;
|
||||
expect(mockKeepAliveManager.start.calledTwice).to.be.true;
|
||||
expect(mockConnectionLimiter.start.calledTwice).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("stop", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig,
|
||||
relay
|
||||
});
|
||||
connectionManager.start();
|
||||
});
|
||||
|
||||
it("should stop all internal components", () => {
|
||||
connectionManager.stop();
|
||||
|
||||
expect(mockNetworkMonitor.stop.calledOnce).to.be.true;
|
||||
expect(mockDiscoveryDialer.stop.calledOnce).to.be.true;
|
||||
expect(mockKeepAliveManager.stop.calledOnce).to.be.true;
|
||||
expect(mockConnectionLimiter.stop.calledOnce).to.be.true;
|
||||
});
|
||||
|
||||
it("should be safe to call multiple times", () => {
|
||||
connectionManager.stop();
|
||||
connectionManager.stop();
|
||||
|
||||
expect(mockNetworkMonitor.stop.calledTwice).to.be.true;
|
||||
expect(mockDiscoveryDialer.stop.calledTwice).to.be.true;
|
||||
expect(mockKeepAliveManager.stop.calledTwice).to.be.true;
|
||||
expect(mockConnectionLimiter.stop.calledTwice).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("isConnected", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
|
||||
it("should delegate to networkMonitor.isConnected()", () => {
|
||||
mockNetworkMonitor.isConnected.returns(true);
|
||||
|
||||
const result = connectionManager.isConnected();
|
||||
|
||||
expect(mockNetworkMonitor.isConnected.calledOnce).to.be.true;
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false when network is not connected", () => {
|
||||
mockNetworkMonitor.isConnected.returns(false);
|
||||
|
||||
const result = connectionManager.isConnected();
|
||||
|
||||
expect(mockNetworkMonitor.isConnected.calledOnce).to.be.true;
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("dial", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
|
||||
it("should dial with PeerId and return stream", async () => {
|
||||
const protocolCodecs = ["/waku/2/store/1.0.0"];
|
||||
const libp2pStub = libp2p.dialProtocol as sinon.SinonStub;
|
||||
libp2pStub.resolves(mockStream);
|
||||
|
||||
const result = await connectionManager.dial(mockPeerId, protocolCodecs);
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(libp2pStub.calledWith(mockPeerId, protocolCodecs)).to.be.true;
|
||||
expect(result).to.equal(mockStream);
|
||||
});
|
||||
|
||||
it("should dial with multiaddr and return stream", async () => {
|
||||
const protocolCodecs = ["/waku/2/store/1.0.0"];
|
||||
const libp2pStub = libp2p.dialProtocol as sinon.SinonStub;
|
||||
libp2pStub.resolves(mockStream);
|
||||
|
||||
const result = await connectionManager.dial(
|
||||
mockMultiaddr,
|
||||
protocolCodecs
|
||||
);
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(result).to.equal(mockStream);
|
||||
});
|
||||
|
||||
it("should handle dial errors", async () => {
|
||||
const protocolCodecs = ["/waku/2/store/1.0.0"];
|
||||
const libp2pStub = libp2p.dialProtocol as sinon.SinonStub;
|
||||
const error = new Error("Dial failed");
|
||||
libp2pStub.rejects(error);
|
||||
|
||||
try {
|
||||
await connectionManager.dial(mockPeerId, protocolCodecs);
|
||||
expect.fail("Should have thrown error");
|
||||
} catch (e) {
|
||||
expect(e).to.equal(error);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("hangUp", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
|
||||
it("should hang up with PeerId and return true on success", async () => {
|
||||
const libp2pStub = libp2p.hangUp as sinon.SinonStub;
|
||||
libp2pStub.resolves();
|
||||
|
||||
const result = await connectionManager.hangUp(mockPeerId);
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(libp2pStub.calledWith(mockPeerId)).to.be.true;
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should hang up with multiaddr and return true on success", async () => {
|
||||
const libp2pStub = libp2p.hangUp as sinon.SinonStub;
|
||||
libp2pStub.resolves();
|
||||
|
||||
const result = await connectionManager.hangUp(mockMultiaddr);
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false and handle errors gracefully", async () => {
|
||||
const libp2pStub = libp2p.hangUp as sinon.SinonStub;
|
||||
libp2pStub.rejects(new Error("Hang up failed"));
|
||||
|
||||
const result = await connectionManager.hangUp(mockPeerId);
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("getConnectedPeers", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
|
||||
it("should return empty array when no peers connected", async () => {
|
||||
const libp2pStub = libp2p.getPeers as sinon.SinonStub;
|
||||
libp2pStub.returns([]);
|
||||
|
||||
const result = await connectionManager.getConnectedPeers();
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(result).to.deep.equal([]);
|
||||
});
|
||||
|
||||
it("should return all connected peers without codec filter", async () => {
|
||||
const peer1Id = "12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE";
|
||||
const peer2Id = "12D3KooWNFmTNRsVfUJqGrRMzQiULd4fL2iRKGj4PpNm4F5BhvCw";
|
||||
const mockPeerIds = [
|
||||
peerIdFromString(peer1Id),
|
||||
peerIdFromString(peer2Id)
|
||||
];
|
||||
const mockPeers = [
|
||||
createMockPeer(peer1Id, ["/waku/2/relay/1.0.0"], 50),
|
||||
createMockPeer(peer2Id, ["/waku/2/store/1.0.0"], 100)
|
||||
];
|
||||
|
||||
const libp2pStub = libp2p.getPeers as sinon.SinonStub;
|
||||
libp2pStub.returns(mockPeerIds);
|
||||
|
||||
const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
|
||||
peerStoreStub.onCall(0).resolves(mockPeers[0]);
|
||||
peerStoreStub.onCall(1).resolves(mockPeers[1]);
|
||||
|
||||
const result = await connectionManager.getConnectedPeers();
|
||||
|
||||
expect(libp2pStub.calledOnce).to.be.true;
|
||||
expect(peerStoreStub.calledTwice).to.be.true;
|
||||
expect(result).to.have.length(2);
|
||||
// Should be sorted by ping (peer1 has lower ping)
|
||||
expect(result[0].id.toString()).to.equal(peer1Id);
|
||||
expect(result[1].id.toString()).to.equal(peer2Id);
|
||||
});
|
||||
|
||||
it("should filter peers by codec", async () => {
|
||||
const peer1Id = "12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE";
|
||||
const peer2Id = "12D3KooWNFmTNRsVfUJqGrRMzQiULd4fL2iRKGj4PpNm4F5BhvCw";
|
||||
const mockPeerIds = [
|
||||
peerIdFromString(peer1Id),
|
||||
peerIdFromString(peer2Id)
|
||||
];
|
||||
const mockPeers = [
|
||||
createMockPeer(peer1Id, ["/waku/2/relay/1.0.0"], 50),
|
||||
createMockPeer(peer2Id, ["/waku/2/store/1.0.0"], 100)
|
||||
];
|
||||
|
||||
const libp2pStub = libp2p.getPeers as sinon.SinonStub;
|
||||
libp2pStub.returns(mockPeerIds);
|
||||
|
||||
const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
|
||||
peerStoreStub.onCall(0).resolves(mockPeers[0]);
|
||||
peerStoreStub.onCall(1).resolves(mockPeers[1]);
|
||||
|
||||
const result = await connectionManager.getConnectedPeers(
|
||||
"/waku/2/relay/1.0.0"
|
||||
);
|
||||
|
||||
expect(result).to.have.length(1);
|
||||
expect(result[0].id.toString()).to.equal(peer1Id);
|
||||
});
|
||||
|
||||
it("should handle peerStore errors gracefully", async () => {
|
||||
const peer1Id = "12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE";
|
||||
const peer2Id = "12D3KooWNFmTNRsVfUJqGrRMzQiULd4fL2iRKGj4PpNm4F5BhvCw";
|
||||
const mockPeerIds = [
|
||||
peerIdFromString(peer1Id),
|
||||
peerIdFromString(peer2Id)
|
||||
];
|
||||
const mockPeer = createMockPeer(peer2Id, ["/waku/2/store/1.0.0"], 100);
|
||||
|
||||
const libp2pStub = libp2p.getPeers as sinon.SinonStub;
|
||||
libp2pStub.returns(mockPeerIds);
|
||||
|
||||
const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
|
||||
peerStoreStub.onCall(0).rejects(new Error("Peer not found"));
|
||||
peerStoreStub.onCall(1).resolves(mockPeer);
|
||||
|
||||
const result = await connectionManager.getConnectedPeers();
|
||||
|
||||
expect(result).to.have.length(1);
|
||||
expect(result[0].id.toString()).to.equal(peer2Id);
|
||||
});
|
||||
|
||||
it("should sort peers by ping value", async () => {
|
||||
const peer1Id = "12D3KooWPjceQuRaNMhcrLF6BaW69PdCXB95h6TBpFf9nAmcL8hE";
|
||||
const peer2Id = "12D3KooWNFmTNRsVfUJqGrRMzQiULd4fL2iRKGj4PpNm4F5BhvCw";
|
||||
const peer3Id = "12D3KooWMvU9HGhiEHDWYgJDnLj2Z4JHBQMdxFPgWTNKXjHDYKUW";
|
||||
const mockPeerIds = [
|
||||
peerIdFromString(peer1Id),
|
||||
peerIdFromString(peer2Id),
|
||||
peerIdFromString(peer3Id)
|
||||
];
|
||||
const mockPeers = [
|
||||
createMockPeer(peer1Id, ["/waku/2/relay/1.0.0"], 200),
|
||||
createMockPeer(peer2Id, ["/waku/2/store/1.0.0"], 50),
|
||||
createMockPeer(peer3Id, ["/waku/2/filter/1.0.0"], 150)
|
||||
];
|
||||
|
||||
const libp2pStub = libp2p.getPeers as sinon.SinonStub;
|
||||
libp2pStub.returns(mockPeerIds);
|
||||
|
||||
const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
|
||||
peerStoreStub.onCall(0).resolves(mockPeers[0]);
|
||||
peerStoreStub.onCall(1).resolves(mockPeers[1]);
|
||||
peerStoreStub.onCall(2).resolves(mockPeers[2]);
|
||||
|
||||
const result = await connectionManager.getConnectedPeers();
|
||||
|
||||
expect(result).to.have.length(3);
|
||||
// Should be sorted by ping: peer2 (50), peer3 (150), peer1 (200)
|
||||
expect(result[0].id.toString()).to.equal(peer2Id);
|
||||
expect(result[1].id.toString()).to.equal(peer3Id);
|
||||
expect(result[2].id.toString()).to.equal(peer1Id);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isTopicConfigured", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPeerOnTopic", () => {
|
||||
beforeEach(() => {
|
||||
connectionManager = new ConnectionManager({
|
||||
libp2p,
|
||||
events,
|
||||
networkConfig
|
||||
});
|
||||
});
|
||||
|
||||
it("should delegate to shardReader.isPeerOnTopic()", async () => {
|
||||
const topic = "/waku/2/rs/1/0";
|
||||
mockShardReader.isPeerOnTopic.resolves(true);
|
||||
|
||||
const result = await connectionManager.isPeerOnTopic(mockPeerId, topic);
|
||||
|
||||
expect(mockShardReader.isPeerOnTopic.calledOnce).to.be.true;
|
||||
expect(mockShardReader.isPeerOnTopic.calledWith(mockPeerId, topic)).to.be
|
||||
.true;
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false when peer is not on topic", async () => {
|
||||
const topic = "/waku/2/rs/1/0";
|
||||
mockShardReader.isPeerOnTopic.resolves(false);
|
||||
|
||||
const result = await connectionManager.isPeerOnTopic(mockPeerId, topic);
|
||||
|
||||
expect(mockShardReader.isPeerOnTopic.calledOnce).to.be.true;
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle shardReader errors", async () => {
|
||||
const topic = "/waku/2/rs/1/0";
|
||||
const error = new Error("Shard reader error");
|
||||
mockShardReader.isPeerOnTopic.rejects(error);
|
||||
|
||||
try {
|
||||
await connectionManager.isPeerOnTopic(mockPeerId, topic);
|
||||
expect.fail("Should have thrown error");
|
||||
} catch (e) {
|
||||
expect(e).to.equal(error);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
@ -1,199 +1,170 @@
|
||||
import {
|
||||
type Connection,
|
||||
isPeerId,
|
||||
type Peer,
|
||||
type PeerId,
|
||||
type PeerInfo,
|
||||
type PeerStore,
|
||||
type Stream,
|
||||
TypedEventEmitter
|
||||
} from "@libp2p/interface";
|
||||
import { Multiaddr, multiaddr, MultiaddrInput } from "@multiformats/multiaddr";
|
||||
import { type Peer, type PeerId, type Stream } from "@libp2p/interface";
|
||||
import { MultiaddrInput } from "@multiformats/multiaddr";
|
||||
import {
|
||||
ConnectionManagerOptions,
|
||||
DiscoveryTrigger,
|
||||
DNS_DISCOVERY_TAG,
|
||||
EConnectionStateEvents,
|
||||
EPeersByDiscoveryEvents,
|
||||
IConnectionManager,
|
||||
IConnectionStateEvents,
|
||||
IPeersByDiscoveryEvents,
|
||||
IRelay,
|
||||
PeersByDiscoveryResult,
|
||||
PubsubTopic,
|
||||
ShardInfo
|
||||
IWakuEventEmitter,
|
||||
NetworkConfig,
|
||||
ShardId
|
||||
} from "@waku/interfaces";
|
||||
import { Libp2p, Tags } from "@waku/interfaces";
|
||||
import { decodeRelayShard, shardInfoToPubsubTopics } from "@waku/utils";
|
||||
import { Libp2p } from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import { ConnectionLimiter } from "./connection_limiter.js";
|
||||
import { Dialer } from "./dialer.js";
|
||||
import { DiscoveryDialer } from "./discovery_dialer.js";
|
||||
import { KeepAliveManager } from "./keep_alive_manager.js";
|
||||
import { getPeerPing } from "./utils.js";
|
||||
import { NetworkMonitor } from "./network_monitor.js";
|
||||
import { ShardReader } from "./shard_reader.js";
|
||||
import { getPeerPing, mapToPeerId, mapToPeerIdOrMultiaddr } from "./utils.js";
|
||||
|
||||
const log = new Logger("connection-manager");
|
||||
|
||||
const DEFAULT_MAX_BOOTSTRAP_PEERS_ALLOWED = 1;
|
||||
const DEFAULT_MAX_DIAL_ATTEMPTS_FOR_PEER = 3;
|
||||
const DEFAULT_MAX_PARALLEL_DIALS = 3;
|
||||
|
||||
const DEFAULT_MAX_BOOTSTRAP_PEERS_ALLOWED = 3;
|
||||
const DEFAULT_PING_KEEP_ALIVE_SEC = 5 * 60;
|
||||
const DEFAULT_RELAY_KEEP_ALIVE_SEC = 5 * 60;
|
||||
const DEFAULT_ENABLE_AUTO_RECOVERY = true;
|
||||
const DEFAULT_MAX_CONNECTIONS = 10;
|
||||
const DEFAULT_MAX_DIALING_PEERS = 3;
|
||||
const DEFAULT_FAILED_DIAL_COOLDOWN_SEC = 60;
|
||||
const DEFAULT_DIAL_COOLDOWN_SEC = 10;
|
||||
const DEFAULT_DIAL_TIMEOUT_SEC = 30;
|
||||
|
||||
type ConnectionManagerConstructorOptions = {
|
||||
libp2p: Libp2p;
|
||||
pubsubTopics: PubsubTopic[];
|
||||
events: IWakuEventEmitter;
|
||||
networkConfig: NetworkConfig;
|
||||
relay?: IRelay;
|
||||
config?: Partial<ConnectionManagerOptions>;
|
||||
};
|
||||
|
||||
export class ConnectionManager
|
||||
extends TypedEventEmitter<IPeersByDiscoveryEvents & IConnectionStateEvents>
|
||||
implements IConnectionManager
|
||||
{
|
||||
// TODO(weboko): make it private
|
||||
public readonly pubsubTopics: PubsubTopic[];
|
||||
export class ConnectionManager implements IConnectionManager {
|
||||
private readonly keepAliveManager: KeepAliveManager;
|
||||
private readonly discoveryDialer: DiscoveryDialer;
|
||||
private readonly dialer: Dialer;
|
||||
private readonly shardReader: ShardReader;
|
||||
private readonly networkMonitor: NetworkMonitor;
|
||||
private readonly connectionLimiter: ConnectionLimiter;
|
||||
|
||||
private keepAliveManager: KeepAliveManager;
|
||||
private options: ConnectionManagerOptions;
|
||||
private readonly options: ConnectionManagerOptions;
|
||||
private libp2p: Libp2p;
|
||||
private dialAttemptsForPeer: Map<string, number> = new Map();
|
||||
private dialErrorsForPeer: Map<string, any> = new Map();
|
||||
|
||||
private currentActiveParallelDialCount = 0;
|
||||
private pendingPeerDialQueue: Array<PeerId> = [];
|
||||
|
||||
private isP2PNetworkConnected: boolean = false;
|
||||
|
||||
public isConnected(): boolean {
|
||||
if (globalThis?.navigator && !globalThis?.navigator?.onLine) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return this.isP2PNetworkConnected;
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
this.keepAliveManager.stopAll();
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:connect",
|
||||
this.onEventHandlers["peer:connect"]
|
||||
);
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:disconnect",
|
||||
this.onEventHandlers["peer:disconnect"]
|
||||
);
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:discovery",
|
||||
this.onEventHandlers["peer:discovery"]
|
||||
);
|
||||
this.stopNetworkStatusListener();
|
||||
}
|
||||
|
||||
public async dropConnection(peerId: PeerId): Promise<void> {
|
||||
try {
|
||||
this.keepAliveManager.stop(peerId);
|
||||
await this.libp2p.hangUp(peerId);
|
||||
log.info(`Dropped connection with peer ${peerId.toString()}`);
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Error dropping connection with peer ${peerId.toString()} - ${error}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public async getPeersByDiscovery(): Promise<PeersByDiscoveryResult> {
|
||||
const peersDiscovered = await this.libp2p.peerStore.all();
|
||||
const peersConnected = this.libp2p
|
||||
.getConnections()
|
||||
.map((conn) => conn.remotePeer);
|
||||
|
||||
const peersDiscoveredByBootstrap: Peer[] = [];
|
||||
const peersDiscoveredByPeerExchange: Peer[] = [];
|
||||
const peersDiscoveredByLocal: Peer[] = [];
|
||||
|
||||
const peersConnectedByBootstrap: Peer[] = [];
|
||||
const peersConnectedByPeerExchange: Peer[] = [];
|
||||
const peersConnectedByLocal: Peer[] = [];
|
||||
|
||||
for (const peer of peersDiscovered) {
|
||||
const tags = await this.getTagNamesForPeer(peer.id);
|
||||
|
||||
if (tags.includes(Tags.BOOTSTRAP)) {
|
||||
peersDiscoveredByBootstrap.push(peer);
|
||||
} else if (tags.includes(Tags.PEER_EXCHANGE)) {
|
||||
peersDiscoveredByPeerExchange.push(peer);
|
||||
} else if (tags.includes(Tags.LOCAL)) {
|
||||
peersDiscoveredByLocal.push(peer);
|
||||
}
|
||||
}
|
||||
|
||||
for (const peerId of peersConnected) {
|
||||
const peer = await this.libp2p.peerStore.get(peerId);
|
||||
const tags = await this.getTagNamesForPeer(peerId);
|
||||
|
||||
if (tags.includes(Tags.BOOTSTRAP)) {
|
||||
peersConnectedByBootstrap.push(peer);
|
||||
} else if (tags.includes(Tags.PEER_EXCHANGE)) {
|
||||
peersConnectedByPeerExchange.push(peer);
|
||||
} else if (tags.includes(Tags.LOCAL)) {
|
||||
peersConnectedByLocal.push(peer);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
DISCOVERED: {
|
||||
[Tags.BOOTSTRAP]: peersDiscoveredByBootstrap,
|
||||
[Tags.PEER_EXCHANGE]: peersDiscoveredByPeerExchange,
|
||||
[Tags.LOCAL]: peersDiscoveredByLocal
|
||||
},
|
||||
CONNECTED: {
|
||||
[Tags.BOOTSTRAP]: peersConnectedByBootstrap,
|
||||
[Tags.PEER_EXCHANGE]: peersConnectedByPeerExchange,
|
||||
[Tags.LOCAL]: peersConnectedByLocal
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
public constructor(options: ConnectionManagerConstructorOptions) {
|
||||
super();
|
||||
this.libp2p = options.libp2p;
|
||||
this.pubsubTopics = options.pubsubTopics;
|
||||
|
||||
this.options = {
|
||||
maxDialAttemptsForPeer: DEFAULT_MAX_DIAL_ATTEMPTS_FOR_PEER,
|
||||
maxBootstrapPeersAllowed: DEFAULT_MAX_BOOTSTRAP_PEERS_ALLOWED,
|
||||
maxParallelDials: DEFAULT_MAX_PARALLEL_DIALS,
|
||||
maxBootstrapPeers: DEFAULT_MAX_BOOTSTRAP_PEERS_ALLOWED,
|
||||
maxConnections: DEFAULT_MAX_CONNECTIONS,
|
||||
pingKeepAlive: DEFAULT_PING_KEEP_ALIVE_SEC,
|
||||
relayKeepAlive: DEFAULT_RELAY_KEEP_ALIVE_SEC,
|
||||
enableAutoRecovery: DEFAULT_ENABLE_AUTO_RECOVERY,
|
||||
maxDialingPeers: DEFAULT_MAX_DIALING_PEERS,
|
||||
failedDialCooldown: DEFAULT_FAILED_DIAL_COOLDOWN_SEC,
|
||||
dialCooldown: DEFAULT_DIAL_COOLDOWN_SEC,
|
||||
dialTimeout: DEFAULT_DIAL_TIMEOUT_SEC,
|
||||
...options.config
|
||||
};
|
||||
|
||||
this.keepAliveManager = new KeepAliveManager({
|
||||
relay: options.relay,
|
||||
libp2p: options.libp2p,
|
||||
networkConfig: options.networkConfig,
|
||||
options: {
|
||||
pingKeepAlive: this.options.pingKeepAlive,
|
||||
relayKeepAlive: this.options.relayKeepAlive
|
||||
}
|
||||
});
|
||||
|
||||
this.startEventListeners()
|
||||
.then(() => log.info(`Connection Manager is now running`))
|
||||
.catch((error) =>
|
||||
log.error(`Unexpected error while running service`, error)
|
||||
this.shardReader = new ShardReader({
|
||||
libp2p: options.libp2p,
|
||||
networkConfig: options.networkConfig
|
||||
});
|
||||
|
||||
this.dialer = new Dialer({
|
||||
libp2p: options.libp2p,
|
||||
shardReader: this.shardReader,
|
||||
options: this.options
|
||||
});
|
||||
|
||||
this.discoveryDialer = new DiscoveryDialer({
|
||||
libp2p: options.libp2p,
|
||||
dialer: this.dialer
|
||||
});
|
||||
|
||||
this.networkMonitor = new NetworkMonitor({
|
||||
libp2p: options.libp2p,
|
||||
events: options.events
|
||||
});
|
||||
|
||||
this.connectionLimiter = new ConnectionLimiter({
|
||||
libp2p: options.libp2p,
|
||||
events: options.events,
|
||||
networkMonitor: this.networkMonitor,
|
||||
dialer: this.dialer,
|
||||
options: this.options
|
||||
});
|
||||
}
|
||||
|
||||
public start(): void {
|
||||
this.dialer.start();
|
||||
this.networkMonitor.start();
|
||||
this.discoveryDialer.start();
|
||||
this.keepAliveManager.start();
|
||||
this.connectionLimiter.start();
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
this.dialer.stop();
|
||||
this.networkMonitor.stop();
|
||||
this.discoveryDialer.stop();
|
||||
this.keepAliveManager.stop();
|
||||
this.connectionLimiter.stop();
|
||||
}
|
||||
|
||||
public isConnected(): boolean {
|
||||
return this.networkMonitor.isConnected();
|
||||
}
|
||||
|
||||
public async dial(
|
||||
peer: PeerId | MultiaddrInput,
|
||||
protocolCodecs: string[]
|
||||
): Promise<Stream> {
|
||||
const ma = mapToPeerIdOrMultiaddr(peer);
|
||||
log.info(`Dialing peer ${ma.toString()} with protocols ${protocolCodecs}`);
|
||||
|
||||
// must use libp2p directly instead of dialer because we need to dial the peer right away
|
||||
const stream = await this.libp2p.dialProtocol(ma, protocolCodecs);
|
||||
log.info(`Dialed peer ${ma.toString()} with protocols ${protocolCodecs}`);
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
public async hangUp(peer: PeerId | MultiaddrInput): Promise<boolean> {
|
||||
const peerId = mapToPeerId(peer);
|
||||
|
||||
try {
|
||||
log.info(`Dropping connection with peer ${peerId.toString()}`);
|
||||
await this.libp2p.hangUp(peerId);
|
||||
log.info(`Dropped connection with peer ${peerId.toString()}`);
|
||||
|
||||
return true;
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Error dropping connection with peer ${peerId.toString()} - ${error}`
|
||||
);
|
||||
|
||||
// libp2p emits `peer:discovery` events during its initialization
|
||||
// which means that before the ConnectionManager is initialized, some peers may have been discovered
|
||||
// we will dial the peers in peerStore ONCE before we start to listen to the `peer:discovery` events within the ConnectionManager
|
||||
this.dialPeerStorePeers().catch((error) =>
|
||||
log.error(`Unexpected error while dialing peer store peers`, error)
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public async getConnectedPeers(codec?: string): Promise<Peer[]> {
|
||||
const peerIDs = this.libp2p.getPeers();
|
||||
|
||||
log.info(`Getting connected peers for codec ${codec}`);
|
||||
|
||||
if (peerIDs.length === 0) {
|
||||
log.info(`No connected peers`);
|
||||
return [];
|
||||
}
|
||||
|
||||
@ -207,544 +178,31 @@ export class ConnectionManager
|
||||
})
|
||||
);
|
||||
|
||||
return peers
|
||||
const result = peers
|
||||
.filter((p) => !!p)
|
||||
.filter((p) => (codec ? (p as Peer).protocols.includes(codec) : true))
|
||||
.sort((left, right) => getPeerPing(left) - getPeerPing(right)) as Peer[];
|
||||
|
||||
log.info(`Found ${result.length} connected peers for codec ${codec}`);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
private async dialPeerStorePeers(): Promise<void> {
|
||||
const peerInfos = await this.libp2p.peerStore.all();
|
||||
const dialPromises = [];
|
||||
for (const peerInfo of peerInfos) {
|
||||
if (
|
||||
this.libp2p.getConnections().find((c) => c.remotePeer === peerInfo.id)
|
||||
)
|
||||
continue;
|
||||
|
||||
dialPromises.push(this.attemptDial(peerInfo.id));
|
||||
}
|
||||
try {
|
||||
await Promise.all(dialPromises);
|
||||
} catch (error) {
|
||||
log.error(`Unexpected error while dialing peer store peers`, error);
|
||||
}
|
||||
public async hasShardInfo(peerId: PeerId): Promise<boolean> {
|
||||
return this.shardReader.hasShardInfo(peerId);
|
||||
}
|
||||
|
||||
private async startEventListeners(): Promise<void> {
|
||||
this.startPeerDiscoveryListener();
|
||||
this.startPeerConnectionListener();
|
||||
this.startPeerDisconnectionListener();
|
||||
|
||||
this.startNetworkStatusListener();
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempts to establish a connection with a peer and set up specified protocols.
|
||||
* The method handles both PeerId and Multiaddr inputs, manages connection attempts,
|
||||
* and maintains the connection state.
|
||||
*
|
||||
* The dialing process includes:
|
||||
* 1. Converting input to dialable peer info
|
||||
* 2. Managing parallel dial attempts
|
||||
* 3. Attempting to establish protocol-specific connections
|
||||
* 4. Handling connection failures and retries
|
||||
* 5. Updating the peer store and connection state
|
||||
*
|
||||
* @param {PeerId | MultiaddrInput} peer - The peer to connect to, either as a PeerId or multiaddr
|
||||
* @param {string[]} [protocolCodecs] - Optional array of protocol-specific codec strings to establish
|
||||
* (e.g., for LightPush, Filter, Store protocols)
|
||||
*
|
||||
* @throws {Error} If the multiaddr is missing a peer ID
|
||||
* @throws {Error} If the maximum dial attempts are reached and the peer cannot be dialed
|
||||
* @throws {Error} If there's an error deleting an undialable peer from the peer store
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* // Dial using PeerId
|
||||
* await connectionManager.dialPeer(peerId);
|
||||
*
|
||||
* // Dial using multiaddr with specific protocols
|
||||
* await connectionManager.dialPeer(multiaddr, [
|
||||
* "/vac/waku/relay/2.0.0",
|
||||
* "/vac/waku/lightpush/2.0.0-beta1"
|
||||
* ]);
|
||||
* ```
|
||||
*
|
||||
* @remarks
|
||||
* - The method implements exponential backoff through multiple dial attempts
|
||||
* - Maintains a queue for parallel dial attempts (limited by maxParallelDials)
|
||||
* - Integrates with the KeepAliveManager for connection maintenance
|
||||
* - Updates the peer store and connection state after successful/failed attempts
|
||||
* - If all dial attempts fail, triggers DNS discovery as a fallback
|
||||
*/
|
||||
public async dialPeer(peer: PeerId | MultiaddrInput): Promise<Connection> {
|
||||
let connection: Connection | undefined;
|
||||
let peerId: PeerId | undefined;
|
||||
const peerDialInfo = this.getDialablePeerInfo(peer);
|
||||
const peerIdStr = isPeerId(peerDialInfo)
|
||||
? peerDialInfo.toString()
|
||||
: peerDialInfo.getPeerId()!;
|
||||
|
||||
this.currentActiveParallelDialCount += 1;
|
||||
let dialAttempt = 0;
|
||||
while (dialAttempt < this.options.maxDialAttemptsForPeer) {
|
||||
try {
|
||||
log.info(`Dialing peer ${peerDialInfo} on attempt ${dialAttempt + 1}`);
|
||||
connection = await this.libp2p.dial(peerDialInfo);
|
||||
peerId = connection.remotePeer;
|
||||
|
||||
const tags = await this.getTagNamesForPeer(peerId);
|
||||
// add tag to connection describing discovery mechanism
|
||||
// don't add duplicate tags
|
||||
this.libp2p.getConnections(peerId).forEach((conn) => {
|
||||
conn.tags = Array.from(new Set([...conn.tags, ...tags]));
|
||||
});
|
||||
|
||||
// instead of deleting the peer from the peer store, we set the dial attempt to -1
|
||||
// this helps us keep track of peers that have been dialed before
|
||||
this.dialAttemptsForPeer.set(peerId.toString(), -1);
|
||||
|
||||
// Dialing succeeded, break the loop
|
||||
this.keepAliveManager.start(peerId);
|
||||
break;
|
||||
} catch (error) {
|
||||
if (error instanceof AggregateError) {
|
||||
// Handle AggregateError
|
||||
log.error(`Error dialing peer ${peerIdStr} - ${error.errors}`);
|
||||
} else {
|
||||
// Handle generic error
|
||||
log.error(
|
||||
`Error dialing peer ${peerIdStr} - ${(error as any).message}`
|
||||
);
|
||||
}
|
||||
this.dialErrorsForPeer.set(peerIdStr, error);
|
||||
|
||||
dialAttempt++;
|
||||
this.dialAttemptsForPeer.set(peerIdStr, dialAttempt);
|
||||
}
|
||||
}
|
||||
|
||||
// Always decrease the active dial count and process the dial queue
|
||||
this.currentActiveParallelDialCount--;
|
||||
this.processDialQueue();
|
||||
|
||||
// If max dial attempts reached and dialing failed, delete the peer
|
||||
if (dialAttempt === this.options.maxDialAttemptsForPeer) {
|
||||
try {
|
||||
const error = this.dialErrorsForPeer.get(peerIdStr);
|
||||
|
||||
if (error) {
|
||||
let errorMessage;
|
||||
if (error instanceof AggregateError) {
|
||||
if (!error.errors) {
|
||||
log.warn(`No errors array found for AggregateError`);
|
||||
} else if (error.errors.length === 0) {
|
||||
log.warn(`Errors array is empty for AggregateError`);
|
||||
} else {
|
||||
errorMessage = JSON.stringify(error.errors[0]);
|
||||
}
|
||||
} else {
|
||||
errorMessage = error.message;
|
||||
}
|
||||
|
||||
log.info(
|
||||
`Deleting undialable peer ${peerIdStr} from peer store. Reason: ${errorMessage}`
|
||||
);
|
||||
}
|
||||
|
||||
this.dialErrorsForPeer.delete(peerIdStr);
|
||||
if (peerId) {
|
||||
await this.libp2p.peerStore.delete(peerId);
|
||||
}
|
||||
|
||||
// if it was last available peer - attempt DNS discovery
|
||||
await this.attemptDnsDiscovery();
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`Error deleting undialable peer ${peerIdStr} from peer store - ${error}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (!connection) {
|
||||
throw new Error(`Failed to dial peer ${peerDialInfo}`);
|
||||
}
|
||||
|
||||
return connection;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dial a peer with specific protocols.
|
||||
* This method is a raw proxy to the libp2p dialProtocol method.
|
||||
* @param peer - The peer to connect to, either as a PeerId or multiaddr
|
||||
* @param protocolCodecs - Optional array of protocol-specific codec strings to establish
|
||||
* @returns A stream to the peer
|
||||
*/
|
||||
public async rawDialPeerWithProtocols(
|
||||
peer: PeerId | MultiaddrInput,
|
||||
protocolCodecs: string[]
|
||||
): Promise<Stream> {
|
||||
const peerDialInfo = this.getDialablePeerInfo(peer);
|
||||
return await this.libp2p.dialProtocol(peerDialInfo, protocolCodecs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal utility to extract a PeerId or Multiaddr from a peer input.
|
||||
* This is used internally by the connection manager to handle different peer input formats.
|
||||
* @internal
|
||||
*/
|
||||
private getDialablePeerInfo(
|
||||
peer: PeerId | MultiaddrInput
|
||||
): PeerId | Multiaddr {
|
||||
if (isPeerId(peer)) {
|
||||
return peer;
|
||||
} else {
|
||||
// peer is of MultiaddrInput type
|
||||
const ma = multiaddr(peer);
|
||||
const peerIdStr = ma.getPeerId();
|
||||
if (!peerIdStr) {
|
||||
throw new Error("Failed to dial multiaddr: missing peer ID");
|
||||
}
|
||||
return ma;
|
||||
}
|
||||
}
|
||||
|
||||
private async attemptDnsDiscovery(): Promise<void> {
|
||||
if (this.libp2p.getConnections().length > 0) return;
|
||||
if ((await this.libp2p.peerStore.all()).length > 0) return;
|
||||
|
||||
log.info("Attempting to trigger DNS discovery.");
|
||||
|
||||
const dnsDiscovery = Object.values(this.libp2p.components.components).find(
|
||||
(v: unknown) => {
|
||||
if (v && v.toString) {
|
||||
return v.toString().includes(DNS_DISCOVERY_TAG);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
) as DiscoveryTrigger;
|
||||
|
||||
if (!dnsDiscovery) return;
|
||||
|
||||
await dnsDiscovery.findPeers();
|
||||
}
|
||||
|
||||
private processDialQueue(): void {
|
||||
if (
|
||||
this.pendingPeerDialQueue.length > 0 &&
|
||||
this.currentActiveParallelDialCount < this.options.maxParallelDials
|
||||
) {
|
||||
const peerId = this.pendingPeerDialQueue.shift();
|
||||
if (!peerId) return;
|
||||
this.attemptDial(peerId).catch((error) => {
|
||||
log.error(error);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private startPeerDiscoveryListener(): void {
|
||||
this.libp2p.addEventListener(
|
||||
"peer:discovery",
|
||||
this.onEventHandlers["peer:discovery"]
|
||||
);
|
||||
}
|
||||
|
||||
private startPeerConnectionListener(): void {
|
||||
this.libp2p.addEventListener(
|
||||
"peer:connect",
|
||||
this.onEventHandlers["peer:connect"]
|
||||
);
|
||||
}
|
||||
|
||||
private startPeerDisconnectionListener(): void {
|
||||
// TODO: ensure that these following issues are updated and confirmed
|
||||
/**
|
||||
* NOTE: Event is not being emitted on closing nor losing a connection.
|
||||
* @see https://github.com/libp2p/js-libp2p/issues/939
|
||||
* @see https://github.com/status-im/js-waku/issues/252
|
||||
*
|
||||
* >This event will be triggered anytime we are disconnected from another peer,
|
||||
* >regardless of the circumstances of that disconnection.
|
||||
* >If we happen to have multiple connections to a peer,
|
||||
* >this event will **only** be triggered when the last connection is closed.
|
||||
* @see https://github.com/libp2p/js-libp2p/blob/bad9e8c0ff58d60a78314077720c82ae331cc55b/doc/API.md?plain=1#L2100
|
||||
*/
|
||||
this.libp2p.addEventListener(
|
||||
"peer:disconnect",
|
||||
this.onEventHandlers["peer:disconnect"]
|
||||
);
|
||||
}
|
||||
|
||||
public async attemptDial(peerId: PeerId): Promise<void> {
|
||||
if (!(await this.shouldDialPeer(peerId))) return;
|
||||
|
||||
if (this.currentActiveParallelDialCount >= this.options.maxParallelDials) {
|
||||
this.pendingPeerDialQueue.push(peerId);
|
||||
return;
|
||||
}
|
||||
|
||||
await this.dialPeer(peerId);
|
||||
}
|
||||
|
||||
private onEventHandlers = {
|
||||
"peer:discovery": (evt: CustomEvent<PeerInfo>): void => {
|
||||
void (async () => {
|
||||
const { id: peerId } = evt.detail;
|
||||
|
||||
await this.dispatchDiscoveryEvent(peerId);
|
||||
|
||||
try {
|
||||
await this.attemptDial(peerId);
|
||||
} catch (error) {
|
||||
log.error(`Error dialing peer ${peerId.toString()} : ${error}`);
|
||||
}
|
||||
})();
|
||||
},
|
||||
"peer:connect": (evt: CustomEvent<PeerId>): void => {
|
||||
void (async () => {
|
||||
log.info(`Connected to peer ${evt.detail.toString()}`);
|
||||
|
||||
const peerId = evt.detail;
|
||||
|
||||
this.keepAliveManager.start(peerId);
|
||||
|
||||
const isBootstrap = (await this.getTagNamesForPeer(peerId)).includes(
|
||||
Tags.BOOTSTRAP
|
||||
);
|
||||
|
||||
if (isBootstrap) {
|
||||
const bootstrapConnections = this.libp2p
|
||||
.getConnections()
|
||||
.filter((conn) => conn.tags.includes(Tags.BOOTSTRAP));
|
||||
|
||||
// If we have too many bootstrap connections, drop one
|
||||
if (
|
||||
bootstrapConnections.length > this.options.maxBootstrapPeersAllowed
|
||||
) {
|
||||
await this.dropConnection(peerId);
|
||||
} else {
|
||||
this.dispatchEvent(
|
||||
new CustomEvent<PeerId>(
|
||||
EPeersByDiscoveryEvents.PEER_CONNECT_BOOTSTRAP,
|
||||
{
|
||||
detail: peerId
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
} else {
|
||||
this.dispatchEvent(
|
||||
new CustomEvent<PeerId>(
|
||||
EPeersByDiscoveryEvents.PEER_CONNECT_PEER_EXCHANGE,
|
||||
{
|
||||
detail: peerId
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
this.setP2PNetworkConnected();
|
||||
})();
|
||||
},
|
||||
"peer:disconnect": (evt: CustomEvent<PeerId>): void => {
|
||||
void (async () => {
|
||||
this.keepAliveManager.stop(evt.detail);
|
||||
this.setP2PNetworkDisconnected();
|
||||
})();
|
||||
},
|
||||
"browser:network": (): void => {
|
||||
this.dispatchWakuConnectionEvent();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Checks if the peer should be dialed based on the following conditions:
|
||||
* 1. If the peer is already connected, don't dial
|
||||
* 2. If the peer is not part of any of the configured pubsub topics, don't dial
|
||||
* 3. If the peer is not dialable based on bootstrap status, don't dial
|
||||
* 4. If the peer is already has an active dial attempt, or has been dialed before, don't dial it
|
||||
* @returns true if the peer should be dialed, false otherwise
|
||||
*/
|
||||
private async shouldDialPeer(peerId: PeerId): Promise<boolean> {
|
||||
const isConnected = this.libp2p.getConnections(peerId).length > 0;
|
||||
if (isConnected) {
|
||||
log.warn(`Already connected to peer ${peerId.toString()}. Not dialing.`);
|
||||
return false;
|
||||
}
|
||||
|
||||
const isSameShard = await this.isPeerTopicConfigured(peerId);
|
||||
if (!isSameShard) {
|
||||
const shardInfo = await this.getPeerShardInfo(
|
||||
peerId,
|
||||
this.libp2p.peerStore
|
||||
);
|
||||
|
||||
log.warn(
|
||||
`Discovered peer ${peerId.toString()} with ShardInfo ${shardInfo} is not part of any of the configured pubsub topics (${
|
||||
this.pubsubTopics
|
||||
}).
|
||||
Not dialing.`
|
||||
);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
const isPreferredBasedOnBootstrap =
|
||||
await this.isPeerDialableBasedOnBootstrapStatus(peerId);
|
||||
if (!isPreferredBasedOnBootstrap) {
|
||||
log.warn(
|
||||
`Peer ${peerId.toString()} is not dialable based on bootstrap status. Not dialing.`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
const hasBeenDialed = this.dialAttemptsForPeer.has(peerId.toString());
|
||||
if (hasBeenDialed) {
|
||||
log.warn(
|
||||
`Peer ${peerId.toString()} has already been attempted dial before, or already has a dial attempt in progress, skipping dial`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the peer is dialable based on the following conditions:
|
||||
* 1. If the peer is a bootstrap peer, it is only dialable if the number of current bootstrap connections is less than the max allowed.
|
||||
* 2. If the peer is not a bootstrap peer
|
||||
*/
|
||||
private async isPeerDialableBasedOnBootstrapStatus(
|
||||
peerId: PeerId
|
||||
): Promise<boolean> {
|
||||
const tagNames = await this.getTagNamesForPeer(peerId);
|
||||
|
||||
const isBootstrap = tagNames.some((tagName) => tagName === Tags.BOOTSTRAP);
|
||||
|
||||
if (!isBootstrap) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const currentBootstrapConnections = this.libp2p
|
||||
.getConnections()
|
||||
.filter((conn) => {
|
||||
return conn.tags.find((name) => name === Tags.BOOTSTRAP);
|
||||
}).length;
|
||||
|
||||
return currentBootstrapConnections < this.options.maxBootstrapPeersAllowed;
|
||||
}
|
||||
|
||||
private async dispatchDiscoveryEvent(peerId: PeerId): Promise<void> {
|
||||
const isBootstrap = (await this.getTagNamesForPeer(peerId)).includes(
|
||||
Tags.BOOTSTRAP
|
||||
);
|
||||
|
||||
this.dispatchEvent(
|
||||
new CustomEvent<PeerId>(
|
||||
isBootstrap
|
||||
? EPeersByDiscoveryEvents.PEER_DISCOVERY_BOOTSTRAP
|
||||
: EPeersByDiscoveryEvents.PEER_DISCOVERY_PEER_EXCHANGE,
|
||||
{
|
||||
detail: peerId
|
||||
}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches the tag names for a given peer
|
||||
*/
|
||||
private async getTagNamesForPeer(peerId: PeerId): Promise<string[]> {
|
||||
try {
|
||||
const peer = await this.libp2p.peerStore.get(peerId);
|
||||
return Array.from(peer.tags.keys());
|
||||
} catch (error) {
|
||||
log.error(`Failed to get peer ${peerId}, error: ${error}`);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
private async isPeerTopicConfigured(peerId: PeerId): Promise<boolean> {
|
||||
const shardInfo = await this.getPeerShardInfo(
|
||||
peerId,
|
||||
this.libp2p.peerStore
|
||||
);
|
||||
|
||||
// If there's no shard information, simply return true
|
||||
if (!shardInfo) return true;
|
||||
|
||||
const pubsubTopics = shardInfoToPubsubTopics(shardInfo);
|
||||
|
||||
const isTopicConfigured = pubsubTopics.some((topic) =>
|
||||
this.pubsubTopics.includes(topic)
|
||||
);
|
||||
return isTopicConfigured;
|
||||
}
|
||||
|
||||
private async getPeerShardInfo(
|
||||
public async isPeerOnTopic(
|
||||
peerId: PeerId,
|
||||
peerStore: PeerStore
|
||||
): Promise<ShardInfo | undefined> {
|
||||
const peer = await peerStore.get(peerId);
|
||||
const shardInfoBytes = peer.metadata.get("shardInfo");
|
||||
if (!shardInfoBytes) return undefined;
|
||||
return decodeRelayShard(shardInfoBytes);
|
||||
pubsubTopic: string
|
||||
): Promise<boolean> {
|
||||
return this.shardReader.isPeerOnTopic(peerId, pubsubTopic);
|
||||
}
|
||||
|
||||
private startNetworkStatusListener(): void {
|
||||
try {
|
||||
globalThis.addEventListener(
|
||||
"online",
|
||||
this.onEventHandlers["browser:network"]
|
||||
);
|
||||
globalThis.addEventListener(
|
||||
"offline",
|
||||
this.onEventHandlers["browser:network"]
|
||||
);
|
||||
} catch (err) {
|
||||
log.error(`Failed to start network listener: ${err}`);
|
||||
}
|
||||
}
|
||||
|
||||
private stopNetworkStatusListener(): void {
|
||||
try {
|
||||
globalThis.removeEventListener(
|
||||
"online",
|
||||
this.onEventHandlers["browser:network"]
|
||||
);
|
||||
globalThis.removeEventListener(
|
||||
"offline",
|
||||
this.onEventHandlers["browser:network"]
|
||||
);
|
||||
} catch (err) {
|
||||
log.error(`Failed to stop network listener: ${err}`);
|
||||
}
|
||||
}
|
||||
|
||||
private setP2PNetworkConnected(): void {
|
||||
if (!this.isP2PNetworkConnected) {
|
||||
this.isP2PNetworkConnected = true;
|
||||
this.dispatchWakuConnectionEvent();
|
||||
}
|
||||
}
|
||||
|
||||
private setP2PNetworkDisconnected(): void {
|
||||
if (
|
||||
this.isP2PNetworkConnected &&
|
||||
this.libp2p.getConnections().length === 0
|
||||
) {
|
||||
this.isP2PNetworkConnected = false;
|
||||
this.dispatchWakuConnectionEvent();
|
||||
}
|
||||
}
|
||||
|
||||
private dispatchWakuConnectionEvent(): void {
|
||||
this.dispatchEvent(
|
||||
new CustomEvent<boolean>(EConnectionStateEvents.CONNECTION_STATUS, {
|
||||
detail: this.isConnected()
|
||||
})
|
||||
);
|
||||
public async isPeerOnShard(
|
||||
peerId: PeerId,
|
||||
shardId: ShardId
|
||||
): Promise<boolean> {
|
||||
return this.shardReader.isPeerOnShard(peerId, shardId);
|
||||
}
|
||||
}
|
||||
|
||||
530
packages/core/src/lib/connection_manager/dialer.spec.ts
Normal file
530
packages/core/src/lib/connection_manager/dialer.spec.ts
Normal file
@ -0,0 +1,530 @@
|
||||
import { PeerId } from "@libp2p/interface";
|
||||
import { ConnectionManagerOptions, Libp2p } from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { Dialer } from "./dialer.js";
|
||||
import { ShardReader } from "./shard_reader.js";
|
||||
|
||||
describe("Dialer", () => {
|
||||
let libp2p: Libp2p;
|
||||
let dialer: Dialer;
|
||||
let mockShardReader: sinon.SinonStubbedInstance<ShardReader>;
|
||||
let mockPeerId: PeerId;
|
||||
let mockPeerId2: PeerId;
|
||||
let clock: sinon.SinonFakeTimers;
|
||||
let mockOptions: ConnectionManagerOptions;
|
||||
|
||||
const createMockPeerId = (id: string): PeerId =>
|
||||
({
|
||||
toString: () => id,
|
||||
equals: (other: PeerId) => other.toString() === id
|
||||
}) as PeerId;
|
||||
|
||||
beforeEach(() => {
|
||||
libp2p = {
|
||||
dial: sinon.stub().resolves(),
|
||||
getPeers: sinon.stub().returns([])
|
||||
} as unknown as Libp2p;
|
||||
|
||||
mockShardReader = {
|
||||
hasShardInfo: sinon.stub().resolves(false),
|
||||
isPeerOnCluster: sinon.stub().resolves(true)
|
||||
} as unknown as sinon.SinonStubbedInstance<ShardReader>;
|
||||
|
||||
mockOptions = {
|
||||
maxBootstrapPeers: 1,
|
||||
pingKeepAlive: 300,
|
||||
relayKeepAlive: 300,
|
||||
maxDialingPeers: 3,
|
||||
failedDialCooldown: 60,
|
||||
dialCooldown: 10,
|
||||
dialTimeout: 30,
|
||||
maxConnections: 10,
|
||||
enableAutoRecovery: true
|
||||
};
|
||||
|
||||
mockPeerId = createMockPeerId("12D3KooWTest1");
|
||||
mockPeerId2 = createMockPeerId("12D3KooWTest2");
|
||||
|
||||
clock = sinon.useFakeTimers({
|
||||
now: 1000000000000
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (dialer) {
|
||||
dialer.stop();
|
||||
}
|
||||
clock.restore();
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should create dialer with libp2p and shardReader", () => {
|
||||
dialer = new Dialer({
|
||||
libp2p,
|
||||
shardReader: mockShardReader,
|
||||
options: mockOptions
|
||||
});
|
||||
|
||||
expect(dialer).to.be.instanceOf(Dialer);
|
||||
});
|
||||
});
|
||||
|
||||
describe("start", () => {
|
||||
beforeEach(() => {
|
||||
dialer = new Dialer({
|
||||
libp2p,
|
||||
shardReader: mockShardReader,
|
||||
options: mockOptions
|
||||
});
|
||||
});
|
||||
|
||||
it("should start the dialing interval", () => {
|
||||
dialer.start();
|
||||
|
||||
expect(clock.countTimers()).to.be.greaterThan(0);
|
||||
});
|
||||
|
||||
it("should clear dial history on start", () => {
|
||||
dialer.start();
|
||||
|
||||
void dialer.dial(mockPeerId);
|
||||
|
||||
dialer.stop();
|
||||
dialer.start();
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resetHistory();
|
||||
|
||||
void dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should not create multiple intervals when called multiple times", () => {
|
||||
dialer.start();
|
||||
dialer.start();
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("stop", () => {
|
||||
beforeEach(() => {
|
||||
dialer = new Dialer({
|
||||
libp2p,
|
||||
shardReader: mockShardReader,
|
||||
options: mockOptions
|
||||
});
|
||||
dialer.start();
|
||||
});
|
||||
|
||||
it("should clear the dialing interval", () => {
|
||||
expect(clock.countTimers()).to.be.greaterThan(0);
|
||||
|
||||
dialer.stop();
|
||||
|
||||
expect(clock.countTimers()).to.equal(0);
|
||||
});
|
||||
|
||||
it("should clear dial history on stop", () => {
|
||||
dialer.stop();
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resetHistory();
|
||||
|
||||
dialer.start();
|
||||
void dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should be safe to call multiple times", () => {
|
||||
dialer.stop();
|
||||
dialer.stop();
|
||||
|
||||
expect(clock.countTimers()).to.equal(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("dial", () => {
|
||||
beforeEach(() => {
|
||||
dialer = new Dialer({
|
||||
libp2p,
|
||||
shardReader: mockShardReader,
|
||||
options: mockOptions
|
||||
});
|
||||
dialer.start();
|
||||
});
|
||||
|
||||
it("should dial peer immediately when queue is empty", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resolves();
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
});
|
||||
|
||||
it("should add peer to queue when queue is not empty", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
|
||||
let resolveFirstDial: () => void;
|
||||
const firstDialPromise = new Promise<void>((resolve) => {
|
||||
resolveFirstDial = resolve;
|
||||
});
|
||||
dialStub.onFirstCall().returns(firstDialPromise);
|
||||
dialStub.onSecondCall().resolves();
|
||||
|
||||
const firstDialCall = dialer.dial(mockPeerId);
|
||||
|
||||
await dialer.dial(mockPeerId2);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
|
||||
resolveFirstDial!();
|
||||
await firstDialCall;
|
||||
|
||||
clock.tick(500);
|
||||
await Promise.resolve();
|
||||
|
||||
expect(dialStub.calledTwice).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId2)).to.be.true;
|
||||
});
|
||||
|
||||
it("should skip peer when already connected", async () => {
|
||||
const getPeersStub = libp2p.getPeers as sinon.SinonStub;
|
||||
getPeersStub.returns([mockPeerId]);
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should skip peer when dialed recently", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resolves();
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
|
||||
dialStub.resetHistory();
|
||||
|
||||
clock.tick(5000);
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should skip peer when failed to dial recently", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.rejects(new Error("Dial failed"));
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
|
||||
dialStub.resetHistory();
|
||||
dialStub.resolves();
|
||||
|
||||
clock.tick(30000);
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should populate queue if has active dial", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
const mockPeerId3 = createMockPeerId("12D3KooWTest3");
|
||||
|
||||
let resolveFirstDial: () => void;
|
||||
const firstDialPromise = new Promise<void>((resolve) => {
|
||||
resolveFirstDial = resolve;
|
||||
});
|
||||
dialStub.onFirstCall().returns(firstDialPromise);
|
||||
dialStub.onSecondCall().resolves();
|
||||
dialStub.onThirdCall().resolves();
|
||||
|
||||
const firstDialCall = dialer.dial(mockPeerId);
|
||||
|
||||
await dialer.dial(mockPeerId2);
|
||||
await dialer.dial(mockPeerId3);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
|
||||
resolveFirstDial!();
|
||||
await firstDialCall;
|
||||
|
||||
clock.tick(500);
|
||||
await Promise.resolve();
|
||||
|
||||
expect(dialStub.callCount).to.equal(3);
|
||||
expect(dialStub.calledWith(mockPeerId2)).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId3)).to.be.true;
|
||||
});
|
||||
|
||||
it("should allow redial after cooldown period", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resolves();
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
|
||||
clock.tick(10001);
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledTwice).to.be.true;
|
||||
});
|
||||
|
||||
it("should skip peer when not on same cluster", async () => {
|
||||
mockShardReader.hasShardInfo.resolves(true);
|
||||
mockShardReader.isPeerOnCluster.resolves(false);
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.called).to.be.false;
|
||||
expect(mockShardReader.hasShardInfo.calledWith(mockPeerId)).to.be.true;
|
||||
expect(mockShardReader.isPeerOnCluster.calledWith(mockPeerId)).to.be.true;
|
||||
});
|
||||
|
||||
it("should dial peer when on same shard", async () => {
|
||||
mockShardReader.hasShardInfo.resolves(true);
|
||||
mockShardReader.isPeerOnCluster.resolves(true);
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resolves();
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
expect(mockShardReader.hasShardInfo.calledWith(mockPeerId)).to.be.true;
|
||||
expect(mockShardReader.isPeerOnCluster.calledWith(mockPeerId)).to.be.true;
|
||||
});
|
||||
|
||||
it("should dial peer when no shard info available", async () => {
|
||||
mockShardReader.hasShardInfo.resolves(false);
|
||||
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.resolves();
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
expect(mockShardReader.hasShardInfo.calledWith(mockPeerId)).to.be.true;
|
||||
expect(mockShardReader.isPeerOnCluster.called).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle dial errors gracefully", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.rejects(new Error("Dial failed"));
|
||||
|
||||
await dialer.dial(mockPeerId);
|
||||
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
expect(dialStub.calledWith(mockPeerId)).to.be.true;
|
||||
});
|
||||
|
||||
it("should allow redial after failed dial cooldown expires", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
dialStub.onFirstCall().rejects(new Error("Dial failed"));
|
||||
dialStub.onSecondCall().resolves();
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
clock.tick(60001);
|
||||
await dialer.dial(mockPeerId);
|
||||
expect(dialStub.calledTwice).to.be.true;
|
||||
});
|
||||
|
||||
it("should handle queue overflow by adding peers to queue", async () => {
|
||||
const dialStub = libp2p.dial as sinon.SinonStub;
|
||||
const peers = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
peers.push(createMockPeerId(`12D3KooWTest${i}`));
|
||||
}
|
||||
let resolveFirstDial: () => void;
|
||||
const firstDialPromise = new Promise<void>((resolve) => {
|
||||
resolveFirstDial = resolve;
|
||||
});
|
||||
dialStub.onFirstCall().returns(firstDialPromise);
|
||||
dialStub.resolves();
|
||||
const firstDialCall = dialer.dial(peers[0]);
|
||||
for (let i = 1; i < 100; i++) {
|
||||
await dialer.dial(peers[i]);
|
||||
}
|
||||
expect(dialStub.calledOnce).to.be.true;
|
||||
resolveFirstDial!();
|
||||
await firstDialCall;
|
||||
clock.tick(500);
|
||||
await Promise.resolve();
|
||||
expect(dialStub.callCount).to.be.greaterThan(1);
|
||||
});
|
||||
});
|
||||
|
||||
// Tests for the Dialer's 500ms interval queue processor.
describe("queue processing", () => {
  beforeEach(() => {
    dialer = new Dialer({
      libp2p,
      shardReader: mockShardReader,
      options: mockOptions
    });
    dialer.start();
  });

  it("should process queue every 500ms", async () => {
    const dialStub = libp2p.dial as sinon.SinonStub;
    dialStub.resolves();

    await dialer.dial(mockPeerId);
    expect(dialStub.calledOnce).to.be.true;
    expect(dialStub.calledWith(mockPeerId)).to.be.true;

    // Second dial from an idle dialer is also immediate.
    dialStub.resetHistory();
    await dialer.dial(mockPeerId2);
    expect(dialStub.calledOnce).to.be.true;
    expect(dialStub.calledWith(mockPeerId2)).to.be.true;
  });

  it("should process up to 3 peers at once", async () => {
    const dialStub = libp2p.dial as sinon.SinonStub;
    const mockPeerId3 = createMockPeerId("12D3KooWTest3");
    const mockPeerId4 = createMockPeerId("12D3KooWTest4");
    const mockPeerId5 = createMockPeerId("12D3KooWTest5");

    dialStub.resolves();

    // Each dial resolves before the next, so all five go through.
    await dialer.dial(mockPeerId);
    await dialer.dial(mockPeerId2);
    await dialer.dial(mockPeerId3);
    await dialer.dial(mockPeerId4);
    await dialer.dial(mockPeerId5);

    expect(dialStub.callCount).to.equal(5);
    expect(dialStub.calledWith(mockPeerId)).to.be.true;
    expect(dialStub.calledWith(mockPeerId2)).to.be.true;
    expect(dialStub.calledWith(mockPeerId3)).to.be.true;
    expect(dialStub.calledWith(mockPeerId4)).to.be.true;
    expect(dialStub.calledWith(mockPeerId5)).to.be.true;
  });

  it("should not process empty queue", () => {
    const dialStub = libp2p.dial as sinon.SinonStub;

    // Interval fires with an empty queue: no dial attempts expected.
    clock.tick(500);

    expect(dialStub.called).to.be.false;
  });

  it("should handle queue processing errors gracefully", async () => {
    const dialStub = libp2p.dial as sinon.SinonStub;

    // First dial stays pending so the second dial() call is queued.
    let resolveFirstDial: () => void;
    const firstDialPromise = new Promise<void>((resolve) => {
      resolveFirstDial = resolve;
    });
    dialStub.onFirstCall().returns(firstDialPromise);
    dialStub.onSecondCall().rejects(new Error("Queue dial failed"));

    const firstDialPromise2 = dialer.dial(mockPeerId);
    await dialer.dial(mockPeerId2);

    resolveFirstDial!();
    await firstDialPromise2;

    // Queue processor dials the queued peer; its rejection must be swallowed.
    clock.tick(500);
    await Promise.resolve();

    expect(dialStub.calledTwice).to.be.true;
  });
});
|
||||
|
||||
// A failing ShardReader must cause the peer to be skipped, never a crash.
describe("shard reader integration", () => {
  beforeEach(() => {
    dialer = new Dialer({
      libp2p,
      shardReader: mockShardReader,
      options: mockOptions
    });
    dialer.start();
  });

  it("should handle shard reader errors gracefully", async () => {
    mockShardReader.hasShardInfo.rejects(new Error("Shard reader error"));

    const dialStub = libp2p.dial as sinon.SinonStub;

    await dialer.dial(mockPeerId);

    // Error during shard check => peer skipped, no dial attempted.
    expect(dialStub.called).to.be.false;
    expect(mockShardReader.hasShardInfo.calledWith(mockPeerId)).to.be.true;
  });

  it("should handle network check errors gracefully", async () => {
    mockShardReader.hasShardInfo.resolves(true);
    mockShardReader.isPeerOnCluster.rejects(new Error("Network check error"));

    const dialStub = libp2p.dial as sinon.SinonStub;

    await dialer.dial(mockPeerId);

    // Error during cluster check => peer skipped, no dial attempted.
    expect(dialStub.called).to.be.false;
    expect(mockShardReader.hasShardInfo.calledWith(mockPeerId)).to.be.true;
    expect(mockShardReader.isPeerOnCluster.calledWith(mockPeerId)).to.be.true;
  });
});
|
||||
|
||||
// End-to-end lifecycle tests: construct, start, dial, stop.
describe("integration", () => {
  it("should handle complete dial lifecycle", async () => {
    dialer = new Dialer({
      libp2p,
      shardReader: mockShardReader,
      options: mockOptions
    });
    dialer.start();

    const dialStub = libp2p.dial as sinon.SinonStub;
    dialStub.resolves();

    await dialer.dial(mockPeerId);

    expect(dialStub.calledOnce).to.be.true;
    expect(dialStub.calledWith(mockPeerId)).to.be.true;

    dialer.stop();
  });

  it("should handle multiple peers with different shard configurations", async () => {
    dialer = new Dialer({
      libp2p,
      shardReader: mockShardReader,
      options: mockOptions
    });
    dialer.start();

    const dialStub = libp2p.dial as sinon.SinonStub;
    dialStub.resolves();

    // Peer 1: shard info present and on our cluster.
    mockShardReader.hasShardInfo.withArgs(mockPeerId).resolves(true);
    mockShardReader.isPeerOnCluster.withArgs(mockPeerId).resolves(true);

    // Peer 2: no shard info at all (still dialable).
    mockShardReader.hasShardInfo.withArgs(mockPeerId2).resolves(false);

    await dialer.dial(mockPeerId);
    await dialer.dial(mockPeerId2);

    expect(dialStub.calledTwice).to.be.true;
    expect(dialStub.calledWith(mockPeerId)).to.be.true;
    expect(dialStub.calledWith(mockPeerId2)).to.be.true;

    dialer.stop();
  });
});
|
||||
});
|
||||
203
packages/core/src/lib/connection_manager/dialer.ts
Normal file
203
packages/core/src/lib/connection_manager/dialer.ts
Normal file
@ -0,0 +1,203 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import { ConnectionManagerOptions, Libp2p } from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import { ShardReader } from "./shard_reader.js";
|
||||
|
||||
// Module-scoped logger for the dialer component.
const log = new Logger("dialer");

/** Dependencies injected into {@link Dialer}. */
type DialerConstructorOptions = {
  libp2p: Libp2p;
  shardReader: ShardReader;
  options: ConnectionManagerOptions;
};

/** Lifecycle + dial contract implemented by {@link Dialer}. */
interface IDialer {
  start(): void;
  stop(): void;
  dial(peerId: PeerId): Promise<void>;
}
|
||||
|
||||
export class Dialer implements IDialer {
|
||||
private readonly libp2p: Libp2p;
|
||||
private readonly shardReader: ShardReader;
|
||||
private readonly options: ConnectionManagerOptions;
|
||||
|
||||
private dialingQueue: Map<string, PeerId> = new Map();
|
||||
private dialHistory: Map<string, number> = new Map();
|
||||
private failedDials: Map<string, number> = new Map();
|
||||
private dialingInterval: NodeJS.Timeout | null = null;
|
||||
|
||||
private isProcessing = false;
|
||||
private isImmediateDialing = false;
|
||||
|
||||
public constructor(options: DialerConstructorOptions) {
|
||||
this.libp2p = options.libp2p;
|
||||
this.shardReader = options.shardReader;
|
||||
this.options = options.options;
|
||||
}
|
||||
|
||||
public start(): void {
|
||||
log.info("Starting dialer");
|
||||
|
||||
if (!this.dialingInterval) {
|
||||
this.dialingInterval = setInterval(() => {
|
||||
void this.processQueue();
|
||||
}, 500);
|
||||
}
|
||||
|
||||
this.dialHistory.clear();
|
||||
this.failedDials.clear();
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
log.info("Stopping dialer");
|
||||
|
||||
if (this.dialingInterval) {
|
||||
clearInterval(this.dialingInterval);
|
||||
this.dialingInterval = null;
|
||||
}
|
||||
|
||||
this.dialHistory.clear();
|
||||
this.failedDials.clear();
|
||||
}
|
||||
|
||||
public async dial(peerId: PeerId): Promise<void> {
|
||||
const shouldSkip = await this.shouldSkipPeer(peerId);
|
||||
|
||||
if (shouldSkip) {
|
||||
log.info(`Skipping peer: ${peerId}`);
|
||||
return;
|
||||
}
|
||||
|
||||
const isEmptyQueue = this.dialingQueue.size === 0;
|
||||
const isNotDialing = !this.isProcessing && !this.isImmediateDialing;
|
||||
|
||||
// If queue is empty and we're not currently processing, dial immediately
|
||||
if (isEmptyQueue && isNotDialing) {
|
||||
this.isImmediateDialing = true;
|
||||
log.info("Dialed peer immediately");
|
||||
await this.dialPeer(peerId);
|
||||
this.isImmediateDialing = false;
|
||||
log.info("Released immediate dial lock");
|
||||
} else {
|
||||
this.dialingQueue.set(peerId.toString(), peerId);
|
||||
log.info(
|
||||
`Added peer to dialing queue, queue size: ${this.dialingQueue.size}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private async processQueue(): Promise<void> {
|
||||
if (this.dialingQueue.size === 0 || this.isProcessing) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.isProcessing = true;
|
||||
|
||||
try {
|
||||
const allPeers = Array.from(this.dialingQueue.values());
|
||||
const peersToDial = allPeers.slice(0, this.options.maxDialingPeers);
|
||||
|
||||
peersToDial.forEach((peer) => this.dialingQueue.delete(peer.toString()));
|
||||
|
||||
log.info(
|
||||
`Processing dial queue: dialing ${peersToDial.length} peers, ${this.dialingQueue.size} remaining in queue`
|
||||
);
|
||||
|
||||
await Promise.all(peersToDial.map((peerId) => this.dialPeer(peerId)));
|
||||
} finally {
|
||||
this.isProcessing = false;
|
||||
}
|
||||
}
|
||||
|
||||
private async dialPeer(peerId: PeerId): Promise<void> {
|
||||
try {
|
||||
log.info(`Dialing peer from queue: ${peerId}`);
|
||||
|
||||
await Promise.race([
|
||||
this.libp2p.dial(peerId),
|
||||
new Promise<never>((_, reject) =>
|
||||
setTimeout(
|
||||
() =>
|
||||
reject(
|
||||
new Error(`Dial timeout after ${this.options.dialTimeout}s`)
|
||||
),
|
||||
this.options.dialTimeout * 1000
|
||||
)
|
||||
)
|
||||
]);
|
||||
|
||||
this.dialHistory.set(peerId.toString(), Date.now());
|
||||
this.failedDials.delete(peerId.toString());
|
||||
|
||||
log.info(`Successfully dialed peer from queue: ${peerId}`);
|
||||
} catch (error) {
|
||||
log.error(`Error dialing peer ${peerId}`, error);
|
||||
this.failedDials.set(peerId.toString(), Date.now());
|
||||
}
|
||||
}
|
||||
|
||||
private async shouldSkipPeer(peerId: PeerId): Promise<boolean> {
|
||||
const hasConnection = this.libp2p.getPeers().some((p) => p.equals(peerId));
|
||||
if (hasConnection) {
|
||||
log.info(`Skipping peer ${peerId} - already connected`);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (this.isRecentlyDialed(peerId)) {
|
||||
log.info(
|
||||
`Skipping peer ${peerId} - already dialed in the last 10 seconds`
|
||||
);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (this.isRecentlyFailed(peerId)) {
|
||||
log.info(`Skipping peer ${peerId} - recently failed to dial`);
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
const hasShardInfo = await this.shardReader.hasShardInfo(peerId);
|
||||
if (!hasShardInfo) {
|
||||
log.info(`Skipping peer ${peerId} - no shard info`);
|
||||
return false;
|
||||
}
|
||||
|
||||
const isOnSameCluster = await this.shardReader.isPeerOnCluster(peerId);
|
||||
if (!isOnSameCluster) {
|
||||
log.info(`Skipping peer ${peerId} - not on same cluster`);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (error) {
|
||||
log.error(`Error checking shard info for peer ${peerId}`, error);
|
||||
return true; // Skip peer when there's an error
|
||||
}
|
||||
}
|
||||
|
||||
private isRecentlyDialed(peerId: PeerId): boolean {
|
||||
const lastDialed = this.dialHistory.get(peerId.toString());
|
||||
if (
|
||||
lastDialed &&
|
||||
Date.now() - lastDialed < this.options.dialCooldown * 1000
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
private isRecentlyFailed(peerId: PeerId): boolean {
|
||||
const lastFailed = this.failedDials.get(peerId.toString());
|
||||
if (
|
||||
lastFailed &&
|
||||
Date.now() - lastFailed < this.options.failedDialCooldown * 1000
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,304 @@
|
||||
import { PeerId, PeerInfo } from "@libp2p/interface";
|
||||
import { expect } from "chai";
|
||||
import { Libp2p } from "libp2p";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { Dialer } from "./dialer.js";
|
||||
import { DiscoveryDialer } from "./discovery_dialer.js";
|
||||
|
||||
// Unit tests for DiscoveryDialer: listens to libp2p "peer:discovery",
// updates the peer store, then delegates dialing to the injected Dialer.
describe("DiscoveryDialer", () => {
  let libp2p: Libp2p;
  let discoveryDialer: DiscoveryDialer;
  let dialer: sinon.SinonStubbedInstance<Dialer>;
  let mockPeerId: PeerId;
  let mockPeerInfo: PeerInfo;

  beforeEach(() => {
    // Minimal libp2p surface: event wiring + peer store.
    libp2p = {
      addEventListener: sinon.stub(),
      removeEventListener: sinon.stub(),
      peerStore: {
        get: sinon.stub().resolves(undefined),
        save: sinon.stub().resolves(),
        merge: sinon.stub().resolves()
      }
    } as unknown as Libp2p;

    dialer = {
      start: sinon.stub(),
      stop: sinon.stub(),
      dial: sinon.stub().resolves()
    } as unknown as sinon.SinonStubbedInstance<Dialer>;

    mockPeerId = {
      toString: () => "mock-peer-id",
      equals: (other: PeerId) => other.toString() === "mock-peer-id"
    } as PeerId;

    mockPeerInfo = {
      id: mockPeerId,
      multiaddrs: []
    } as PeerInfo;
  });

  afterEach(() => {
    if (discoveryDialer) {
      discoveryDialer.stop();
    }
    sinon.restore();
  });

  describe("constructor", () => {
    it("should create an instance with libp2p and dialer", () => {
      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
      expect(discoveryDialer).to.be.instanceOf(DiscoveryDialer);
    });
  });

  describe("start", () => {
    beforeEach(() => {
      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
    });

    it("should add event listener for peer:discovery", () => {
      discoveryDialer.start();

      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      expect(addEventListenerStub.calledOnce).to.be.true;
      expect(
        addEventListenerStub.calledWith("peer:discovery", sinon.match.func)
      ).to.be.true;
    });

    it("should be safe to call multiple times", () => {
      discoveryDialer.start();
      discoveryDialer.start();

      // start() does not deduplicate: one registration per call.
      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      expect(addEventListenerStub.calledTwice).to.be.true;
    });
  });

  describe("stop", () => {
    beforeEach(() => {
      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
      discoveryDialer.start();
    });

    it("should remove event listener for peer:discovery", () => {
      discoveryDialer.stop();

      const removeEventListenerStub =
        libp2p.removeEventListener as sinon.SinonStub;
      expect(removeEventListenerStub.calledOnce).to.be.true;
      expect(
        removeEventListenerStub.calledWith("peer:discovery", sinon.match.func)
      ).to.be.true;
    });

    it("should be safe to call multiple times", () => {
      discoveryDialer.stop();
      discoveryDialer.stop();

      const removeEventListenerStub =
        libp2p.removeEventListener as sinon.SinonStub;
      expect(removeEventListenerStub.calledTwice).to.be.true;
    });
  });

  describe("peer discovery handling", () => {
    let eventHandler: (event: CustomEvent<PeerInfo>) => Promise<void>;

    beforeEach(() => {
      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
      discoveryDialer.start();

      // Capture the handler the class registered so tests can invoke it.
      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      eventHandler = addEventListenerStub.getCall(0).args[1];
    });

    it("should dial peer when peer is discovered", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(undefined);

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      await eventHandler(mockEvent);

      expect(dialer.dial.calledOnce).to.be.true;
      expect(dialer.dial.calledWith(mockPeerId)).to.be.true;
    });

    it("should handle dial errors gracefully", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(undefined);

      dialer.dial.rejects(new Error("Dial failed"));

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      // Rejection must be swallowed inside the handler.
      await eventHandler(mockEvent);

      expect(dialer.dial.calledOnce).to.be.true;
      expect(dialer.dial.calledWith(mockPeerId)).to.be.true;
    });

    it("should update peer store before dialing", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(undefined);

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      await eventHandler(mockEvent);

      expect(peerStoreStub.calledWith(mockPeerId)).to.be.true;
      expect(dialer.dial.calledOnce).to.be.true;
    });

    it("should handle peer store errors gracefully", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.rejects(new Error("Peer store error"));

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      await eventHandler(mockEvent);

      // Store failure must not prevent the dial.
      expect(dialer.dial.calledOnce).to.be.true;
    });
  });

  describe("updatePeerStore", () => {
    let eventHandler: (event: CustomEvent<PeerInfo>) => Promise<void>;

    beforeEach(() => {
      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
      discoveryDialer.start();

      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      eventHandler = addEventListenerStub.getCall(0).args[1];
    });

    it("should save new peer to store", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(undefined);

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      await eventHandler(mockEvent);

      expect((libp2p.peerStore.save as sinon.SinonStub).calledOnce).to.be.true;
      expect(
        (libp2p.peerStore.save as sinon.SinonStub).calledWith(mockPeerId, {
          multiaddrs: mockPeerInfo.multiaddrs
        })
      ).to.be.true;
    });

    it("should skip updating peer store if peer has same addresses", async () => {
      // Set up mockPeerInfo with actual multiaddrs for this test
      const mockMultiaddr = { equals: sinon.stub().returns(true) };
      const mockPeerInfoWithAddr = {
        id: mockPeerId,
        multiaddrs: [mockMultiaddr]
      } as unknown as PeerInfo;

      const mockPeer = {
        addresses: [{ multiaddr: mockMultiaddr }]
      };
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(mockPeer);

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfoWithAddr
      });

      await eventHandler(mockEvent);

      // Addresses already known: neither save nor merge is called.
      expect((libp2p.peerStore.save as sinon.SinonStub).called).to.be.false;
      expect((libp2p.peerStore.merge as sinon.SinonStub).called).to.be.false;
    });

    it("should merge peer addresses if peer exists with different addresses", async () => {
      // Set up mockPeerInfo with actual multiaddrs for this test
      const mockMultiaddr = { equals: sinon.stub().returns(false) };
      const mockPeerInfoWithAddr = {
        id: mockPeerId,
        multiaddrs: [mockMultiaddr]
      } as unknown as PeerInfo;

      const mockPeer = {
        addresses: []
      };
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(mockPeer);

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfoWithAddr
      });

      await eventHandler(mockEvent);

      expect((libp2p.peerStore.merge as sinon.SinonStub).calledOnce).to.be.true;
      expect(
        (libp2p.peerStore.merge as sinon.SinonStub).calledWith(mockPeerId, {
          multiaddrs: mockPeerInfoWithAddr.multiaddrs
        })
      ).to.be.true;
    });
  });

  describe("integration", () => {
    it("should handle complete discovery-to-dial flow", async () => {
      const peerStoreStub = libp2p.peerStore.get as sinon.SinonStub;
      peerStoreStub.resolves(undefined);

      discoveryDialer = new DiscoveryDialer({
        libp2p,
        dialer
      });
      discoveryDialer.start();

      const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
      const eventHandler = addEventListenerStub.getCall(0).args[1];

      const mockEvent = new CustomEvent("peer:discovery", {
        detail: mockPeerInfo
      });

      await eventHandler(mockEvent);

      expect(dialer.dial.calledOnce).to.be.true;
      expect(dialer.dial.calledWith(mockPeerId)).to.be.true;

      discoveryDialer.stop();
      const removeEventListenerStub =
        libp2p.removeEventListener as sinon.SinonStub;
      expect(removeEventListenerStub.called).to.be.true;
    });
  });
});
|
||||
104
packages/core/src/lib/connection_manager/discovery_dialer.ts
Normal file
104
packages/core/src/lib/connection_manager/discovery_dialer.ts
Normal file
@ -0,0 +1,104 @@
|
||||
import { Libp2p, Peer, PeerId, PeerInfo } from "@libp2p/interface";
|
||||
import { Multiaddr } from "@multiformats/multiaddr";
|
||||
import { Libp2pEventHandler } from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import { Dialer } from "./dialer.js";
|
||||
|
||||
/** Dependencies injected into {@link DiscoveryDialer}. */
type DiscoveryDialerConstructorOptions = {
  libp2p: Libp2p;
  dialer: Dialer;
};

/** Start/stop lifecycle contract implemented by {@link DiscoveryDialer}. */
interface IDiscoveryDialer {
  start(): void;
  stop(): void;
}
|
||||
|
||||
const log = new Logger("discovery-dialer");
|
||||
|
||||
/**
|
||||
* This class is responsible for dialing peers that are discovered by the libp2p node.
|
||||
* Managing limits for the peers is out of scope for this class.
|
||||
* Dialing after discovery is needed to identify the peer and get all other information: metadata, protocols, etc.
|
||||
*/
|
||||
export class DiscoveryDialer implements IDiscoveryDialer {
|
||||
private readonly libp2p: Libp2p;
|
||||
private readonly dialer: Dialer;
|
||||
|
||||
public constructor(options: DiscoveryDialerConstructorOptions) {
|
||||
this.libp2p = options.libp2p;
|
||||
this.dialer = options.dialer;
|
||||
|
||||
this.onPeerDiscovery = this.onPeerDiscovery.bind(this);
|
||||
}
|
||||
|
||||
public start(): void {
|
||||
this.libp2p.addEventListener(
|
||||
"peer:discovery",
|
||||
this.onPeerDiscovery as Libp2pEventHandler<PeerInfo>
|
||||
);
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:discovery",
|
||||
this.onPeerDiscovery as Libp2pEventHandler<PeerInfo>
|
||||
);
|
||||
}
|
||||
|
||||
private async onPeerDiscovery(event: CustomEvent<PeerInfo>): Promise<void> {
|
||||
const peerId = event.detail.id;
|
||||
log.info(`Discovered new peer: ${peerId}`);
|
||||
|
||||
try {
|
||||
await this.updatePeerStore(peerId, event.detail.multiaddrs);
|
||||
await this.dialer.dial(peerId);
|
||||
} catch (error) {
|
||||
log.error(`Error dialing peer ${peerId}`, error);
|
||||
}
|
||||
}
|
||||
|
||||
private async updatePeerStore(
|
||||
peerId: PeerId,
|
||||
multiaddrs: Multiaddr[]
|
||||
): Promise<void> {
|
||||
try {
|
||||
log.info(`Updating peer store for ${peerId}`);
|
||||
const peer = await this.getPeer(peerId);
|
||||
|
||||
if (!peer) {
|
||||
log.info(`Peer ${peerId} not found in store, saving`);
|
||||
await this.libp2p.peerStore.save(peerId, {
|
||||
multiaddrs: multiaddrs
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const hasSameAddr = multiaddrs.every((addr) =>
|
||||
peer.addresses.some((a) => a.multiaddr.equals(addr))
|
||||
);
|
||||
|
||||
if (hasSameAddr) {
|
||||
log.info(`Peer ${peerId} has same addresses in peer store, skipping`);
|
||||
return;
|
||||
}
|
||||
|
||||
log.info(`Merging peer ${peerId} addresses in peer store`);
|
||||
await this.libp2p.peerStore.merge(peerId, {
|
||||
multiaddrs: multiaddrs
|
||||
});
|
||||
} catch (error) {
|
||||
log.error(`Error updating peer store for ${peerId}`, error);
|
||||
}
|
||||
}
|
||||
|
||||
private async getPeer(peerId: PeerId): Promise<Peer | undefined> {
|
||||
try {
|
||||
return await this.libp2p.peerStore.get(peerId);
|
||||
} catch (error) {
|
||||
log.error(`Error getting peer info for ${peerId}`, error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,605 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import { AutoSharding } from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { KeepAliveManager } from "./keep_alive_manager.js";
|
||||
|
||||
describe("KeepAliveManager", () => {
|
||||
let libp2p: any;
|
||||
let relay: any;
|
||||
let keepAliveManager: KeepAliveManager;
|
||||
let mockPeerId: PeerId;
|
||||
let mockPeerId2: PeerId;
|
||||
let clock: sinon.SinonFakeTimers;
|
||||
|
||||
// Builds a minimal PeerId double: only toString/equals are exercised.
const createMockPeerId = (id: string): PeerId =>
  ({
    toString: () => id,
    equals: (other: PeerId) => other.toString() === id
  }) as PeerId;

// Keep-alive intervals in seconds (ping every 30s, relay ping every 60s).
const defaultOptions = {
  pingKeepAlive: 30,
  relayKeepAlive: 60
};

// Single-shard auto-sharding config used by every test.
const defaultNetworkConfig: AutoSharding = {
  clusterId: 0,
  numShardsInCluster: 1
};
|
||||
|
||||
beforeEach(() => {
  // Fake timers so keep-alive intervals can be driven deterministically.
  clock = sinon.useFakeTimers();

  mockPeerId = createMockPeerId("12D3KooWTest1");
  mockPeerId2 = createMockPeerId("12D3KooWTest2");

  // Minimal libp2p surface: event wiring, ping service, peer store merge.
  libp2p = {
    addEventListener: sinon.stub(),
    removeEventListener: sinon.stub(),
    services: {
      ping: {
        ping: sinon.stub().resolves(100)
      }
    },
    peerStore: {
      merge: sinon.stub().resolves()
    }
  };

  // Relay double: mockPeerId is reported as a mesh peer on both topics.
  relay = {
    pubsubTopics: ["/waku/2/rs/1/0", "/waku/2/rs/1/1"],
    getMeshPeers: sinon.stub().returns(["12D3KooWTest1"]),
    send: sinon.stub().resolves()
  };
});

afterEach(() => {
  // Stop first so no timers fire against restored stubs.
  if (keepAliveManager) {
    keepAliveManager.stop();
  }
  clock.restore();
  sinon.restore();
});
|
||||
|
||||
describe("constructor", () => {
  it("should create KeepAliveManager with required options", () => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p
    });

    expect(keepAliveManager).to.be.instanceOf(KeepAliveManager);
  });

  // Relay is optional; construction must also work with it supplied.
  it("should create KeepAliveManager with relay", () => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p,
      relay
    });

    expect(keepAliveManager).to.be.instanceOf(KeepAliveManager);
  });
});
|
||||
|
||||
describe("start", () => {
  beforeEach(() => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p
    });
  });

  it("should add event listeners for peer connect and disconnect", () => {
    keepAliveManager.start();

    expect(libp2p.addEventListener.calledTwice).to.be.true;
    expect(
      libp2p.addEventListener.calledWith("peer:connect", sinon.match.func)
    ).to.be.true;
    expect(
      libp2p.addEventListener.calledWith("peer:disconnect", sinon.match.func)
    ).to.be.true;
  });

  it("should be safe to call multiple times", () => {
    keepAliveManager.start();
    keepAliveManager.start();

    // 2 listeners registered per start() call => 4 total after two starts.
    expect(libp2p.addEventListener.callCount).to.equal(4);
  });
});
|
||||
|
||||
describe("stop", () => {
  beforeEach(() => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p,
      relay
    });
    keepAliveManager.start();
  });

  it("should remove event listeners", () => {
    keepAliveManager.stop();

    expect(libp2p.removeEventListener.calledTwice).to.be.true;
    expect(
      libp2p.removeEventListener.calledWith("peer:connect", sinon.match.func)
    ).to.be.true;
    expect(
      libp2p.removeEventListener.calledWith(
        "peer:disconnect",
        sinon.match.func
      )
    ).to.be.true;
  });

  it("should clear all timers", () => {
    // getCall(0) is the "peer:connect" registration made by start().
    const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
    const connectEvent = new CustomEvent("peer:connect", {
      detail: mockPeerId
    });
    peerConnectHandler(connectEvent);

    const timersBeforeStop = clock.countTimers();
    expect(timersBeforeStop).to.be.greaterThan(0);

    keepAliveManager.stop();

    // stop() must cancel every keep-alive timer it started.
    expect(clock.countTimers()).to.equal(0);
  });

  it("should be safe to call multiple times", () => {
    keepAliveManager.stop();
    keepAliveManager.stop();

    // 2 listeners removed per stop() call => 4 total after two stops.
    expect(libp2p.removeEventListener.callCount).to.equal(4);
  });
});
|
||||
|
||||
describe("peer connect event handling", () => {
  beforeEach(() => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p,
      relay
    });
    keepAliveManager.start();
  });

  it("should start ping timers on peer connect", () => {
    const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
    const connectEvent = new CustomEvent("peer:connect", {
      detail: mockPeerId
    });
    peerConnectHandler(connectEvent);

    expect(clock.countTimers()).to.be.greaterThan(0);
  });

  it("should handle multiple peer connections", () => {
    const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
    const connectEvent1 = new CustomEvent("peer:connect", {
      detail: mockPeerId
    });
    const connectEvent2 = new CustomEvent("peer:connect", {
      detail: mockPeerId2
    });

    peerConnectHandler(connectEvent1);
    peerConnectHandler(connectEvent2);

    // Each connected peer gets its own timer(s).
    expect(clock.countTimers()).to.be.greaterThan(1);
  });
});
|
||||
|
||||
describe("peer disconnect event handling", () => {
  beforeEach(() => {
    keepAliveManager = new KeepAliveManager({
      options: defaultOptions,
      networkConfig: defaultNetworkConfig,
      libp2p,
      relay
    });
    keepAliveManager.start();
  });

  it("should stop ping timers on peer disconnect", () => {
    // getCall(0) = "peer:connect" handler, getCall(1) = "peer:disconnect".
    const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
    const peerDisconnectHandler = libp2p.addEventListener.getCall(1).args[1];

    const connectEvent = new CustomEvent("peer:connect", {
      detail: mockPeerId
    });
    peerConnectHandler(connectEvent);

    const timerCountAfterConnect = clock.countTimers();
    expect(timerCountAfterConnect).to.be.greaterThan(0);

    const disconnectEvent = new CustomEvent("peer:disconnect", {
      detail: mockPeerId
    });
    peerDisconnectHandler(disconnectEvent);

    // Disconnect must cancel that peer's keep-alive timers.
    expect(clock.countTimers()).to.be.lessThan(timerCountAfterConnect);
  });
});
|
||||
|
||||
describe("ping timer management", () => {
|
||||
beforeEach(() => {
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p
|
||||
});
|
||||
keepAliveManager.start();
|
||||
});
|
||||
|
||||
it("should create ping timers when pingKeepAlive > 0", () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.be.greaterThan(0);
|
||||
});
|
||||
|
||||
it("should not create ping timers when pingKeepAlive = 0", () => {
|
||||
keepAliveManager.stop();
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: { pingKeepAlive: 0, relayKeepAlive: 0 },
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(2).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(0);
|
||||
});
|
||||
|
||||
it("should perform ping and update peer store on timer", async () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
clock.tick(defaultOptions.pingKeepAlive * 1000);
|
||||
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.calledWith(libp2p.services.ping.ping, mockPeerId);
|
||||
sinon.assert.calledWith(
|
||||
libp2p.peerStore.merge,
|
||||
mockPeerId,
|
||||
sinon.match.object
|
||||
);
|
||||
});
|
||||
|
||||
it("should handle ping failures gracefully", async () => {
|
||||
libp2p.services.ping.ping.rejects(new Error("Ping failed"));
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
clock.tick(defaultOptions.pingKeepAlive * 1000);
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.calledWith(libp2p.services.ping.ping, mockPeerId);
|
||||
sinon.assert.notCalled(libp2p.peerStore.merge);
|
||||
});
|
||||
|
||||
it("should handle peer store update failures gracefully", async () => {
|
||||
libp2p.peerStore.merge.rejects(new Error("Peer store update failed"));
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
clock.tick(defaultOptions.pingKeepAlive * 1000);
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.calledWith(libp2p.services.ping.ping, mockPeerId);
|
||||
sinon.assert.calledWith(
|
||||
libp2p.peerStore.merge,
|
||||
mockPeerId,
|
||||
sinon.match.object
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("relay timer management", () => {
|
||||
beforeEach(() => {
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
});
|
||||
|
||||
it("should create relay timers when relay exists and relayKeepAlive > 0", () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.be.greaterThan(1);
|
||||
});
|
||||
|
||||
it("should not create relay timers when relayKeepAlive = 0", () => {
|
||||
keepAliveManager.stop();
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: { pingKeepAlive: 30, relayKeepAlive: 0 },
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(2).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
|
||||
it("should not create relay timers when relay is not provided", () => {
|
||||
keepAliveManager.stop();
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(2).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
|
||||
it("should create timers for each pubsub topic where peer is in mesh", () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.be.greaterThan(relay.pubsubTopics.length);
|
||||
});
|
||||
|
||||
it("should not create timers for topics where peer is not in mesh", () => {
|
||||
relay.getMeshPeers.returns([]);
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
|
||||
it("should send relay ping messages on timer", async () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
clock.tick(defaultOptions.relayKeepAlive * 1000);
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.called(relay.send);
|
||||
});
|
||||
|
||||
it("should handle relay send failures gracefully", async () => {
|
||||
relay.send.rejects(new Error("Relay send failed"));
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
clock.tick(defaultOptions.relayKeepAlive * 1000);
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.called(relay.send);
|
||||
});
|
||||
});
|
||||
|
||||
describe("timer cleanup", () => {
|
||||
beforeEach(() => {
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
});
|
||||
|
||||
it("should clear timers for specific peer on disconnect", () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const peerDisconnectHandler = libp2p.addEventListener.getCall(1).args[1];
|
||||
|
||||
const connectEvent1 = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
const connectEvent2 = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId2
|
||||
});
|
||||
peerConnectHandler(connectEvent1);
|
||||
peerConnectHandler(connectEvent2);
|
||||
|
||||
const timerCountAfterConnect = clock.countTimers();
|
||||
expect(timerCountAfterConnect).to.be.greaterThan(0);
|
||||
|
||||
const disconnectEvent = new CustomEvent("peer:disconnect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerDisconnectHandler(disconnectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.be.lessThan(timerCountAfterConnect);
|
||||
expect(clock.countTimers()).to.be.greaterThan(0);
|
||||
});
|
||||
|
||||
it("should handle disconnect when peer has no timers", () => {
|
||||
const peerDisconnectHandler = libp2p.addEventListener.getCall(1).args[1];
|
||||
const disconnectEvent = new CustomEvent("peer:disconnect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
|
||||
expect(() => peerDisconnectHandler(disconnectEvent)).to.not.throw();
|
||||
});
|
||||
|
||||
it("should clear existing timers before creating new ones", () => {
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
|
||||
peerConnectHandler(connectEvent);
|
||||
const timerCountAfterFirst = clock.countTimers();
|
||||
|
||||
peerConnectHandler(connectEvent);
|
||||
const timerCountAfterSecond = clock.countTimers();
|
||||
|
||||
expect(timerCountAfterSecond).to.equal(timerCountAfterFirst);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle empty pubsub topics", () => {
|
||||
const emptyRelay = {
|
||||
pubsubTopics: [],
|
||||
getMeshPeers: sinon.stub().returns([]),
|
||||
send: sinon.stub().resolves()
|
||||
} as any;
|
||||
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay: emptyRelay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
|
||||
it("should handle all zero keep alive options", () => {
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: { pingKeepAlive: 0, relayKeepAlive: 0 },
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(0);
|
||||
});
|
||||
|
||||
it("should handle peer not in mesh for all topics", () => {
|
||||
relay.getMeshPeers.returns(["different-peer-id"]);
|
||||
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("integration", () => {
|
||||
it("should handle complete peer lifecycle", async () => {
|
||||
keepAliveManager = new KeepAliveManager({
|
||||
options: defaultOptions,
|
||||
networkConfig: defaultNetworkConfig,
|
||||
libp2p,
|
||||
relay
|
||||
});
|
||||
keepAliveManager.start();
|
||||
|
||||
const peerConnectHandler = libp2p.addEventListener.getCall(0).args[1];
|
||||
const peerDisconnectHandler = libp2p.addEventListener.getCall(1).args[1];
|
||||
|
||||
const connectEvent = new CustomEvent("peer:connect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerConnectHandler(connectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.be.greaterThan(0);
|
||||
|
||||
clock.tick(
|
||||
Math.max(defaultOptions.pingKeepAlive, defaultOptions.relayKeepAlive) *
|
||||
1000
|
||||
);
|
||||
await clock.tickAsync(0);
|
||||
|
||||
sinon.assert.called(libp2p.services.ping.ping);
|
||||
sinon.assert.called(relay.send);
|
||||
|
||||
const disconnectEvent = new CustomEvent("peer:disconnect", {
|
||||
detail: mockPeerId
|
||||
});
|
||||
peerDisconnectHandler(disconnectEvent);
|
||||
|
||||
expect(clock.countTimers()).to.equal(0);
|
||||
|
||||
keepAliveManager.stop();
|
||||
|
||||
sinon.assert.called(libp2p.removeEventListener);
|
||||
});
|
||||
});
|
||||
});
|
||||
@ -1,6 +1,6 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import type { IRelay, Libp2p, PeerIdStr } from "@waku/interfaces";
|
||||
import { Logger, pubsubTopicToSingleShardInfo } from "@waku/utils";
|
||||
import type { IEncoder, IRelay, Libp2p, NetworkConfig } from "@waku/interfaces";
|
||||
import { createRoutingInfo, Logger } from "@waku/utils";
|
||||
import { utf8ToBytes } from "@waku/utils/bytes";
|
||||
|
||||
import { createEncoder } from "../message/version_0.js";
|
||||
@ -15,145 +15,221 @@ type KeepAliveOptions = {
|
||||
|
||||
type CreateKeepAliveManagerOptions = {
|
||||
options: KeepAliveOptions;
|
||||
networkConfig: NetworkConfig;
|
||||
libp2p: Libp2p;
|
||||
relay?: IRelay;
|
||||
};
|
||||
|
||||
export class KeepAliveManager {
|
||||
interface IKeepAliveManager {
|
||||
start(): void;
|
||||
stop(): void;
|
||||
}
|
||||
|
||||
export class KeepAliveManager implements IKeepAliveManager {
|
||||
private readonly relay?: IRelay;
|
||||
private readonly networkConfig: NetworkConfig;
|
||||
private readonly libp2p: Libp2p;
|
||||
|
||||
private readonly options: KeepAliveOptions;
|
||||
|
||||
private pingKeepAliveTimers: Map<string, ReturnType<typeof setInterval>> =
|
||||
new Map();
|
||||
private relayKeepAliveTimers: Map<PeerId, ReturnType<typeof setInterval>[]> =
|
||||
private relayKeepAliveTimers: Map<string, ReturnType<typeof setInterval>[]> =
|
||||
new Map();
|
||||
|
||||
public constructor({
|
||||
options,
|
||||
relay,
|
||||
networkConfig,
|
||||
libp2p
|
||||
}: CreateKeepAliveManagerOptions) {
|
||||
this.options = options;
|
||||
this.relay = relay;
|
||||
this.networkConfig = networkConfig;
|
||||
this.libp2p = libp2p;
|
||||
|
||||
this.onPeerConnect = this.onPeerConnect.bind(this);
|
||||
this.onPeerDisconnect = this.onPeerDisconnect.bind(this);
|
||||
}
|
||||
|
||||
public start(peerId: PeerId): void {
|
||||
// Just in case a timer already exists for this peer
|
||||
this.stop(peerId);
|
||||
|
||||
const { pingKeepAlive: pingPeriodSecs, relayKeepAlive: relayPeriodSecs } =
|
||||
this.options;
|
||||
|
||||
const peerIdStr = peerId.toString();
|
||||
|
||||
// Ping the peer every pingPeriodSecs seconds
|
||||
// if pingPeriodSecs is 0, don't ping the peer
|
||||
if (pingPeriodSecs !== 0) {
|
||||
const interval = setInterval(() => {
|
||||
void (async () => {
|
||||
let ping: number;
|
||||
try {
|
||||
// ping the peer for keep alive
|
||||
// also update the peer store with the latency
|
||||
try {
|
||||
ping = await this.libp2p.services.ping.ping(peerId);
|
||||
log.info(`Ping succeeded (${peerIdStr})`, ping);
|
||||
} catch (error) {
|
||||
log.error(`Ping failed for peer (${peerIdStr}).
|
||||
Next ping will be attempted in ${pingPeriodSecs} seconds.
|
||||
`);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
await this.libp2p.peerStore.merge(peerId, {
|
||||
metadata: {
|
||||
ping: utf8ToBytes(ping.toString())
|
||||
}
|
||||
});
|
||||
} catch (e) {
|
||||
log.error("Failed to update ping", e);
|
||||
}
|
||||
} catch (e) {
|
||||
log.error(`Ping failed (${peerIdStr})`, e);
|
||||
}
|
||||
})();
|
||||
}, pingPeriodSecs * 1000);
|
||||
|
||||
this.pingKeepAliveTimers.set(peerIdStr, interval);
|
||||
}
|
||||
|
||||
const relay = this.relay;
|
||||
if (relay && relayPeriodSecs !== 0) {
|
||||
const intervals = this.scheduleRelayPings(
|
||||
relay,
|
||||
relayPeriodSecs,
|
||||
peerId.toString()
|
||||
);
|
||||
this.relayKeepAliveTimers.set(peerId, intervals);
|
||||
}
|
||||
public start(): void {
|
||||
this.libp2p.addEventListener("peer:connect", this.onPeerConnect);
|
||||
this.libp2p.addEventListener("peer:disconnect", this.onPeerDisconnect);
|
||||
}
|
||||
|
||||
public stop(peerId: PeerId): void {
|
||||
const peerIdStr = peerId.toString();
|
||||
public stop(): void {
|
||||
this.libp2p.removeEventListener("peer:connect", this.onPeerConnect);
|
||||
this.libp2p.removeEventListener("peer:disconnect", this.onPeerDisconnect);
|
||||
|
||||
if (this.pingKeepAliveTimers.has(peerIdStr)) {
|
||||
clearInterval(this.pingKeepAliveTimers.get(peerIdStr));
|
||||
this.pingKeepAliveTimers.delete(peerIdStr);
|
||||
}
|
||||
|
||||
if (this.relayKeepAliveTimers.has(peerId)) {
|
||||
this.relayKeepAliveTimers.get(peerId)?.map(clearInterval);
|
||||
this.relayKeepAliveTimers.delete(peerId);
|
||||
}
|
||||
}
|
||||
|
||||
public stopAll(): void {
|
||||
for (const timer of [
|
||||
...Object.values(this.pingKeepAliveTimers),
|
||||
...Object.values(this.relayKeepAliveTimers)
|
||||
]) {
|
||||
for (const timer of this.pingKeepAliveTimers.values()) {
|
||||
clearInterval(timer);
|
||||
}
|
||||
|
||||
for (const timerArray of this.relayKeepAliveTimers.values()) {
|
||||
for (const timer of timerArray) {
|
||||
clearInterval(timer);
|
||||
}
|
||||
}
|
||||
|
||||
this.pingKeepAliveTimers.clear();
|
||||
this.relayKeepAliveTimers.clear();
|
||||
}
|
||||
|
||||
public connectionsExist(): boolean {
|
||||
return (
|
||||
this.pingKeepAliveTimers.size > 0 || this.relayKeepAliveTimers.size > 0
|
||||
);
|
||||
private onPeerConnect(evt: CustomEvent<PeerId>): void {
|
||||
const peerId = evt.detail;
|
||||
this.startPingForPeer(peerId);
|
||||
}
|
||||
|
||||
private scheduleRelayPings(
|
||||
relay: IRelay,
|
||||
relayPeriodSecs: number,
|
||||
peerIdStr: PeerIdStr
|
||||
): NodeJS.Timeout[] {
|
||||
// send a ping message to each PubsubTopic the peer is part of
|
||||
private onPeerDisconnect(evt: CustomEvent<PeerId>): void {
|
||||
const peerId = evt.detail;
|
||||
this.stopPingForPeer(peerId);
|
||||
}
|
||||
|
||||
private startPingForPeer(peerId: PeerId): void {
|
||||
// Just in case a timer already exists for this peer
|
||||
this.stopPingForPeer(peerId);
|
||||
|
||||
this.startLibp2pPing(peerId);
|
||||
this.startRelayPing(peerId);
|
||||
}
|
||||
|
||||
private stopPingForPeer(peerId: PeerId): void {
|
||||
this.stopLibp2pPing(peerId);
|
||||
this.stopRelayPing(peerId);
|
||||
}
|
||||
|
||||
private startLibp2pPing(peerId: PeerId): void {
|
||||
if (this.options.pingKeepAlive === 0) {
|
||||
log.warn(
|
||||
`Ping keep alive is disabled pingKeepAlive:${this.options.pingKeepAlive}, skipping start for libp2p ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const peerIdStr = peerId.toString();
|
||||
|
||||
if (this.pingKeepAliveTimers.has(peerIdStr)) {
|
||||
log.warn(
|
||||
`Ping already started for peer: ${peerIdStr}, skipping start for libp2p ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const interval = setInterval(() => {
|
||||
void this.pingLibp2p(peerId);
|
||||
}, this.options.pingKeepAlive * 1000);
|
||||
|
||||
this.pingKeepAliveTimers.set(peerIdStr, interval);
|
||||
}
|
||||
|
||||
private stopLibp2pPing(peerId: PeerId): void {
|
||||
const peerIdStr = peerId.toString();
|
||||
|
||||
if (!this.pingKeepAliveTimers.has(peerIdStr)) {
|
||||
log.warn(
|
||||
`Ping not started for peer: ${peerIdStr}, skipping stop for ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
clearInterval(this.pingKeepAliveTimers.get(peerIdStr));
|
||||
this.pingKeepAliveTimers.delete(peerIdStr);
|
||||
}
|
||||
|
||||
private startRelayPing(peerId: PeerId): void {
|
||||
if (!this.relay) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.options.relayKeepAlive === 0) {
|
||||
log.warn(
|
||||
`Relay keep alive is disabled relayKeepAlive:${this.options.relayKeepAlive}, skipping start for relay ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.relayKeepAliveTimers.has(peerId.toString())) {
|
||||
log.warn(
|
||||
`Relay ping already started for peer: ${peerId.toString()}, skipping start for relay ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const intervals: NodeJS.Timeout[] = [];
|
||||
for (const topic of relay.pubsubTopics) {
|
||||
const meshPeers = relay.getMeshPeers(topic);
|
||||
if (!meshPeers.includes(peerIdStr)) continue;
|
||||
|
||||
for (const topic of this.relay.pubsubTopics) {
|
||||
const meshPeers = this.relay.getMeshPeers(topic);
|
||||
|
||||
if (!meshPeers.includes(peerId.toString())) {
|
||||
log.warn(
|
||||
`Peer: ${peerId.toString()} is not in the mesh for topic: ${topic}, skipping start for relay ping`
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
const routingInfo = createRoutingInfo(this.networkConfig, {
|
||||
contentTopic: RelayPingContentTopic,
|
||||
pubsubTopic: topic
|
||||
});
|
||||
|
||||
const encoder = createEncoder({
|
||||
pubsubTopicShardInfo: pubsubTopicToSingleShardInfo(topic),
|
||||
routingInfo: routingInfo,
|
||||
contentTopic: RelayPingContentTopic,
|
||||
ephemeral: true
|
||||
});
|
||||
|
||||
const interval = setInterval(() => {
|
||||
log.info("Sending Waku Relay ping message");
|
||||
relay
|
||||
.send(encoder, { payload: new Uint8Array([1]) })
|
||||
.catch((e) => log.error("Failed to send relay ping", e));
|
||||
}, relayPeriodSecs * 1000);
|
||||
void this.pingRelay(encoder);
|
||||
}, this.options.relayKeepAlive * 1000);
|
||||
|
||||
intervals.push(interval);
|
||||
}
|
||||
|
||||
return intervals;
|
||||
this.relayKeepAliveTimers.set(peerId.toString(), intervals);
|
||||
}
|
||||
|
||||
private stopRelayPing(peerId: PeerId): void {
|
||||
if (!this.relay) {
|
||||
return;
|
||||
}
|
||||
|
||||
const peerIdStr = peerId.toString();
|
||||
|
||||
if (!this.relayKeepAliveTimers.has(peerIdStr)) {
|
||||
log.warn(
|
||||
`Relay ping not started for peer: ${peerIdStr}, skipping stop for relay ping`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
this.relayKeepAliveTimers.get(peerIdStr)?.map(clearInterval);
|
||||
this.relayKeepAliveTimers.delete(peerIdStr);
|
||||
}
|
||||
|
||||
private async pingRelay(encoder: IEncoder): Promise<void> {
|
||||
try {
|
||||
log.info("Sending Waku Relay ping message");
|
||||
await this.relay!.send(encoder, { payload: new Uint8Array([1]) });
|
||||
} catch (e) {
|
||||
log.error("Failed to send relay ping", e);
|
||||
}
|
||||
}
|
||||
|
||||
private async pingLibp2p(peerId: PeerId): Promise<void> {
|
||||
try {
|
||||
log.info(`Pinging libp2p peer (${peerId.toString()})`);
|
||||
const ping = await this.libp2p.services.ping.ping(peerId);
|
||||
|
||||
log.info(`Ping succeeded (${peerId.toString()})`, ping);
|
||||
|
||||
await this.libp2p.peerStore.merge(peerId, {
|
||||
metadata: {
|
||||
ping: utf8ToBytes(ping.toString())
|
||||
}
|
||||
});
|
||||
log.info(`Ping updated for peer (${peerId.toString()})`);
|
||||
} catch (e) {
|
||||
log.error(`Ping failed for peer (${peerId.toString()})`, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
431
packages/core/src/lib/connection_manager/network_monitor.spec.ts
Normal file
431
packages/core/src/lib/connection_manager/network_monitor.spec.ts
Normal file
@ -0,0 +1,431 @@
|
||||
import { IWakuEventEmitter, Libp2p, WakuEvent } from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { NetworkMonitor } from "./network_monitor.js";
|
||||
|
||||
describe("NetworkMonitor", () => {
|
||||
let libp2p: Libp2p;
|
||||
let events: IWakuEventEmitter;
|
||||
let networkMonitor: NetworkMonitor;
|
||||
let originalGlobalThis: typeof globalThis;
|
||||
let mockGlobalThis: {
|
||||
addEventListener: sinon.SinonStub;
|
||||
removeEventListener: sinon.SinonStub;
|
||||
navigator: { onLine: boolean } | undefined;
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
libp2p = {
|
||||
addEventListener: sinon.stub(),
|
||||
removeEventListener: sinon.stub(),
|
||||
getConnections: sinon.stub().returns([])
|
||||
} as unknown as Libp2p;
|
||||
|
||||
events = {
|
||||
dispatchEvent: sinon.stub()
|
||||
} as unknown as IWakuEventEmitter;
|
||||
|
||||
originalGlobalThis = globalThis;
|
||||
mockGlobalThis = {
|
||||
addEventListener: sinon.stub(),
|
||||
removeEventListener: sinon.stub(),
|
||||
navigator: {
|
||||
onLine: true
|
||||
}
|
||||
};
|
||||
|
||||
(global as unknown as { globalThis: typeof mockGlobalThis }).globalThis =
|
||||
mockGlobalThis;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (networkMonitor) {
|
||||
networkMonitor.stop();
|
||||
}
|
||||
|
||||
(
|
||||
global as unknown as { globalThis: typeof originalGlobalThis }
|
||||
).globalThis = originalGlobalThis;
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
describe("constructor", () => {
|
||||
it("should create NetworkMonitor with libp2p and events", () => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
|
||||
expect(networkMonitor).to.be.instanceOf(NetworkMonitor);
|
||||
});
|
||||
|
||||
it("should initialize with isNetworkConnected as false", () => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("start", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
});
|
||||
|
||||
it("should add event listeners to libp2p", () => {
|
||||
networkMonitor.start();
|
||||
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
expect(addEventListenerStub.calledTwice).to.be.true;
|
||||
expect(addEventListenerStub.calledWith("peer:connect", sinon.match.func))
|
||||
.to.be.true;
|
||||
expect(
|
||||
addEventListenerStub.calledWith("peer:disconnect", sinon.match.func)
|
||||
).to.be.true;
|
||||
});
|
||||
|
||||
it("should add event listeners to globalThis", () => {
|
||||
networkMonitor.start();
|
||||
|
||||
expect(mockGlobalThis.addEventListener.calledTwice).to.be.true;
|
||||
expect(
|
||||
mockGlobalThis.addEventListener.calledWith("online", sinon.match.func)
|
||||
).to.be.true;
|
||||
expect(
|
||||
mockGlobalThis.addEventListener.calledWith("offline", sinon.match.func)
|
||||
).to.be.true;
|
||||
});
|
||||
|
||||
it("should handle errors when globalThis is not available", () => {
|
||||
mockGlobalThis.addEventListener.throws(new Error("No globalThis"));
|
||||
|
||||
expect(() => networkMonitor.start()).to.not.throw();
|
||||
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
expect(addEventListenerStub.calledTwice).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("stop", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
networkMonitor.start();
|
||||
});
|
||||
|
||||
it("should remove event listeners from libp2p", () => {
|
||||
networkMonitor.stop();
|
||||
|
||||
const removeEventListenerStub =
|
||||
libp2p.removeEventListener as sinon.SinonStub;
|
||||
expect(removeEventListenerStub.calledTwice).to.be.true;
|
||||
expect(
|
||||
removeEventListenerStub.calledWith("peer:connect", sinon.match.func)
|
||||
).to.be.true;
|
||||
expect(
|
||||
removeEventListenerStub.calledWith("peer:disconnect", sinon.match.func)
|
||||
).to.be.true;
|
||||
});
|
||||
|
||||
it("should remove event listeners from globalThis", () => {
|
||||
networkMonitor.stop();
|
||||
|
||||
expect(mockGlobalThis.removeEventListener.calledTwice).to.be.true;
|
||||
expect(
|
||||
mockGlobalThis.removeEventListener.calledWith(
|
||||
"online",
|
||||
sinon.match.func
|
||||
)
|
||||
).to.be.true;
|
||||
expect(
|
||||
mockGlobalThis.removeEventListener.calledWith(
|
||||
"offline",
|
||||
sinon.match.func
|
||||
)
|
||||
).to.be.true;
|
||||
});
|
||||
|
||||
it("should handle errors when removing globalThis listeners", () => {
|
||||
mockGlobalThis.removeEventListener.throws(new Error("Remove failed"));
|
||||
|
||||
expect(() => networkMonitor.stop()).to.not.throw();
|
||||
|
||||
const removeEventListenerStub =
|
||||
libp2p.removeEventListener as sinon.SinonStub;
|
||||
expect(removeEventListenerStub.calledTwice).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("isConnected", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
});
|
||||
|
||||
it("should return false when navigator.onLine is false", () => {
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = false;
|
||||
}
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when navigator.onLine is true but network is not connected", () => {
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = true;
|
||||
}
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle case when navigator is not available", () => {
|
||||
mockGlobalThis.navigator = undefined;
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle case when globalThis is not available", () => {
|
||||
(global as unknown as { globalThis: undefined }).globalThis = undefined;
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("peer connection events", () => {
|
||||
// Handlers captured from the stubbed libp2p.addEventListener calls so the
// tests can invoke them directly, simulating libp2p peer events.
let connectHandler: () => void;
let disconnectHandler: () => void;

beforeEach(() => {
  networkMonitor = new NetworkMonitor({
    libp2p,
    events
  });
  networkMonitor.start();

  const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;

  // start() registers "peer:connect" first and "peer:disconnect" second,
  // so the captured handlers follow that registration order.
  connectHandler = addEventListenerStub.getCall(0).args[1];
  disconnectHandler = addEventListenerStub.getCall(1).args[1];
});

it("should handle peer connect event", () => {
  expect(networkMonitor.isConnected()).to.be.false;

  connectHandler();

  // First connect flips the state and emits exactly one network event.
  expect(networkMonitor.isConnected()).to.be.true;
  const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
  expect(dispatchEventStub.calledOnce).to.be.true;
});

it("should handle peer disconnect event when no connections remain", () => {
  connectHandler();

  const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
  dispatchEventStub.resetHistory();

  // No remaining connections: disconnect should flip state to offline.
  const getConnectionsStub = libp2p.getConnections as sinon.SinonStub;
  getConnectionsStub.returns([]);

  disconnectHandler();

  expect(networkMonitor.isConnected()).to.be.false;
  expect(dispatchEventStub.calledOnce).to.be.true;
});

it("should not change state when connections remain after disconnect", () => {
  connectHandler();

  const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
  dispatchEventStub.resetHistory();

  // One connection still open: monitor should stay connected and stay silent.
  const getConnectionsStub = libp2p.getConnections as sinon.SinonStub;
  getConnectionsStub.returns([{ id: "connection1" }]);

  disconnectHandler();

  expect(networkMonitor.isConnected()).to.be.true;
  expect(dispatchEventStub.called).to.be.false;
});

it("should not dispatch event when already connected", () => {
  connectHandler();
  const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
  dispatchEventStub.resetHistory();

  // Second connect while already connected must be a no-op.
  connectHandler();

  expect(dispatchEventStub.called).to.be.false;
});

it("should not dispatch event when already disconnected", () => {
  connectHandler();

  const getConnectionsStub = libp2p.getConnections as sinon.SinonStub;
  getConnectionsStub.returns([]);
  disconnectHandler();

  const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
  dispatchEventStub.resetHistory();

  // Second disconnect while already disconnected must be a no-op.
  disconnectHandler();

  expect(dispatchEventStub.called).to.be.false;
});
|
||||
});
|
||||
|
||||
describe("browser online/offline events", () => {
|
||||
let onlineHandler: () => void;
|
||||
let offlineHandler: () => void;
|
||||
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
networkMonitor.start();
|
||||
|
||||
onlineHandler = mockGlobalThis.addEventListener.getCall(0).args[1];
|
||||
offlineHandler = mockGlobalThis.addEventListener.getCall(1).args[1];
|
||||
});
|
||||
|
||||
it("should dispatch network event when browser goes online", () => {
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = true;
|
||||
}
|
||||
|
||||
onlineHandler();
|
||||
|
||||
const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
|
||||
expect(dispatchEventStub.calledOnce).to.be.true;
|
||||
});
|
||||
|
||||
it("should dispatch network event when browser goes offline", () => {
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = false;
|
||||
}
|
||||
|
||||
offlineHandler();
|
||||
|
||||
const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
|
||||
expect(dispatchEventStub.calledOnce).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("dispatchNetworkEvent", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
});
|
||||
|
||||
it("should dispatch CustomEvent with correct type and detail", () => {
|
||||
networkMonitor.start();
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
const connectHandler = addEventListenerStub.getCall(0).args[1];
|
||||
connectHandler();
|
||||
|
||||
const dispatchEventStub = events.dispatchEvent as sinon.SinonStub;
|
||||
expect(dispatchEventStub.calledOnce).to.be.true;
|
||||
const dispatchedEvent = dispatchEventStub.getCall(0)
|
||||
.args[0] as CustomEvent<boolean>;
|
||||
expect(dispatchedEvent).to.be.instanceOf(CustomEvent);
|
||||
expect(dispatchedEvent.type).to.equal(WakuEvent.Connection);
|
||||
expect(dispatchedEvent.detail).to.be.true;
|
||||
});
|
||||
});
|
||||
|
||||
describe("error handling", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle errors when getting connections", () => {
|
||||
const getConnectionsStub = libp2p.getConnections as sinon.SinonStub;
|
||||
getConnectionsStub.throws(new Error("Get connections failed"));
|
||||
|
||||
networkMonitor.start();
|
||||
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
const connectHandler = addEventListenerStub.getCall(0).args[1];
|
||||
const disconnectHandler = addEventListenerStub.getCall(1).args[1];
|
||||
|
||||
connectHandler();
|
||||
expect(networkMonitor.isConnected()).to.be.true;
|
||||
|
||||
expect(() => disconnectHandler()).to.throw("Get connections failed");
|
||||
});
|
||||
|
||||
it("should handle errors when accessing navigator", () => {
|
||||
Object.defineProperty(mockGlobalThis, "navigator", {
|
||||
get: () => {
|
||||
throw new Error("Navigator access failed");
|
||||
}
|
||||
});
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("integration", () => {
|
||||
beforeEach(() => {
|
||||
networkMonitor = new NetworkMonitor({
|
||||
libp2p,
|
||||
events
|
||||
});
|
||||
networkMonitor.start();
|
||||
});
|
||||
|
||||
it("should handle complete connection lifecycle", () => {
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
const connectHandler = addEventListenerStub.getCall(0).args[1];
|
||||
const disconnectHandler = addEventListenerStub.getCall(1).args[1];
|
||||
const getConnectionsStub = libp2p.getConnections as sinon.SinonStub;
|
||||
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
|
||||
connectHandler();
|
||||
expect(networkMonitor.isConnected()).to.be.true;
|
||||
|
||||
getConnectionsStub.returns([{ id: "other" }]);
|
||||
disconnectHandler();
|
||||
expect(networkMonitor.isConnected()).to.be.true;
|
||||
|
||||
getConnectionsStub.returns([]);
|
||||
disconnectHandler();
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle browser offline state overriding peer connections", () => {
|
||||
const addEventListenerStub = libp2p.addEventListener as sinon.SinonStub;
|
||||
const connectHandler = addEventListenerStub.getCall(0).args[1];
|
||||
|
||||
connectHandler();
|
||||
expect(networkMonitor.isConnected()).to.be.true;
|
||||
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = false;
|
||||
}
|
||||
expect(networkMonitor.isConnected()).to.be.false;
|
||||
|
||||
if (mockGlobalThis.navigator) {
|
||||
mockGlobalThis.navigator.onLine = true;
|
||||
}
|
||||
expect(networkMonitor.isConnected()).to.be.true;
|
||||
});
|
||||
});
|
||||
});
|
||||
112
packages/core/src/lib/connection_manager/network_monitor.ts
Normal file
112
packages/core/src/lib/connection_manager/network_monitor.ts
Normal file
@ -0,0 +1,112 @@
|
||||
import { IWakuEventEmitter, Libp2p, WakuEvent } from "@waku/interfaces";
|
||||
|
||||
/** Dependencies required to construct a NetworkMonitor. */
type NetworkMonitorConstructorOptions = {
  libp2p: Libp2p;
  events: IWakuEventEmitter;
};

/**
 * Contract for tracking combined connectivity: libp2p peer connections
 * plus the browser's online/offline status.
 */
interface INetworkMonitor {
  // Registers libp2p peer listeners and (best effort) browser listeners.
  start(): void;
  // Unregisters everything start() registered.
  stop(): void;
  // True only when both the p2p and the browser sides report connected.
  isConnected(): boolean;
  // True while at least one libp2p connection is believed open.
  isP2PConnected(): boolean;
  // True unless a present navigator reports onLine === false.
  isBrowserConnected(): boolean;
}
|
||||
|
||||
export class NetworkMonitor implements INetworkMonitor {
|
||||
private readonly libp2p: Libp2p;
|
||||
private readonly events: IWakuEventEmitter;
|
||||
|
||||
private isNetworkConnected: boolean = false;
|
||||
|
||||
public constructor(options: NetworkMonitorConstructorOptions) {
|
||||
this.libp2p = options.libp2p;
|
||||
this.events = options.events;
|
||||
|
||||
this.onConnectedEvent = this.onConnectedEvent.bind(this);
|
||||
this.onDisconnectedEvent = this.onDisconnectedEvent.bind(this);
|
||||
this.dispatchNetworkEvent = this.dispatchNetworkEvent.bind(this);
|
||||
}
|
||||
|
||||
public start(): void {
|
||||
this.libp2p.addEventListener("peer:connect", this.onConnectedEvent);
|
||||
this.libp2p.addEventListener("peer:disconnect", this.onDisconnectedEvent);
|
||||
|
||||
try {
|
||||
globalThis.addEventListener("online", this.dispatchNetworkEvent);
|
||||
globalThis.addEventListener("offline", this.dispatchNetworkEvent);
|
||||
} catch (err) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
this.libp2p.removeEventListener("peer:connect", this.onConnectedEvent);
|
||||
this.libp2p.removeEventListener(
|
||||
"peer:disconnect",
|
||||
this.onDisconnectedEvent
|
||||
);
|
||||
|
||||
try {
|
||||
globalThis.removeEventListener("online", this.dispatchNetworkEvent);
|
||||
globalThis.removeEventListener("offline", this.dispatchNetworkEvent);
|
||||
} catch (err) {
|
||||
// ignore
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the node is connected to the network via libp2p and browser.
|
||||
*/
|
||||
public isConnected(): boolean {
|
||||
if (!this.isBrowserConnected()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return this.isP2PConnected();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the node is connected to the network via libp2p.
|
||||
*/
|
||||
public isP2PConnected(): boolean {
|
||||
return this.isNetworkConnected;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the node is connected to the network via browser.
|
||||
*/
|
||||
public isBrowserConnected(): boolean {
|
||||
try {
|
||||
if (globalThis?.navigator && !globalThis?.navigator?.onLine) {
|
||||
return false;
|
||||
}
|
||||
} catch (err) {
|
||||
// ignore
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
private onConnectedEvent(): void {
|
||||
if (!this.isNetworkConnected) {
|
||||
this.isNetworkConnected = true;
|
||||
this.dispatchNetworkEvent();
|
||||
}
|
||||
}
|
||||
|
||||
private onDisconnectedEvent(): void {
|
||||
if (this.isNetworkConnected && this.libp2p.getConnections().length === 0) {
|
||||
this.isNetworkConnected = false;
|
||||
this.dispatchNetworkEvent();
|
||||
}
|
||||
}
|
||||
|
||||
private dispatchNetworkEvent(): void {
|
||||
this.events.dispatchEvent(
|
||||
new CustomEvent<boolean>(WakuEvent.Connection, {
|
||||
detail: this.isConnected()
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
316
packages/core/src/lib/connection_manager/shard_reader.spec.ts
Normal file
316
packages/core/src/lib/connection_manager/shard_reader.spec.ts
Normal file
@ -0,0 +1,316 @@
|
||||
import { PeerId } from "@libp2p/interface";
|
||||
import {
|
||||
AutoSharding,
|
||||
DEFAULT_NUM_SHARDS,
|
||||
NetworkConfig,
|
||||
PubsubTopic,
|
||||
ShardInfo
|
||||
} from "@waku/interfaces";
|
||||
import { contentTopicToShardIndex, encodeRelayShard } from "@waku/utils";
|
||||
import { expect } from "chai";
|
||||
import { Libp2p } from "libp2p";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { ShardReader } from "./shard_reader.js";
|
||||
|
||||
const createMockPeerId = (): PeerId => {
|
||||
const mockPeerId = {
|
||||
toString: () => "12D3KooWTest123",
|
||||
equals: (other: PeerId) => other.toString() === "12D3KooWTest123"
|
||||
};
|
||||
return mockPeerId as unknown as PeerId;
|
||||
};
|
||||
|
||||
describe("ShardReader", function () {
|
||||
let mockLibp2p: sinon.SinonStubbedInstance<Libp2p>;
|
||||
let mockPeerStore: any;
|
||||
let shardReader: ShardReader;
|
||||
let testPeerId: PeerId;
|
||||
|
||||
const testContentTopic = "/test/1/waku-light-push/utf8";
|
||||
const testClusterId = 3;
|
||||
const testShardIndex = contentTopicToShardIndex(
|
||||
testContentTopic,
|
||||
DEFAULT_NUM_SHARDS
|
||||
);
|
||||
|
||||
const testNetworkConfig: AutoSharding = {
|
||||
clusterId: testClusterId,
|
||||
numShardsInCluster: DEFAULT_NUM_SHARDS
|
||||
};
|
||||
|
||||
const testShardInfo: ShardInfo = {
|
||||
clusterId: testClusterId,
|
||||
shards: [testShardIndex]
|
||||
};
|
||||
|
||||
beforeEach(async function () {
|
||||
testPeerId = createMockPeerId();
|
||||
|
||||
mockPeerStore = {
|
||||
get: sinon.stub(),
|
||||
save: sinon.stub(),
|
||||
merge: sinon.stub()
|
||||
};
|
||||
|
||||
mockLibp2p = {
|
||||
peerStore: mockPeerStore
|
||||
} as any;
|
||||
|
||||
shardReader = new ShardReader({
|
||||
libp2p: mockLibp2p as any,
|
||||
networkConfig: testNetworkConfig
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(function () {
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
describe("constructor", function () {
|
||||
it("should create ShardReader with auto sharding network config", function () {
|
||||
const config: AutoSharding = {
|
||||
clusterId: 3,
|
||||
numShardsInCluster: 10
|
||||
};
|
||||
|
||||
const reader = new ShardReader({
|
||||
libp2p: mockLibp2p as any,
|
||||
networkConfig: config
|
||||
});
|
||||
|
||||
expect(reader).to.be.instanceOf(ShardReader);
|
||||
});
|
||||
|
||||
it("should create ShardReader with static shards network config", function () {
|
||||
const config: NetworkConfig = {
|
||||
clusterId: 3
|
||||
};
|
||||
|
||||
const reader = new ShardReader({
|
||||
libp2p: mockLibp2p as any,
|
||||
networkConfig: config
|
||||
});
|
||||
|
||||
expect(reader).to.be.instanceOf(ShardReader);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPeerOnNetwork", function () {
|
||||
it("should return true when peer is on the same cluster", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.true;
|
||||
sinon.assert.calledWith(mockPeerStore.get, testPeerId);
|
||||
});
|
||||
|
||||
it("should return false when peer is on different cluster", async function () {
|
||||
const differentClusterShardInfo: ShardInfo = {
|
||||
clusterId: 5,
|
||||
shards: [1, 2]
|
||||
};
|
||||
const shardInfoBytes = encodeRelayShard(differentClusterShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return true even if peer has no overlapping shards", async function () {
|
||||
const noOverlapShardInfo: ShardInfo = {
|
||||
clusterId: testClusterId,
|
||||
shards: [testShardIndex + 100, testShardIndex + 200] // Use different shards
|
||||
};
|
||||
const shardInfoBytes = encodeRelayShard(noOverlapShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false when peer has no shard info", async function () {
|
||||
const mockPeer = {
|
||||
metadata: new Map()
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when peer is not found", async function () {
|
||||
mockPeerStore.get.rejects(new Error("Peer not found"));
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPeerOnShard", function () {
|
||||
it("should return true when peer is on the specified shard", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnShard(
|
||||
testPeerId,
|
||||
testShardIndex
|
||||
);
|
||||
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false when peer is on different cluster", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const shardReaderCluster5 = new ShardReader({
|
||||
libp2p: mockLibp2p as any,
|
||||
networkConfig: { clusterId: 5 }
|
||||
});
|
||||
|
||||
const result = await shardReaderCluster5.isPeerOnShard(
|
||||
testPeerId,
|
||||
testShardIndex
|
||||
);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when peer is not on the specified shard", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnShard(
|
||||
testPeerId,
|
||||
testShardIndex + 100
|
||||
);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when peer shard info is not found", async function () {
|
||||
mockPeerStore.get.rejects(new Error("Peer not found"));
|
||||
|
||||
const result = await shardReader.isPeerOnShard(
|
||||
testPeerId,
|
||||
testShardIndex
|
||||
);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("isPeerOnTopic", function () {
|
||||
it("should return true when peer is on the pubsub topic shard", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const pubsubTopic: PubsubTopic = `/waku/2/rs/${testClusterId}/${testShardIndex}`;
|
||||
|
||||
const result = await shardReader.isPeerOnTopic(testPeerId, pubsubTopic);
|
||||
|
||||
expect(result).to.be.true;
|
||||
});
|
||||
|
||||
it("should return false when peer is not on the pubsub topic shard", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const pubsubTopic: PubsubTopic = `/waku/2/rs/${testClusterId}/${testShardIndex + 100}`;
|
||||
|
||||
const result = await shardReader.isPeerOnTopic(testPeerId, pubsubTopic);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when pubsub topic parsing fails", async function () {
|
||||
const shardInfoBytes = encodeRelayShard(testShardInfo);
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", shardInfoBytes]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const invalidPubsubTopic: PubsubTopic = "/invalid/topic";
|
||||
|
||||
const result = await shardReader.isPeerOnTopic(
|
||||
testPeerId,
|
||||
invalidPubsubTopic
|
||||
);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should return false when peer is not found", async function () {
|
||||
mockPeerStore.get.rejects(new Error("Peer not found"));
|
||||
|
||||
const pubsubTopic: PubsubTopic = `/waku/2/rs/${testClusterId}/${testShardIndex}`;
|
||||
|
||||
const result = await shardReader.isPeerOnTopic(testPeerId, pubsubTopic);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
describe("error handling", function () {
|
||||
it("should handle errors gracefully when getting peer info", async function () {
|
||||
mockPeerStore.get.rejects(new Error("Network error"));
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
|
||||
it("should handle corrupted shard info gracefully", async function () {
|
||||
const mockPeer = {
|
||||
metadata: new Map([["shardInfo", new Uint8Array([1, 2, 3])]])
|
||||
};
|
||||
|
||||
mockPeerStore.get.resolves(mockPeer);
|
||||
|
||||
const result = await shardReader.isPeerOnCluster(testPeerId);
|
||||
|
||||
expect(result).to.be.false;
|
||||
});
|
||||
});
|
||||
});
|
||||
112
packages/core/src/lib/connection_manager/shard_reader.ts
Normal file
112
packages/core/src/lib/connection_manager/shard_reader.ts
Normal file
@ -0,0 +1,112 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import type {
|
||||
ClusterId,
|
||||
NetworkConfig,
|
||||
PubsubTopic,
|
||||
ShardId,
|
||||
ShardInfo
|
||||
} from "@waku/interfaces";
|
||||
import {
|
||||
decodeRelayShard,
|
||||
Logger,
|
||||
pubsubTopicToSingleShardInfo
|
||||
} from "@waku/utils";
|
||||
import { Libp2p } from "libp2p";
|
||||
|
||||
// Module-scoped logger shared by all ShardReader instances.
const log = new Logger("shard-reader");

/** Dependencies required to construct a ShardReader. */
type ShardReaderConstructorOptions = {
  libp2p: Libp2p;
  networkConfig: NetworkConfig;
};
|
||||
|
||||
export interface IShardReader {
|
||||
hasShardInfo(id: PeerId): Promise<boolean>;
|
||||
isPeerOnCluster(id: PeerId): Promise<boolean>;
|
||||
isPeerOnShard(
|
||||
id: PeerId,
|
||||
clusterId: ClusterId,
|
||||
shard: ShardId
|
||||
): Promise<boolean>;
|
||||
isPeerOnTopic(id: PeerId, pubsubTopic: PubsubTopic): Promise<boolean>;
|
||||
}
|
||||
|
||||
/**
|
||||
* This class is responsible for reading the shard info from the libp2p peer store or from the current node's network config.
|
||||
*/
|
||||
export class ShardReader implements IShardReader {
|
||||
private readonly libp2p: Libp2p;
|
||||
|
||||
private readonly clusterId: ClusterId;
|
||||
|
||||
public constructor(options: ShardReaderConstructorOptions) {
|
||||
this.libp2p = options.libp2p;
|
||||
|
||||
this.clusterId = options.networkConfig.clusterId;
|
||||
}
|
||||
|
||||
public async isPeerOnCluster(id: PeerId): Promise<boolean> {
|
||||
const peerRelayShards = await this.getRelayShards(id);
|
||||
|
||||
if (!peerRelayShards) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return peerRelayShards.clusterId === this.clusterId;
|
||||
}
|
||||
|
||||
public async hasShardInfo(id: PeerId): Promise<boolean> {
|
||||
const shardInfo = await this.getRelayShards(id);
|
||||
return !!shardInfo;
|
||||
}
|
||||
|
||||
public async isPeerOnTopic(
|
||||
id: PeerId,
|
||||
pubsubTopic: PubsubTopic
|
||||
): Promise<boolean> {
|
||||
try {
|
||||
const { clusterId, shard } = pubsubTopicToSingleShardInfo(pubsubTopic);
|
||||
if (clusterId !== this.clusterId) return false;
|
||||
return await this.isPeerOnShard(id, shard);
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Error comparing pubsub topic ${pubsubTopic} with shard info for ${id}`,
|
||||
error
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public async isPeerOnShard(id: PeerId, shard: ShardId): Promise<boolean> {
|
||||
const peerShardInfo = await this.getRelayShards(id);
|
||||
log.info(
|
||||
`Checking if peer on same shard: this { clusterId: ${this.clusterId}, shardId: ${shard} },` +
|
||||
`${id} { clusterId: ${peerShardInfo?.clusterId}, shards: ${peerShardInfo?.shards} }`
|
||||
);
|
||||
if (!peerShardInfo) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return (
|
||||
peerShardInfo.clusterId === this.clusterId &&
|
||||
peerShardInfo.shards.includes(shard)
|
||||
);
|
||||
}
|
||||
|
||||
private async getRelayShards(id: PeerId): Promise<ShardInfo | undefined> {
|
||||
try {
|
||||
const peer = await this.libp2p.peerStore.get(id);
|
||||
|
||||
const shardInfoBytes = peer.metadata.get("shardInfo");
|
||||
|
||||
if (!shardInfoBytes) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return decodeRelayShard(shardInfoBytes);
|
||||
} catch (error) {
|
||||
log.error(`Error getting shard info for ${id}`, error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
}
|
||||
46
packages/core/src/lib/connection_manager/utils.spec.ts
Normal file
46
packages/core/src/lib/connection_manager/utils.spec.ts
Normal file
@ -0,0 +1,46 @@
|
||||
import { peerIdFromString } from "@libp2p/peer-id";
|
||||
import { expect } from "chai";
|
||||
|
||||
import { mapToPeerId, mapToPeerIdOrMultiaddr } from "./utils.js";
|
||||
|
||||
describe("mapToPeerIdOrMultiaddr", () => {
|
||||
it("should return PeerId when PeerId is provided", async () => {
|
||||
const peerId = peerIdFromString(
|
||||
"12D3KooWHFJGwBXD7ukXqKaQZYmV1U3xxN1XCNrgriSEyvkxf6nE"
|
||||
);
|
||||
|
||||
const result = mapToPeerIdOrMultiaddr(peerId);
|
||||
|
||||
expect(result).to.equal(peerId);
|
||||
});
|
||||
|
||||
it("should return Multiaddr when Multiaddr input is provided", () => {
|
||||
const multiAddr =
|
||||
"/ip4/127.0.0.1/tcp/8000/p2p/12D3KooWHFJGwBXD7ukXqKaQZYmV1U3xxN1XCNrgriSEyvkxf6nE";
|
||||
|
||||
const result = mapToPeerIdOrMultiaddr(multiAddr);
|
||||
|
||||
expect(result.toString()).to.equal(multiAddr);
|
||||
});
|
||||
});
|
||||
|
||||
describe("mapToPeerId", () => {
|
||||
it("should return PeerId when PeerId is provided", async () => {
|
||||
const peerId = peerIdFromString(
|
||||
"12D3KooWHFJGwBXD7ukXqKaQZYmV1U3xxN1XCNrgriSEyvkxf6nE"
|
||||
);
|
||||
const result = mapToPeerId(peerId);
|
||||
expect(result).to.equal(peerId);
|
||||
expect(result.toString()).to.equal(peerId.toString());
|
||||
});
|
||||
|
||||
it("should return PeerId when Multiaddr input is provided", () => {
|
||||
const multiAddr =
|
||||
"/ip4/127.0.0.1/tcp/8000/p2p/12D3KooWHFJGwBXD7ukXqKaQZYmV1U3xxN1XCNrgriSEyvkxf6nE";
|
||||
|
||||
const result = mapToPeerId(multiAddr);
|
||||
expect(result.toString()).to.equal(
|
||||
"12D3KooWHFJGwBXD7ukXqKaQZYmV1U3xxN1XCNrgriSEyvkxf6nE"
|
||||
);
|
||||
});
|
||||
});
|
||||
@ -1,4 +1,7 @@
|
||||
import type { Peer } from "@libp2p/interface";
|
||||
import { isPeerId, type Peer, type PeerId } from "@libp2p/interface";
|
||||
import { peerIdFromString } from "@libp2p/peer-id";
|
||||
import { Multiaddr, multiaddr, MultiaddrInput } from "@multiformats/multiaddr";
|
||||
import { Libp2p } from "@waku/interfaces";
|
||||
import { bytesToUtf8 } from "@waku/utils/bytes";
|
||||
|
||||
/**
|
||||
@ -23,3 +26,49 @@ export const getPeerPing = (peer: Peer | null): number => {
|
||||
return -1;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Maps a PeerId or MultiaddrInput to a PeerId or Multiaddr.
|
||||
* @param input - The PeerId or MultiaddrInput to map.
|
||||
* @returns The PeerId or Multiaddr.
|
||||
* @throws {Error} If the input is not a valid PeerId or MultiaddrInput.
|
||||
*/
|
||||
export const mapToPeerIdOrMultiaddr = (
|
||||
input: PeerId | MultiaddrInput
|
||||
): PeerId | Multiaddr => {
|
||||
return isPeerId(input) ? input : multiaddr(input);
|
||||
};
|
||||
|
||||
/**
|
||||
* Maps a PeerId or MultiaddrInput to a PeerId.
|
||||
* @param input - The PeerId or MultiaddrInput to map.
|
||||
* @returns The PeerId.
|
||||
* @throws {Error} If the input is not a valid PeerId or MultiaddrInput.
|
||||
*/
|
||||
export const mapToPeerId = (input: PeerId | MultiaddrInput): PeerId => {
|
||||
return isPeerId(input)
|
||||
? input
|
||||
: peerIdFromString(multiaddr(input).getPeerId()!);
|
||||
};
|
||||
|
||||
/**
|
||||
* Checks if the address is supported by the libp2p instance.
|
||||
* @param libp2p - The libp2p instance.
|
||||
* @param addresses - The addresses to check.
|
||||
* @returns True if the addresses are supported, false otherwise.
|
||||
*/
|
||||
export const isAddressesSupported = (
|
||||
libp2p: Libp2p,
|
||||
addresses: Multiaddr[]
|
||||
): boolean => {
|
||||
const transports =
|
||||
libp2p?.components?.transportManager?.getTransports() || [];
|
||||
|
||||
if (transports.length === 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return transports
|
||||
.map((transport) => transport.dialFilter(addresses))
|
||||
.some((supportedAddresses) => supportedAddresses.length > 0);
|
||||
};
|
||||
|
||||
@ -1,11 +1,10 @@
|
||||
import type { PeerId, Stream } from "@libp2p/interface";
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import type { IncomingStreamData } from "@libp2p/interface-internal";
|
||||
import {
|
||||
type ContentTopic,
|
||||
type CoreProtocolResult,
|
||||
type IBaseProtocolCore,
|
||||
type FilterCoreResult,
|
||||
FilterError,
|
||||
type Libp2p,
|
||||
ProtocolError,
|
||||
type PubsubTopic
|
||||
} from "@waku/interfaces";
|
||||
import { WakuMessage } from "@waku/proto";
|
||||
@ -15,7 +14,7 @@ import * as lp from "it-length-prefixed";
|
||||
import { pipe } from "it-pipe";
|
||||
import { Uint8ArrayList } from "uint8arraylist";
|
||||
|
||||
import { BaseProtocol } from "../base_protocol.js";
|
||||
import { StreamManager } from "../stream_manager/index.js";
|
||||
|
||||
import {
|
||||
FilterPushRpc,
|
||||
@ -23,40 +22,69 @@ import {
|
||||
FilterSubscribeRpc
|
||||
} from "./filter_rpc.js";
|
||||
|
||||
const log = new Logger("filter:v2");
|
||||
const log = new Logger("filter-core");
|
||||
|
||||
export const FilterCodecs = {
|
||||
SUBSCRIBE: "/vac/waku/filter-subscribe/2.0.0-beta1",
|
||||
PUSH: "/vac/waku/filter-push/2.0.0-beta1"
|
||||
};
|
||||
|
||||
export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
public constructor(
|
||||
private handleIncomingMessage: (
|
||||
pubsubTopic: PubsubTopic,
|
||||
wakuMessage: WakuMessage,
|
||||
peerIdStr: string
|
||||
) => Promise<void>,
|
||||
public readonly pubsubTopics: PubsubTopic[],
|
||||
libp2p: Libp2p
|
||||
) {
|
||||
super(FilterCodecs.SUBSCRIBE, libp2p.components, pubsubTopics);
|
||||
type IncomingMessageHandler = (
|
||||
pubsubTopic: PubsubTopic,
|
||||
wakuMessage: WakuMessage,
|
||||
peerIdStr: string
|
||||
) => Promise<void>;
|
||||
|
||||
libp2p
|
||||
.handle(FilterCodecs.PUSH, this.onRequest.bind(this), {
|
||||
export class FilterCore {
|
||||
private streamManager: StreamManager;
|
||||
|
||||
public readonly multicodec = FilterCodecs.SUBSCRIBE;
|
||||
|
||||
public constructor(
|
||||
private handleIncomingMessage: IncomingMessageHandler,
|
||||
private libp2p: Libp2p
|
||||
) {
|
||||
this.streamManager = new StreamManager(
|
||||
FilterCodecs.SUBSCRIBE,
|
||||
libp2p.components
|
||||
);
|
||||
}
|
||||
|
||||
public async start(): Promise<void> {
|
||||
try {
|
||||
await this.libp2p.handle(FilterCodecs.PUSH, this.onRequest.bind(this), {
|
||||
maxInboundStreams: 100
|
||||
})
|
||||
.catch((e) => {
|
||||
log.error("Failed to register ", FilterCodecs.PUSH, e);
|
||||
});
|
||||
} catch (e) {
|
||||
log.error("Failed to register ", FilterCodecs.PUSH, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Stops the filter protocol: shuts down the stream manager and unregisters
 * the filter-push handler. Unhandle failures are logged, not propagated.
 */
public async stop(): Promise<void> {
  this.streamManager.stop();
  try {
    await this.libp2p.unhandle(FilterCodecs.PUSH);
  } catch (e) {
    log.error("Failed to unregister ", FilterCodecs.PUSH, e);
  }
}
|
||||
|
||||
public async subscribe(
|
||||
pubsubTopic: PubsubTopic,
|
||||
peerId: PeerId,
|
||||
contentTopics: ContentTopic[]
|
||||
): Promise<CoreProtocolResult> {
|
||||
const stream = await this.getStream(peerId);
|
||||
): Promise<FilterCoreResult> {
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: FilterError.NO_STREAM_AVAILABLE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const request = FilterSubscribeRpc.createSubscribeRequest(
|
||||
pubsubTopic,
|
||||
@ -72,12 +100,16 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
lp.decode,
|
||||
async (source) => await all(source)
|
||||
);
|
||||
|
||||
if (!res?.length) {
|
||||
throw Error("Received no response from subscription request.");
|
||||
}
|
||||
} catch (error) {
|
||||
log.error("Failed to send subscribe request", error);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.GENERIC_FAIL,
|
||||
error: FilterError.GENERIC_FAIL,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -92,7 +124,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
);
|
||||
return {
|
||||
failure: {
|
||||
error: ProtocolError.REMOTE_PEER_REJECTED,
|
||||
error: FilterError.REMOTE_PEER_REJECTED,
|
||||
peerId: peerId
|
||||
},
|
||||
success: null
|
||||
@ -109,19 +141,15 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
pubsubTopic: PubsubTopic,
|
||||
peerId: PeerId,
|
||||
contentTopics: ContentTopic[]
|
||||
): Promise<CoreProtocolResult> {
|
||||
let stream: Stream | undefined;
|
||||
try {
|
||||
stream = await this.getStream(peerId);
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Failed to get a stream for remote peer${peerId.toString()}`,
|
||||
error
|
||||
);
|
||||
): Promise<FilterCoreResult> {
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
log.error(`Failed to get a stream for remote peer:${peerId.toString()}`);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.NO_STREAM_AVAILABLE,
|
||||
error: FilterError.NO_STREAM_AVAILABLE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -139,7 +167,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.GENERIC_FAIL,
|
||||
error: FilterError.GENERIC_FAIL,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -154,8 +182,19 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
public async unsubscribeAll(
|
||||
pubsubTopic: PubsubTopic,
|
||||
peerId: PeerId
|
||||
): Promise<CoreProtocolResult> {
|
||||
const stream = await this.getStream(peerId);
|
||||
): Promise<FilterCoreResult> {
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
log.error(`Failed to get a stream for remote peer:${peerId.toString()}`);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: FilterError.NO_STREAM_AVAILABLE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const request = FilterSubscribeRpc.createUnsubscribeAllRequest(pubsubTopic);
|
||||
|
||||
@ -170,7 +209,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
if (!res || !res.length) {
|
||||
return {
|
||||
failure: {
|
||||
error: ProtocolError.NO_RESPONSE,
|
||||
error: FilterError.NO_RESPONSE,
|
||||
peerId: peerId
|
||||
},
|
||||
success: null
|
||||
@ -186,7 +225,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
);
|
||||
return {
|
||||
failure: {
|
||||
error: ProtocolError.REMOTE_PEER_REJECTED,
|
||||
error: FilterError.REMOTE_PEER_REJECTED,
|
||||
peerId: peerId
|
||||
},
|
||||
success: null
|
||||
@ -199,19 +238,15 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
};
|
||||
}
|
||||
|
||||
public async ping(peerId: PeerId): Promise<CoreProtocolResult> {
|
||||
let stream: Stream | undefined;
|
||||
try {
|
||||
stream = await this.getStream(peerId);
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Failed to get a stream for remote peer${peerId.toString()}`,
|
||||
error
|
||||
);
|
||||
public async ping(peerId: PeerId): Promise<FilterCoreResult> {
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
log.error(`Failed to get a stream for remote peer:${peerId.toString()}`);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.NO_STREAM_AVAILABLE,
|
||||
error: FilterError.NO_STREAM_AVAILABLE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -233,7 +268,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.GENERIC_FAIL,
|
||||
error: FilterError.GENERIC_FAIL,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -243,7 +278,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.NO_RESPONSE,
|
||||
error: FilterError.NO_RESPONSE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -259,7 +294,7 @@ export class FilterCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.REMOTE_PEER_REJECTED,
|
||||
error: FilterError.REMOTE_PEER_REJECTED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
|
||||
7
packages/core/src/lib/light_push/constants.ts
Normal file
7
packages/core/src/lib/light_push/constants.ts
Normal file
@ -0,0 +1,7 @@
|
||||
export const CODECS = {
|
||||
v2: "/vac/waku/lightpush/2.0.0-beta1",
|
||||
v3: "/vac/waku/lightpush/3.0.0"
|
||||
} as const;
|
||||
|
||||
export const LightPushCodecV2 = CODECS.v2;
|
||||
export const LightPushCodec = CODECS.v3;
|
||||
@ -1 +1,2 @@
|
||||
export { LightPushCore, LightPushCodec, PushResponse } from "./light_push.js";
|
||||
export { LightPushCore } from "./light_push.js";
|
||||
export { LightPushCodec, LightPushCodecV2 } from "./constants.js";
|
||||
|
||||
@ -1,110 +1,89 @@
|
||||
import type { PeerId, Stream } from "@libp2p/interface";
|
||||
import {
|
||||
type CoreProtocolResult,
|
||||
type IBaseProtocolCore,
|
||||
type IEncoder,
|
||||
type IMessage,
|
||||
type Libp2p,
|
||||
ProtocolError,
|
||||
PubsubTopic,
|
||||
type ThisOrThat
|
||||
type LightPushCoreResult,
|
||||
LightPushError
|
||||
} from "@waku/interfaces";
|
||||
import { PushResponse } from "@waku/proto";
|
||||
import { isMessageSizeUnderCap } from "@waku/utils";
|
||||
import { Logger } from "@waku/utils";
|
||||
import all from "it-all";
|
||||
import * as lp from "it-length-prefixed";
|
||||
import { pipe } from "it-pipe";
|
||||
import { Uint8ArrayList } from "uint8arraylist";
|
||||
|
||||
import { BaseProtocol } from "../base_protocol.js";
|
||||
import { StreamManager } from "../stream_manager/index.js";
|
||||
|
||||
import { PushRpc } from "./push_rpc.js";
|
||||
import { isRLNResponseError } from "./utils.js";
|
||||
import { CODECS } from "./constants.js";
|
||||
import { ProtocolHandler } from "./protocol_handler.js";
|
||||
|
||||
const log = new Logger("light-push");
|
||||
|
||||
export const LightPushCodec = "/vac/waku/lightpush/2.0.0-beta1";
|
||||
export { PushResponse };
|
||||
|
||||
type PreparePushMessageResult = ThisOrThat<"query", PushRpc>;
|
||||
|
||||
/**
|
||||
* Implements the [Waku v2 Light Push protocol](https://rfc.vac.dev/spec/19/).
|
||||
*/
|
||||
export class LightPushCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
public constructor(
|
||||
public readonly pubsubTopics: PubsubTopic[],
|
||||
libp2p: Libp2p
|
||||
) {
|
||||
super(LightPushCodec, libp2p.components, pubsubTopics);
|
||||
export class LightPushCore {
|
||||
private readonly streamManager: StreamManager;
|
||||
private readonly streamManagerV2: StreamManager;
|
||||
|
||||
public readonly multicodec = [CODECS.v3, CODECS.v2];
|
||||
|
||||
public constructor(private libp2p: Libp2p) {
|
||||
this.streamManagerV2 = new StreamManager(CODECS.v2, libp2p.components);
|
||||
this.streamManager = new StreamManager(CODECS.v3, libp2p.components);
|
||||
}
|
||||
|
||||
private async preparePushMessage(
|
||||
encoder: IEncoder,
|
||||
message: IMessage
|
||||
): Promise<PreparePushMessageResult> {
|
||||
try {
|
||||
if (!message.payload || message.payload.length === 0) {
|
||||
log.error("Failed to send waku light push: payload is empty");
|
||||
return { query: null, error: ProtocolError.EMPTY_PAYLOAD };
|
||||
}
|
||||
|
||||
if (!(await isMessageSizeUnderCap(encoder, message))) {
|
||||
log.error("Failed to send waku light push: message is bigger than 1MB");
|
||||
return { query: null, error: ProtocolError.SIZE_TOO_BIG };
|
||||
}
|
||||
|
||||
const protoMessage = await encoder.toProtoObj(message);
|
||||
if (!protoMessage) {
|
||||
log.error("Failed to encode to protoMessage, aborting push");
|
||||
return {
|
||||
query: null,
|
||||
error: ProtocolError.ENCODE_FAILED
|
||||
};
|
||||
}
|
||||
|
||||
const query = PushRpc.createRequest(protoMessage, encoder.pubsubTopic);
|
||||
return { query, error: null };
|
||||
} catch (error) {
|
||||
log.error("Failed to prepare push message", error);
|
||||
|
||||
return {
|
||||
query: null,
|
||||
error: ProtocolError.GENERIC_FAIL
|
||||
};
|
||||
}
|
||||
public stop(): void {
|
||||
this.streamManager.stop();
|
||||
this.streamManagerV2.stop();
|
||||
}
|
||||
|
||||
public async send(
|
||||
encoder: IEncoder,
|
||||
message: IMessage,
|
||||
peerId: PeerId
|
||||
): Promise<CoreProtocolResult> {
|
||||
const { query, error: preparationError } = await this.preparePushMessage(
|
||||
encoder,
|
||||
message
|
||||
peerId: PeerId,
|
||||
useLegacy: boolean = false
|
||||
): Promise<LightPushCoreResult> {
|
||||
const protocol = await this.getProtocol(peerId, useLegacy);
|
||||
|
||||
log.info(
|
||||
`Sending light push request to peer:${peerId.toString()}, protocol:${protocol}`
|
||||
);
|
||||
|
||||
if (preparationError || !query) {
|
||||
if (!protocol) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: preparationError,
|
||||
error: LightPushError.GENERIC_FAIL,
|
||||
peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let stream: Stream;
|
||||
try {
|
||||
stream = await this.getStream(peerId);
|
||||
} catch (error) {
|
||||
log.error("Failed to get stream", error);
|
||||
const { rpc, error: prepError } = await ProtocolHandler.preparePushMessage(
|
||||
encoder,
|
||||
message,
|
||||
protocol
|
||||
);
|
||||
|
||||
if (prepError) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.NO_STREAM_AVAILABLE,
|
||||
error: prepError,
|
||||
peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const stream = await this.getStream(peerId, protocol);
|
||||
|
||||
if (!stream) {
|
||||
log.error(`Failed to get a stream for remote peer:${peerId.toString()}`);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.NO_STREAM_AVAILABLE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
@ -113,76 +92,74 @@ export class LightPushCore extends BaseProtocol implements IBaseProtocolCore {
|
||||
let res: Uint8ArrayList[] | undefined;
|
||||
try {
|
||||
res = await pipe(
|
||||
[query.encode()],
|
||||
[rpc.encode()],
|
||||
lp.encode,
|
||||
stream,
|
||||
lp.decode,
|
||||
async (source) => await all(source)
|
||||
);
|
||||
} catch (err) {
|
||||
// can fail only because of `stream` abortion
|
||||
log.error("Failed to send waku light push request", err);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.STREAM_ABORTED,
|
||||
error: LightPushError.STREAM_ABORTED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
const bytes = new Uint8ArrayList();
|
||||
res.forEach((chunk) => {
|
||||
bytes.append(chunk);
|
||||
});
|
||||
res.forEach((chunk) => bytes.append(chunk));
|
||||
|
||||
let response: PushResponse | undefined;
|
||||
if (bytes.length === 0) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.NO_RESPONSE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return ProtocolHandler.handleResponse(bytes, protocol, peerId);
|
||||
}
|
||||
|
||||
private async getProtocol(
|
||||
peerId: PeerId,
|
||||
useLegacy: boolean
|
||||
): Promise<string | undefined> {
|
||||
try {
|
||||
response = PushRpc.decode(bytes).response;
|
||||
} catch (err) {
|
||||
log.error("Failed to decode push reply", err);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.DECODE_FAILED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
const peer = await this.libp2p.peerStore.get(peerId);
|
||||
|
||||
if (!response) {
|
||||
log.error("Remote peer fault: No response in PushRPC");
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.NO_RESPONSE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
if (
|
||||
useLegacy ||
|
||||
(!peer.protocols.includes(CODECS.v3) &&
|
||||
peer.protocols.includes(CODECS.v2))
|
||||
) {
|
||||
return CODECS.v2;
|
||||
} else if (peer.protocols.includes(CODECS.v3)) {
|
||||
return CODECS.v3;
|
||||
} else {
|
||||
throw new Error("No supported protocol found");
|
||||
}
|
||||
} catch (error) {
|
||||
log.error("Failed to get protocol", error);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
if (isRLNResponseError(response.info)) {
|
||||
log.error("Remote peer fault: RLN generation");
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.RLN_PROOF_GENERATION,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
private async getStream(
|
||||
peerId: PeerId,
|
||||
protocol: string
|
||||
): Promise<Stream | undefined> {
|
||||
switch (protocol) {
|
||||
case CODECS.v2:
|
||||
return this.streamManagerV2.getStream(peerId);
|
||||
case CODECS.v3:
|
||||
return this.streamManager.getStream(peerId);
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
|
||||
if (!response.isSuccess) {
|
||||
log.error("Remote peer rejected the message: ", response.info);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: ProtocolError.REMOTE_PEER_REJECTED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return { success: peerId, failure: null };
|
||||
}
|
||||
}
|
||||
|
||||
191
packages/core/src/lib/light_push/protocol_handler.ts
Normal file
191
packages/core/src/lib/light_push/protocol_handler.ts
Normal file
@ -0,0 +1,191 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import type { IEncoder, IMessage, LightPushCoreResult } from "@waku/interfaces";
|
||||
import { LightPushError, LightPushStatusCode } from "@waku/interfaces";
|
||||
import { PushResponse, WakuMessage } from "@waku/proto";
|
||||
import { isMessageSizeUnderCap, Logger } from "@waku/utils";
|
||||
import { Uint8ArrayList } from "uint8arraylist";
|
||||
|
||||
import { CODECS } from "./constants.js";
|
||||
import { PushRpcV2 } from "./push_rpc.js";
|
||||
import { PushRpc } from "./push_rpc_v3.js";
|
||||
import { isRLNResponseError } from "./utils.js";
|
||||
|
||||
type VersionedPushRpc =
|
||||
| ({ version: "v2" } & PushRpcV2)
|
||||
| ({ version: "v3" } & PushRpc);
|
||||
|
||||
type PreparePushMessageResult =
|
||||
| { rpc: VersionedPushRpc; error: null }
|
||||
| { rpc: null; error: LightPushError };
|
||||
|
||||
const log = new Logger("light-push:protocol-handler");
|
||||
|
||||
export class ProtocolHandler {
|
||||
public static async preparePushMessage(
|
||||
encoder: IEncoder,
|
||||
message: IMessage,
|
||||
protocol: string
|
||||
): Promise<PreparePushMessageResult> {
|
||||
try {
|
||||
if (!message.payload || message.payload.length === 0) {
|
||||
log.error("Failed to send waku light push: payload is empty");
|
||||
return { rpc: null, error: LightPushError.EMPTY_PAYLOAD };
|
||||
}
|
||||
|
||||
if (!(await isMessageSizeUnderCap(encoder, message))) {
|
||||
log.error("Failed to send waku light push: message is bigger than 1MB");
|
||||
return { rpc: null, error: LightPushError.SIZE_TOO_BIG };
|
||||
}
|
||||
|
||||
const protoMessage = await encoder.toProtoObj(message);
|
||||
if (!protoMessage) {
|
||||
log.error("Failed to encode to protoMessage, aborting push");
|
||||
return { rpc: null, error: LightPushError.ENCODE_FAILED };
|
||||
}
|
||||
|
||||
if (protocol === CODECS.v3) {
|
||||
log.info("Creating v3 RPC message");
|
||||
return {
|
||||
rpc: ProtocolHandler.createV3Rpc(protoMessage, encoder.pubsubTopic),
|
||||
error: null
|
||||
};
|
||||
}
|
||||
|
||||
log.info("Creating v2 RPC message");
|
||||
return {
|
||||
rpc: ProtocolHandler.createV2Rpc(protoMessage, encoder.pubsubTopic),
|
||||
error: null
|
||||
};
|
||||
} catch (err) {
|
||||
log.error("Failed to prepare push message", err);
|
||||
return { rpc: null, error: LightPushError.GENERIC_FAIL };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode and evaluate a LightPush response according to the protocol version
|
||||
*/
|
||||
public static handleResponse(
|
||||
bytes: Uint8ArrayList,
|
||||
protocol: string,
|
||||
peerId: PeerId
|
||||
): LightPushCoreResult {
|
||||
if (protocol === CODECS.v3) {
|
||||
return ProtocolHandler.handleV3Response(bytes, peerId);
|
||||
}
|
||||
|
||||
return ProtocolHandler.handleV2Response(bytes, peerId);
|
||||
}
|
||||
|
||||
private static handleV3Response(
|
||||
bytes: Uint8ArrayList,
|
||||
peerId: PeerId
|
||||
): LightPushCoreResult {
|
||||
try {
|
||||
const decodedRpcV3 = PushRpc.decodeResponse(bytes);
|
||||
const statusCode = decodedRpcV3.statusCode;
|
||||
const statusDesc = decodedRpcV3.statusDesc;
|
||||
|
||||
if (statusCode !== LightPushStatusCode.SUCCESS) {
|
||||
const error = LightPushError.REMOTE_PEER_REJECTED;
|
||||
log.error(
|
||||
`Remote peer rejected with v3 status code ${statusCode}: ${statusDesc}`
|
||||
);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (decodedRpcV3.relayPeerCount !== undefined) {
|
||||
log.info(`Message relayed to ${decodedRpcV3.relayPeerCount} peers`);
|
||||
}
|
||||
|
||||
return { success: peerId, failure: null };
|
||||
} catch (err) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.DECODE_FAILED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
private static handleV2Response(
|
||||
bytes: Uint8ArrayList,
|
||||
peerId: PeerId
|
||||
): LightPushCoreResult {
|
||||
let response: PushResponse | undefined;
|
||||
try {
|
||||
const decodedRpc = PushRpcV2.decode(bytes);
|
||||
response = decodedRpc.response;
|
||||
} catch (err) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.DECODE_FAILED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (!response) {
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.NO_RESPONSE,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (isRLNResponseError(response.info)) {
|
||||
log.error("Remote peer fault: RLN generation");
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.RLN_PROOF_GENERATION,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if (!response.isSuccess) {
|
||||
log.error("Remote peer rejected the message: ", response.info);
|
||||
return {
|
||||
success: null,
|
||||
failure: {
|
||||
error: LightPushError.REMOTE_PEER_REJECTED,
|
||||
peerId: peerId
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
return { success: peerId, failure: null };
|
||||
}
|
||||
|
||||
private static createV2Rpc(
|
||||
message: WakuMessage,
|
||||
pubsubTopic: string
|
||||
): VersionedPushRpc {
|
||||
const v2Rpc = PushRpcV2.createRequest(message, pubsubTopic);
|
||||
return Object.assign(v2Rpc, { version: "v2" as const });
|
||||
}
|
||||
|
||||
private static createV3Rpc(
|
||||
message: WakuMessage,
|
||||
pubsubTopic: string
|
||||
): VersionedPushRpc {
|
||||
if (!message.timestamp) {
|
||||
message.timestamp = BigInt(Date.now()) * BigInt(1_000_000);
|
||||
}
|
||||
|
||||
const v3Rpc = PushRpc.createRequest(message, pubsubTopic);
|
||||
return Object.assign(v3Rpc, { version: "v3" as const });
|
||||
}
|
||||
}
|
||||
@ -2,14 +2,14 @@ import { proto_lightpush as proto } from "@waku/proto";
|
||||
import type { Uint8ArrayList } from "uint8arraylist";
|
||||
import { v4 as uuid } from "uuid";
|
||||
|
||||
export class PushRpc {
|
||||
export class PushRpcV2 {
|
||||
public constructor(public proto: proto.PushRpc) {}
|
||||
|
||||
public static createRequest(
|
||||
message: proto.WakuMessage,
|
||||
pubsubTopic: string
|
||||
): PushRpc {
|
||||
return new PushRpc({
|
||||
): PushRpcV2 {
|
||||
return new PushRpcV2({
|
||||
requestId: uuid(),
|
||||
request: {
|
||||
message: message,
|
||||
@ -19,9 +19,9 @@ export class PushRpc {
|
||||
});
|
||||
}
|
||||
|
||||
public static decode(bytes: Uint8ArrayList): PushRpc {
|
||||
public static decode(bytes: Uint8ArrayList): PushRpcV2 {
|
||||
const res = proto.PushRpc.decode(bytes);
|
||||
return new PushRpc(res);
|
||||
return new PushRpcV2(res);
|
||||
}
|
||||
|
||||
public encode(): Uint8Array {
|
||||
|
||||
162
packages/core/src/lib/light_push/push_rpc_v3.ts
Normal file
162
packages/core/src/lib/light_push/push_rpc_v3.ts
Normal file
@ -0,0 +1,162 @@
|
||||
import { proto_lightpush as proto } from "@waku/proto";
|
||||
import type { Uint8ArrayList } from "uint8arraylist";
|
||||
import { v4 as uuid } from "uuid";
|
||||
|
||||
/**
|
||||
* LightPush v3 protocol RPC handler.
|
||||
* Implements the v3 message format with correct field numbers:
|
||||
* - requestId: 1
|
||||
* - pubsubTopic: 20
|
||||
* - message: 21
|
||||
*/
|
||||
export class PushRpc {
|
||||
public constructor(
|
||||
public proto: proto.LightPushRequestV3 | proto.LightPushResponseV3
|
||||
) {}
|
||||
|
||||
/**
|
||||
* Create a v3 request message with proper field numbering
|
||||
*/
|
||||
public static createRequest(
|
||||
message: proto.WakuMessage,
|
||||
pubsubTopic: string
|
||||
): PushRpc {
|
||||
return new PushRpc({
|
||||
requestId: uuid(),
|
||||
pubsubTopic: pubsubTopic,
|
||||
message: message
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a v3 response message with status code handling
|
||||
*/
|
||||
public static createResponse(
|
||||
requestId: string,
|
||||
statusCode: number,
|
||||
statusDesc?: string,
|
||||
relayPeerCount?: number
|
||||
): PushRpc {
|
||||
return new PushRpc({
|
||||
requestId,
|
||||
statusCode,
|
||||
statusDesc,
|
||||
relayPeerCount
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode v3 request message
|
||||
*/
|
||||
public static decodeRequest(bytes: Uint8ArrayList): PushRpc {
|
||||
const res = proto.LightPushRequestV3.decode(bytes);
|
||||
return new PushRpc(res);
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode v3 response message
|
||||
*/
|
||||
public static decodeResponse(bytes: Uint8ArrayList): PushRpc {
|
||||
const res = proto.LightPushResponseV3.decode(bytes);
|
||||
return new PushRpc(res);
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode message to bytes
|
||||
*/
|
||||
public encode(): Uint8Array {
|
||||
if (this.isRequest()) {
|
||||
return proto.LightPushRequestV3.encode(
|
||||
this.proto as proto.LightPushRequestV3
|
||||
);
|
||||
} else {
|
||||
return proto.LightPushResponseV3.encode(
|
||||
this.proto as proto.LightPushResponseV3
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get request data (if this is a request message)
|
||||
*/
|
||||
public get request(): proto.LightPushRequestV3 | undefined {
|
||||
return this.isRequest()
|
||||
? (this.proto as proto.LightPushRequestV3)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get response data (if this is a response message)
|
||||
*/
|
||||
public get response(): proto.LightPushResponseV3 | undefined {
|
||||
return this.isResponse()
|
||||
? (this.proto as proto.LightPushResponseV3)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the request ID
|
||||
*/
|
||||
public get requestId(): string {
|
||||
return this.proto.requestId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the pubsub topic (only available in requests)
|
||||
*/
|
||||
public get pubsubTopic(): string | undefined {
|
||||
return this.isRequest()
|
||||
? (this.proto as proto.LightPushRequestV3).pubsubTopic
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the message (only available in requests)
|
||||
*/
|
||||
public get message(): proto.WakuMessage | undefined {
|
||||
return this.isRequest()
|
||||
? (this.proto as proto.LightPushRequestV3).message
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the status code (only available in responses)
|
||||
*/
|
||||
public get statusCode(): number | undefined {
|
||||
return this.isResponse()
|
||||
? (this.proto as proto.LightPushResponseV3).statusCode
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the status description (only available in responses)
|
||||
*/
|
||||
public get statusDesc(): string | undefined {
|
||||
return this.isResponse()
|
||||
? (this.proto as proto.LightPushResponseV3).statusDesc
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the relay peer count (only available in responses)
|
||||
*/
|
||||
public get relayPeerCount(): number | undefined {
|
||||
return this.isResponse()
|
||||
? (this.proto as proto.LightPushResponseV3).relayPeerCount
|
||||
: undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this is a request message
|
||||
*/
|
||||
private isRequest(): boolean {
|
||||
return "pubsubTopic" in this.proto && "message" in this.proto;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if this is a response message
|
||||
*/
|
||||
private isResponse(): boolean {
|
||||
return "statusCode" in this.proto;
|
||||
}
|
||||
}
|
||||
@ -1,30 +1,46 @@
|
||||
import type { IProtoMessage } from "@waku/interfaces";
|
||||
import { contentTopicToPubsubTopic } from "@waku/utils";
|
||||
import type { AutoSharding, IProtoMessage } from "@waku/interfaces";
|
||||
import { createRoutingInfo } from "@waku/utils";
|
||||
import { bytesToHex } from "@waku/utils/bytes";
|
||||
import { expect } from "chai";
|
||||
import fc from "fast-check";
|
||||
|
||||
import { createDecoder, createEncoder, DecodedMessage } from "./version_0.js";
|
||||
import { messageHash } from "../message_hash/index.js";
|
||||
|
||||
const contentTopic = "/js-waku/1/tests/bytes";
|
||||
const pubsubTopic = contentTopicToPubsubTopic(contentTopic);
|
||||
import {
|
||||
createDecoder,
|
||||
createEncoder,
|
||||
DecodedMessage,
|
||||
proto
|
||||
} from "./version_0.js";
|
||||
|
||||
const testContentTopic = "/js-waku/1/tests/bytes";
|
||||
|
||||
const testNetworkConfig: AutoSharding = {
|
||||
clusterId: 0,
|
||||
numShardsInCluster: 8
|
||||
};
|
||||
const testRoutingInfo = createRoutingInfo(testNetworkConfig, {
|
||||
contentTopic: testContentTopic
|
||||
});
|
||||
|
||||
describe("Waku Message version 0", function () {
|
||||
it("Round trip binary serialization", async function () {
|
||||
await fc.assert(
|
||||
fc.asyncProperty(fc.uint8Array({ minLength: 1 }), async (payload) => {
|
||||
const encoder = createEncoder({
|
||||
contentTopic
|
||||
contentTopic: testContentTopic,
|
||||
routingInfo: testRoutingInfo
|
||||
});
|
||||
const bytes = await encoder.toWire({ payload });
|
||||
const decoder = createDecoder(contentTopic);
|
||||
const decoder = createDecoder(testContentTopic, testRoutingInfo);
|
||||
const protoResult = await decoder.fromWireToProtoObj(bytes);
|
||||
const result = (await decoder.fromProtoObj(
|
||||
pubsubTopic,
|
||||
testRoutingInfo.pubsubTopic,
|
||||
protoResult!
|
||||
)) as DecodedMessage;
|
||||
|
||||
expect(result.contentTopic).to.eq(contentTopic);
|
||||
expect(result.pubsubTopic).to.eq(pubsubTopic);
|
||||
expect(result.contentTopic).to.eq(testContentTopic);
|
||||
expect(result.pubsubTopic).to.eq(testRoutingInfo.pubsubTopic);
|
||||
expect(result.version).to.eq(0);
|
||||
expect(result.ephemeral).to.be.false;
|
||||
expect(result.payload).to.deep.eq(payload);
|
||||
@ -37,14 +53,15 @@ describe("Waku Message version 0", function () {
|
||||
await fc.assert(
|
||||
fc.asyncProperty(fc.uint8Array({ minLength: 1 }), async (payload) => {
|
||||
const encoder = createEncoder({
|
||||
contentTopic,
|
||||
contentTopic: testContentTopic,
|
||||
routingInfo: testRoutingInfo,
|
||||
ephemeral: true
|
||||
});
|
||||
const bytes = await encoder.toWire({ payload });
|
||||
const decoder = createDecoder(contentTopic);
|
||||
const decoder = createDecoder(testContentTopic, testRoutingInfo);
|
||||
const protoResult = await decoder.fromWireToProtoObj(bytes);
|
||||
const result = (await decoder.fromProtoObj(
|
||||
pubsubTopic,
|
||||
testRoutingInfo.pubsubTopic,
|
||||
protoResult!
|
||||
)) as DecodedMessage;
|
||||
|
||||
@ -68,15 +85,16 @@ describe("Waku Message version 0", function () {
|
||||
};
|
||||
|
||||
const encoder = createEncoder({
|
||||
contentTopic,
|
||||
contentTopic: testContentTopic,
|
||||
routingInfo: testRoutingInfo,
|
||||
ephemeral: true,
|
||||
metaSetter
|
||||
});
|
||||
const bytes = await encoder.toWire({ payload });
|
||||
const decoder = createDecoder(contentTopic);
|
||||
const decoder = createDecoder(testContentTopic, testRoutingInfo);
|
||||
const protoResult = await decoder.fromWireToProtoObj(bytes);
|
||||
const result = (await decoder.fromProtoObj(
|
||||
pubsubTopic,
|
||||
testRoutingInfo.pubsubTopic,
|
||||
protoResult!
|
||||
)) as DecodedMessage;
|
||||
|
||||
@ -99,28 +117,34 @@ describe("Waku Message version 0", function () {
|
||||
describe("Ensures content topic is defined", () => {
|
||||
it("Encoder throws on undefined content topic", () => {
|
||||
const wrapper = function (): void {
|
||||
createEncoder({ contentTopic: undefined as unknown as string });
|
||||
createEncoder({
|
||||
contentTopic: undefined as unknown as string,
|
||||
routingInfo: testRoutingInfo
|
||||
});
|
||||
};
|
||||
|
||||
expect(wrapper).to.throw("Content topic must be specified");
|
||||
});
|
||||
it("Encoder throws on empty string content topic", () => {
|
||||
const wrapper = function (): void {
|
||||
createEncoder({ contentTopic: "" });
|
||||
createEncoder({
|
||||
contentTopic: "",
|
||||
routingInfo: testRoutingInfo
|
||||
});
|
||||
};
|
||||
|
||||
expect(wrapper).to.throw("Content topic must be specified");
|
||||
});
|
||||
it("Decoder throws on undefined content topic", () => {
|
||||
const wrapper = function (): void {
|
||||
createDecoder(undefined as unknown as string);
|
||||
createDecoder(undefined as unknown as string, testRoutingInfo);
|
||||
};
|
||||
|
||||
expect(wrapper).to.throw("Content topic must be specified");
|
||||
});
|
||||
it("Decoder throws on empty string content topic", () => {
|
||||
const wrapper = function (): void {
|
||||
createDecoder("");
|
||||
createDecoder("", testRoutingInfo);
|
||||
};
|
||||
|
||||
expect(wrapper).to.throw("Content topic must be specified");
|
||||
@ -130,23 +154,73 @@ describe("Ensures content topic is defined", () => {
|
||||
describe("Sets sharding configuration correctly", () => {
|
||||
it("uses static shard pubsub topic instead of autosharding when set", async () => {
|
||||
// Create an encoder setup to use autosharding
|
||||
const ContentTopic = "/waku/2/content/test.js";
|
||||
const contentTopic = "/myapp/1/test/proto";
|
||||
const autoshardingEncoder = createEncoder({
|
||||
pubsubTopicShardInfo: { clusterId: 0 },
|
||||
contentTopic: ContentTopic
|
||||
contentTopic: contentTopic,
|
||||
routingInfo: createRoutingInfo(testNetworkConfig, { contentTopic })
|
||||
});
|
||||
|
||||
// When autosharding is enabled, we expect the shard index to be 1
|
||||
expect(autoshardingEncoder.pubsubTopic).to.be.eq("/waku/2/rs/0/1");
|
||||
expect(autoshardingEncoder.pubsubTopic).to.be.eq("/waku/2/rs/0/0");
|
||||
|
||||
// Create an encoder setup to use static sharding with the same content topic
|
||||
const singleShardInfo = { clusterId: 0, shard: 0 };
|
||||
const staticshardingEncoder = createEncoder({
|
||||
contentTopic: ContentTopic,
|
||||
pubsubTopicShardInfo: singleShardInfo
|
||||
contentTopic: contentTopic,
|
||||
routingInfo: createRoutingInfo({ clusterId: 0 }, { shardId: 3 })
|
||||
});
|
||||
|
||||
// When static sharding is enabled, we expect the shard index to be 0
|
||||
expect(staticshardingEncoder.pubsubTopic).to.be.eq("/waku/2/rs/0/0");
|
||||
expect(staticshardingEncoder.pubsubTopic).to.be.eq("/waku/2/rs/0/3");
|
||||
});
|
||||
});
|
||||
|
||||
describe("DecodedMessage lazy hash initialization", () => {
|
||||
it("should compute hash only when first accessed", () => {
|
||||
const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
const protoMessage: proto.WakuMessage = {
|
||||
payload: new Uint8Array([1, 2, 3]),
|
||||
contentTopic: "/test/1/test-proto/proto",
|
||||
timestamp: BigInt(1234567890000000),
|
||||
ephemeral: false
|
||||
};
|
||||
|
||||
const message = new DecodedMessage(pubsubTopic, protoMessage);
|
||||
|
||||
expect((message as any)._hash).to.be.undefined;
|
||||
expect((message as any)._hashStr).to.be.undefined;
|
||||
|
||||
const hash = message.hash;
|
||||
expect((message as any)._hash).to.not.be.undefined;
|
||||
expect((message as any)._hashStr).to.be.undefined;
|
||||
|
||||
const hashStr = message.hashStr;
|
||||
expect((message as any)._hashStr).to.not.be.undefined;
|
||||
|
||||
const expectedHash = messageHash(
|
||||
pubsubTopic,
|
||||
protoMessage as IProtoMessage
|
||||
);
|
||||
expect(hash).to.deep.equal(expectedHash);
|
||||
expect(hashStr).to.equal(bytesToHex(expectedHash));
|
||||
});
|
||||
|
||||
it("should return cached hash on subsequent access", () => {
|
||||
const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
const protoMessage: proto.WakuMessage = {
|
||||
payload: new Uint8Array([1, 2, 3]),
|
||||
contentTopic: "/test/1/test-proto/proto",
|
||||
timestamp: BigInt(1234567890000000),
|
||||
ephemeral: false
|
||||
};
|
||||
|
||||
const message = new DecodedMessage(pubsubTopic, protoMessage);
|
||||
|
||||
const hash1 = message.hash;
|
||||
const hash2 = message.hash;
|
||||
expect(hash1).to.equal(hash2);
|
||||
|
||||
const hashStr1 = message.hashStr;
|
||||
const hashStr2 = message.hashStr;
|
||||
expect(hashStr1).to.equal(hashStr2);
|
||||
});
|
||||
});
|
||||
|
||||
@ -7,11 +7,14 @@ import type {
|
||||
IMetaSetter,
|
||||
IProtoMessage,
|
||||
IRateLimitProof,
|
||||
PubsubTopic,
|
||||
SingleShardInfo
|
||||
IRoutingInfo,
|
||||
PubsubTopic
|
||||
} from "@waku/interfaces";
|
||||
import { proto_message as proto } from "@waku/proto";
|
||||
import { determinePubsubTopic, Logger } from "@waku/utils";
|
||||
import { Logger } from "@waku/utils";
|
||||
import { bytesToHex } from "@waku/utils/bytes";
|
||||
|
||||
import { messageHash } from "../message_hash/index.js";
|
||||
|
||||
const log = new Logger("message:version-0");
|
||||
const OneMillion = BigInt(1_000_000);
|
||||
@ -20,9 +23,12 @@ export const Version = 0;
|
||||
export { proto };
|
||||
|
||||
export class DecodedMessage implements IDecodedMessage {
|
||||
private _hash: Uint8Array | undefined;
|
||||
private _hashStr: string | undefined;
|
||||
|
||||
public constructor(
|
||||
public pubsubTopic: string,
|
||||
protected proto: proto.WakuMessage
|
||||
private proto: proto.WakuMessage
|
||||
) {}
|
||||
|
||||
public get ephemeral(): boolean {
|
||||
@ -37,8 +43,18 @@ export class DecodedMessage implements IDecodedMessage {
|
||||
return this.proto.contentTopic;
|
||||
}
|
||||
|
||||
public get _rawTimestamp(): bigint | undefined {
|
||||
return this.proto.timestamp;
|
||||
public get hash(): Uint8Array {
|
||||
if (this._hash === undefined) {
|
||||
this._hash = messageHash(this.pubsubTopic, this.proto as IProtoMessage);
|
||||
}
|
||||
return this._hash;
|
||||
}
|
||||
|
||||
public get hashStr(): string {
|
||||
if (this._hashStr === undefined) {
|
||||
this._hashStr = bytesToHex(this.hash);
|
||||
}
|
||||
return this._hashStr;
|
||||
}
|
||||
|
||||
public get timestamp(): Date | undefined {
|
||||
@ -63,7 +79,7 @@ export class DecodedMessage implements IDecodedMessage {
|
||||
public get version(): number {
|
||||
// https://rfc.vac.dev/spec/14/
|
||||
// > If omitted, the value SHOULD be interpreted as version 0.
|
||||
return this.proto.version ?? 0;
|
||||
return this.proto.version ?? Version;
|
||||
}
|
||||
|
||||
public get rateLimitProof(): IRateLimitProof | undefined {
|
||||
@ -75,7 +91,7 @@ export class Encoder implements IEncoder {
|
||||
public constructor(
|
||||
public contentTopic: string,
|
||||
public ephemeral: boolean = false,
|
||||
public pubsubTopic: PubsubTopic,
|
||||
public routingInfo: IRoutingInfo,
|
||||
public metaSetter?: IMetaSetter
|
||||
) {
|
||||
if (!contentTopic || contentTopic === "") {
|
||||
@ -83,6 +99,10 @@ export class Encoder implements IEncoder {
|
||||
}
|
||||
}
|
||||
|
||||
public get pubsubTopic(): PubsubTopic {
|
||||
return this.routingInfo.pubsubTopic;
|
||||
}
|
||||
|
||||
public async toWire(message: IMessage): Promise<Uint8Array> {
|
||||
return proto.WakuMessage.encode(await this.toProtoObj(message));
|
||||
}
|
||||
@ -116,32 +136,32 @@ export class Encoder implements IEncoder {
|
||||
* format to be sent over the Waku network. The resulting encoder can then be
|
||||
* pass to { @link @waku/interfaces!ISender.send } to automatically encode outgoing
|
||||
* messages.
|
||||
*
|
||||
* Note that a routing info may be tied to a given content topic, this is not checked by the encoder.
|
||||
*/
|
||||
export function createEncoder({
|
||||
pubsubTopic,
|
||||
pubsubTopicShardInfo,
|
||||
contentTopic,
|
||||
routingInfo,
|
||||
ephemeral,
|
||||
metaSetter
|
||||
}: EncoderOptions): Encoder {
|
||||
return new Encoder(
|
||||
contentTopic,
|
||||
ephemeral,
|
||||
determinePubsubTopic(contentTopic, pubsubTopic ?? pubsubTopicShardInfo),
|
||||
metaSetter
|
||||
);
|
||||
return new Encoder(contentTopic, ephemeral, routingInfo, metaSetter);
|
||||
}
|
||||
|
||||
export class Decoder implements IDecoder<IDecodedMessage> {
|
||||
public constructor(
|
||||
public pubsubTopic: PubsubTopic,
|
||||
public contentTopic: string
|
||||
public contentTopic: string,
|
||||
public routingInfo: IRoutingInfo
|
||||
) {
|
||||
if (!contentTopic || contentTopic === "") {
|
||||
throw new Error("Content topic must be specified");
|
||||
}
|
||||
}
|
||||
|
||||
public get pubsubTopic(): PubsubTopic {
|
||||
return this.routingInfo.pubsubTopic;
|
||||
}
|
||||
|
||||
public fromWireToProtoObj(
|
||||
bytes: Uint8Array
|
||||
): Promise<IProtoMessage | undefined> {
|
||||
@ -160,7 +180,7 @@ export class Decoder implements IDecoder<IDecodedMessage> {
|
||||
public async fromProtoObj(
|
||||
pubsubTopic: string,
|
||||
proto: IProtoMessage
|
||||
): Promise<DecodedMessage | undefined> {
|
||||
): Promise<IDecodedMessage | undefined> {
|
||||
// https://rfc.vac.dev/spec/14/
|
||||
// > If omitted, the value SHOULD be interpreted as version 0.
|
||||
if (proto.version ?? 0 !== Version) {
|
||||
@ -186,13 +206,13 @@ export class Decoder implements IDecoder<IDecodedMessage> {
|
||||
* messages.
|
||||
*
|
||||
* @param contentTopic The resulting decoder will only decode messages with this content topic.
|
||||
* @param routingInfo Routing information such as cluster id and shard id on which the message is expected to be received.
|
||||
*
|
||||
* Note that a routing info may be tied to a given content topic, this is not checked by the encoder.
|
||||
*/
|
||||
export function createDecoder(
|
||||
contentTopic: string,
|
||||
pubsubTopicShardInfo?: SingleShardInfo | PubsubTopic
|
||||
routingInfo: IRoutingInfo
|
||||
): Decoder {
|
||||
return new Decoder(
|
||||
determinePubsubTopic(contentTopic, pubsubTopicShardInfo),
|
||||
contentTopic
|
||||
);
|
||||
return new Decoder(contentTopic, routingInfo);
|
||||
}
|
||||
|
||||
1
packages/core/src/lib/message_hash/index.ts
Normal file
1
packages/core/src/lib/message_hash/index.ts
Normal file
@ -0,0 +1 @@
|
||||
export { messageHash, messageHashStr } from "./message_hash.js";
|
||||
@ -1,11 +1,11 @@
|
||||
import type { IDecodedMessage, IProtoMessage } from "@waku/interfaces";
|
||||
import type { IProtoMessage } from "@waku/interfaces";
|
||||
import { bytesToHex, hexToBytes } from "@waku/utils/bytes";
|
||||
import { expect } from "chai";
|
||||
|
||||
import { messageHash } from "./index.js";
|
||||
import { messageHash, messageHashStr } from "./index.js";
|
||||
|
||||
// https://rfc.vac.dev/spec/14/#test-vectors
|
||||
describe("RFC Test Vectors", () => {
|
||||
describe("Message Hash: RFC Test Vectors", () => {
|
||||
it("Waku message hash computation (meta size of 12 bytes)", () => {
|
||||
const expectedHash =
|
||||
"64cce733fed134e83da02b02c6f689814872b1a0ac97ea56b76095c3c72bfe05";
|
||||
@ -93,20 +93,91 @@ describe("RFC Test Vectors", () => {
|
||||
expect(bytesToHex(hash)).to.equal(expectedHash);
|
||||
});
|
||||
|
||||
it("Waku message hash computation (message is IDecodedMessage)", () => {
|
||||
it("Waku message hash computation (message is IProtoMessage with version)", () => {
|
||||
const expectedHash =
|
||||
"3f11bc950dce0e3ffdcf205ae6414c01130bb5d9f20644869bff80407fa52c8f";
|
||||
const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
const message: IDecodedMessage = {
|
||||
const message: IProtoMessage = {
|
||||
payload: new Uint8Array(),
|
||||
pubsubTopic,
|
||||
contentTopic: "/waku/2/default-content/proto",
|
||||
meta: hexToBytes("0x73757065722d736563726574"),
|
||||
timestamp: new Date("2024-04-30T10:54:14.978Z"),
|
||||
timestamp:
|
||||
BigInt(new Date("2024-04-30T10:54:14.978Z").getTime()) *
|
||||
BigInt(1000000),
|
||||
ephemeral: undefined,
|
||||
rateLimitProof: undefined
|
||||
rateLimitProof: undefined,
|
||||
version: 0
|
||||
};
|
||||
const hash = messageHash(pubsubTopic, message);
|
||||
expect(bytesToHex(hash)).to.equal(expectedHash);
|
||||
});
|
||||
});
|
||||
|
||||
describe("messageHash and messageHashStr", () => {
|
||||
const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
const testMessage: IProtoMessage = {
|
||||
payload: hexToBytes("0x010203045445535405060708"),
|
||||
contentTopic: "/waku/2/default-content/proto",
|
||||
meta: hexToBytes("0x73757065722d736563726574"),
|
||||
timestamp: BigInt("0x175789bfa23f8400"),
|
||||
ephemeral: undefined,
|
||||
rateLimitProof: undefined,
|
||||
version: undefined
|
||||
};
|
||||
|
||||
it("messageHash returns a Uint8Array", () => {
|
||||
const hash = messageHash(pubsubTopic, testMessage);
|
||||
expect(hash).to.be.instanceOf(Uint8Array);
|
||||
expect(hash.length).to.equal(32); // SHA-256 hash is 32 bytes
|
||||
});
|
||||
|
||||
it("messageHashStr returns a hex string", () => {
|
||||
const hashStr = messageHashStr(pubsubTopic, testMessage);
|
||||
expect(typeof hashStr).to.equal("string");
|
||||
expect(hashStr.length).to.equal(64); // SHA-256 hash is 32 bytes = 64 hex chars
|
||||
expect(hashStr).to.match(/^[0-9a-f]+$/); // Should be a valid hex string
|
||||
});
|
||||
|
||||
it("messageHashStr returns the same value as bytesToHex(messageHash)", () => {
|
||||
const hash = messageHash(pubsubTopic, testMessage);
|
||||
const hashStrFromBytes = bytesToHex(hash);
|
||||
const hashStr = messageHashStr(pubsubTopic, testMessage);
|
||||
expect(hashStr).to.equal(hashStrFromBytes);
|
||||
});
|
||||
|
||||
it("messageHashStr works with IProtoMessage", () => {
|
||||
const decodedMessage: IProtoMessage = {
|
||||
payload: new Uint8Array([1, 2, 3, 4]),
|
||||
contentTopic: "/waku/2/default-content/proto",
|
||||
meta: new Uint8Array([5, 6, 7, 8]),
|
||||
timestamp:
|
||||
BigInt(new Date("2024-04-30T10:54:14.978Z").getTime()) *
|
||||
BigInt(1000000),
|
||||
ephemeral: undefined,
|
||||
rateLimitProof: undefined,
|
||||
version: 0
|
||||
};
|
||||
|
||||
const hashStr = messageHashStr(pubsubTopic, decodedMessage);
|
||||
expect(typeof hashStr).to.equal("string");
|
||||
expect(hashStr.length).to.equal(64);
|
||||
});
|
||||
|
||||
it("messageHashStr produces consistent results for the same input", () => {
|
||||
const hashStr1 = messageHashStr(pubsubTopic, testMessage);
|
||||
const hashStr2 = messageHashStr(pubsubTopic, testMessage);
|
||||
expect(hashStr1).to.equal(hashStr2);
|
||||
});
|
||||
|
||||
it("messageHashStr produces different results for different inputs", () => {
|
||||
const hashStr1 = messageHashStr(pubsubTopic, testMessage);
|
||||
|
||||
const differentMessage = {
|
||||
...testMessage,
|
||||
payload: hexToBytes("0x0102030454455354050607080A") // Different payload
|
||||
};
|
||||
|
||||
const hashStr2 = messageHashStr(pubsubTopic, differentMessage);
|
||||
expect(hashStr1).to.not.equal(hashStr2);
|
||||
});
|
||||
});
|
||||
@ -11,6 +11,27 @@ import {
|
||||
/**
|
||||
* Deterministic Message Hashing as defined in
|
||||
* [14/WAKU2-MESSAGE](https://rfc.vac.dev/spec/14/#deterministic-message-hashing)
|
||||
*
|
||||
* Computes a SHA-256 hash of the concatenation of pubsub topic, payload, content topic, meta, and timestamp.
|
||||
*
|
||||
* @param pubsubTopic - The pubsub topic string
|
||||
* @param message - The message to be hashed
|
||||
* @returns A Uint8Array containing the SHA-256 hash
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* import { messageHash } from "@waku/core";
|
||||
*
|
||||
* const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
* const message = {
|
||||
* payload: new Uint8Array([1, 2, 3, 4]),
|
||||
* contentTopic: "/waku/2/default-content/proto",
|
||||
* meta: new Uint8Array([5, 6, 7, 8]),
|
||||
* timestamp: new Date()
|
||||
* };
|
||||
*
|
||||
* const hash = messageHash(pubsubTopic, message);
|
||||
* ```
|
||||
*/
|
||||
export function messageHash(
|
||||
pubsubTopic: string,
|
||||
@ -51,6 +72,30 @@ function tryConvertTimestampToBytes(
|
||||
return numberToBytes(bigIntTimestamp);
|
||||
}
|
||||
|
||||
/**
|
||||
* Computes a deterministic message hash and returns it as a hexadecimal string.
|
||||
* This is a convenience wrapper around messageHash that converts the result to a hex string.
|
||||
*
|
||||
* @param pubsubTopic - The pubsub topic string
|
||||
* @param message - The message to be hashed
|
||||
* @returns A string containing the hex representation of the SHA-256 hash
|
||||
*
|
||||
* @example
|
||||
* ```typescript
|
||||
* import { messageHashStr } from "@waku/core";
|
||||
*
|
||||
* const pubsubTopic = "/waku/2/default-waku/proto";
|
||||
* const message = {
|
||||
* payload: new Uint8Array([1, 2, 3, 4]),
|
||||
* contentTopic: "/waku/2/default-content/proto",
|
||||
* meta: new Uint8Array([5, 6, 7, 8]),
|
||||
* timestamp: new Date()
|
||||
* };
|
||||
*
|
||||
* const hashString = messageHashStr(pubsubTopic, message);
|
||||
* console.log(hashString); // e.g. "a1b2c3d4..."
|
||||
* ```
|
||||
*/
|
||||
export function messageHashStr(
|
||||
pubsubTopic: string,
|
||||
message: IProtoMessage | IDecodedMessage
|
||||
@ -1,36 +1,39 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import { IncomingStreamData } from "@libp2p/interface";
|
||||
import {
|
||||
type ClusterId,
|
||||
type IMetadata,
|
||||
type Libp2pComponents,
|
||||
type MetadataQueryResult,
|
||||
type PeerIdStr,
|
||||
ProtocolError,
|
||||
PubsubTopic,
|
||||
type ShardInfo
|
||||
} from "@waku/interfaces";
|
||||
import { proto_metadata } from "@waku/proto";
|
||||
import { encodeRelayShard, Logger, pubsubTopicsToShardInfo } from "@waku/utils";
|
||||
import { encodeRelayShard, Logger } from "@waku/utils";
|
||||
import all from "it-all";
|
||||
import * as lp from "it-length-prefixed";
|
||||
import { pipe } from "it-pipe";
|
||||
import { Uint8ArrayList } from "uint8arraylist";
|
||||
|
||||
import { BaseProtocol } from "../base_protocol.js";
|
||||
import { StreamManager } from "../stream_manager/index.js";
|
||||
|
||||
const log = new Logger("metadata");
|
||||
|
||||
export const MetadataCodec = "/vac/waku/metadata/1.0.0";
|
||||
|
||||
class Metadata extends BaseProtocol implements IMetadata {
|
||||
private libp2pComponents: Libp2pComponents;
|
||||
class Metadata implements IMetadata {
|
||||
private readonly streamManager: StreamManager;
|
||||
private readonly libp2pComponents: Libp2pComponents;
|
||||
protected handshakesConfirmed: Map<PeerIdStr, ShardInfo> = new Map();
|
||||
|
||||
public readonly multicodec = MetadataCodec;
|
||||
|
||||
public constructor(
|
||||
public pubsubTopics: PubsubTopic[],
|
||||
public clusterId: ClusterId,
|
||||
libp2p: Libp2pComponents
|
||||
) {
|
||||
super(MetadataCodec, libp2p.components, pubsubTopics);
|
||||
this.streamManager = new StreamManager(MetadataCodec, libp2p);
|
||||
this.libp2pComponents = libp2p;
|
||||
void libp2p.registrar.handle(MetadataCodec, (streamData) => {
|
||||
void this.onRequest(streamData);
|
||||
@ -41,9 +44,10 @@ class Metadata extends BaseProtocol implements IMetadata {
|
||||
* Make a metadata query to a peer
|
||||
*/
|
||||
public async query(peerId: PeerId): Promise<MetadataQueryResult> {
|
||||
const request = proto_metadata.WakuMetadataRequest.encode(
|
||||
pubsubTopicsToShardInfo(this.pubsubTopics)
|
||||
);
|
||||
const request = proto_metadata.WakuMetadataRequest.encode({
|
||||
clusterId: this.clusterId,
|
||||
shards: [] // Only services node need to provide shards
|
||||
});
|
||||
|
||||
const peer = await this.libp2pComponents.peerStore.get(peerId);
|
||||
if (!peer) {
|
||||
@ -53,11 +57,10 @@ class Metadata extends BaseProtocol implements IMetadata {
|
||||
};
|
||||
}
|
||||
|
||||
let stream;
|
||||
try {
|
||||
stream = await this.getStream(peerId);
|
||||
} catch (error) {
|
||||
log.error("Failed to get stream", error);
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
log.error(`Failed to get a stream for remote peer:${peerId.toString()}`);
|
||||
return {
|
||||
shardInfo: null,
|
||||
error: ProtocolError.NO_STREAM_AVAILABLE
|
||||
@ -109,9 +112,10 @@ class Metadata extends BaseProtocol implements IMetadata {
|
||||
private async onRequest(streamData: IncomingStreamData): Promise<void> {
|
||||
try {
|
||||
const { stream, connection } = streamData;
|
||||
const encodedShardInfo = proto_metadata.WakuMetadataResponse.encode(
|
||||
pubsubTopicsToShardInfo(this.pubsubTopics)
|
||||
);
|
||||
const encodedShardInfo = proto_metadata.WakuMetadataResponse.encode({
|
||||
clusterId: this.clusterId,
|
||||
shards: [] // Only service nodes need to provide shards
|
||||
});
|
||||
|
||||
const encodedResponse = await pipe(
|
||||
[encodedShardInfo],
|
||||
@ -175,8 +179,7 @@ class Metadata extends BaseProtocol implements IMetadata {
|
||||
}
|
||||
|
||||
export function wakuMetadata(
|
||||
pubsubTopics: PubsubTopic[]
|
||||
clusterId: ClusterId
|
||||
): (components: Libp2pComponents) => IMetadata {
|
||||
return (components: Libp2pComponents) =>
|
||||
new Metadata(pubsubTopics, components);
|
||||
return (components: Libp2pComponents) => new Metadata(clusterId, components);
|
||||
}
|
||||
|
||||
93
packages/core/src/lib/store/rpc.spec.ts
Normal file
93
packages/core/src/lib/store/rpc.spec.ts
Normal file
@ -0,0 +1,93 @@
|
||||
import { expect } from "chai";
|
||||
|
||||
import { StoreQueryRequest } from "./rpc.js";
|
||||
|
||||
describe("StoreQueryRequest validation", () => {
|
||||
it("accepts valid content-filtered query", () => {
|
||||
const request = StoreQueryRequest.create({
|
||||
pubsubTopic: "/waku/2/default-waku/proto",
|
||||
contentTopics: ["/test/1/content/proto"],
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
});
|
||||
expect(request).to.exist;
|
||||
});
|
||||
|
||||
it("rejects content-filtered query with only pubsubTopic", () => {
|
||||
expect(() =>
|
||||
StoreQueryRequest.create({
|
||||
pubsubTopic: "/waku/2/default-waku/proto",
|
||||
contentTopics: [],
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
})
|
||||
).to.throw(
|
||||
"Both pubsubTopic and contentTopics must be set together for content-filtered queries"
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects content-filtered query with only contentTopics", () => {
|
||||
expect(() =>
|
||||
StoreQueryRequest.create({
|
||||
pubsubTopic: "",
|
||||
contentTopics: ["/test/1/content/proto"],
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
})
|
||||
).to.throw(
|
||||
"Both pubsubTopic and contentTopics must be set together for content-filtered queries"
|
||||
);
|
||||
});
|
||||
|
||||
it("accepts valid message hash query", () => {
|
||||
const request = StoreQueryRequest.create({
|
||||
pubsubTopic: "",
|
||||
contentTopics: [],
|
||||
messageHashes: [new Uint8Array([1, 2, 3, 4])],
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
});
|
||||
expect(request).to.exist;
|
||||
});
|
||||
|
||||
it("rejects hash query with content filter parameters", () => {
|
||||
expect(() =>
|
||||
StoreQueryRequest.create({
|
||||
messageHashes: [new Uint8Array([1, 2, 3, 4])],
|
||||
pubsubTopic: "/waku/2/default-waku/proto",
|
||||
contentTopics: ["/test/1/content/proto"],
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
})
|
||||
).to.throw(
|
||||
"Message hash lookup queries cannot include content filter criteria"
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects hash query with time filter", () => {
|
||||
expect(() =>
|
||||
StoreQueryRequest.create({
|
||||
pubsubTopic: "",
|
||||
contentTopics: [],
|
||||
messageHashes: [new Uint8Array([1, 2, 3, 4])],
|
||||
timeStart: new Date(),
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
})
|
||||
).to.throw(
|
||||
"Message hash lookup queries cannot include content filter criteria"
|
||||
);
|
||||
});
|
||||
|
||||
it("accepts time-filtered query with content filter", () => {
|
||||
const request = StoreQueryRequest.create({
|
||||
pubsubTopic: "/waku/2/default-waku/proto",
|
||||
contentTopics: ["/test/1/content/proto"],
|
||||
timeStart: new Date(Date.now() - 3600000),
|
||||
timeEnd: new Date(),
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
});
|
||||
expect(request).to.exist;
|
||||
});
|
||||
});
|
||||
@ -6,6 +6,7 @@ import { v4 as uuid } from "uuid";
|
||||
// https://github.com/waku-org/nwaku/blob/7205f95cff9f49ca0bb762e8fd0bf56a6a7f3b3b/waku/waku_store/common.nim#L12
|
||||
export const DEFAULT_PAGE_SIZE = 20;
|
||||
export const MAX_PAGE_SIZE = 100;
|
||||
export const MAX_TIME_RANGE = 24 * 60 * 60 * 1000;
|
||||
const ONE_MILLION = 1_000000;
|
||||
|
||||
export class StoreQueryRequest {
|
||||
@ -14,6 +15,7 @@ export class StoreQueryRequest {
|
||||
public static create(params: QueryRequestParams): StoreQueryRequest {
|
||||
const request = new StoreQueryRequest({
|
||||
...params,
|
||||
contentTopics: params.contentTopics || [],
|
||||
requestId: uuid(),
|
||||
timeStart: params.timeStart
|
||||
? BigInt(params.timeStart.getTime() * ONE_MILLION)
|
||||
@ -27,26 +29,29 @@ export class StoreQueryRequest {
|
||||
: undefined
|
||||
});
|
||||
|
||||
// Validate request parameters based on RFC
|
||||
if (
|
||||
(params.pubsubTopic && !params.contentTopics) ||
|
||||
(!params.pubsubTopic && params.contentTopics)
|
||||
) {
|
||||
throw new Error(
|
||||
"Both pubsubTopic and contentTopics must be set or unset"
|
||||
);
|
||||
}
|
||||
const isHashQuery = params.messageHashes && params.messageHashes.length > 0;
|
||||
const hasContentTopics =
|
||||
params.contentTopics && params.contentTopics.length > 0;
|
||||
const hasTimeFilter = params.timeStart || params.timeEnd;
|
||||
|
||||
if (
|
||||
params.messageHashes &&
|
||||
(params.pubsubTopic ||
|
||||
params.contentTopics ||
|
||||
params.timeStart ||
|
||||
params.timeEnd)
|
||||
) {
|
||||
throw new Error(
|
||||
"Message hash lookup queries cannot include content filter criteria"
|
||||
);
|
||||
if (isHashQuery) {
|
||||
if (hasContentTopics || hasTimeFilter) {
|
||||
throw new Error(
|
||||
"Message hash lookup queries cannot include content filter criteria (contentTopics, timeStart, or timeEnd)"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
if (
|
||||
(params.pubsubTopic &&
|
||||
(!params.contentTopics || params.contentTopics.length === 0)) ||
|
||||
(!params.pubsubTopic &&
|
||||
params.contentTopics &&
|
||||
params.contentTopics.length > 0)
|
||||
) {
|
||||
throw new Error(
|
||||
"Both pubsubTopic and contentTopics must be set together for content-filtered queries"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return request;
|
||||
|
||||
230
packages/core/src/lib/store/store.spec.ts
Normal file
230
packages/core/src/lib/store/store.spec.ts
Normal file
@ -0,0 +1,230 @@
|
||||
import type { PeerId } from "@libp2p/interface";
|
||||
import {
|
||||
IDecodedMessage,
|
||||
IDecoder,
|
||||
Libp2p,
|
||||
QueryRequestParams
|
||||
} from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { StreamManager } from "../stream_manager/index.js";
|
||||
|
||||
import {
|
||||
MAX_PAGE_SIZE,
|
||||
MAX_TIME_RANGE,
|
||||
StoreQueryRequest,
|
||||
StoreQueryResponse
|
||||
} from "./rpc.js";
|
||||
import { StoreCore } from "./store.js";
|
||||
|
||||
describe("StoreCore", () => {
|
||||
let libp2p: Libp2p;
|
||||
let storeCore: StoreCore;
|
||||
let mockStreamManager: sinon.SinonStubbedInstance<StreamManager>;
|
||||
let mockPeerId: PeerId;
|
||||
let mockStream: any;
|
||||
let mockDecoder: sinon.SinonStubbedInstance<IDecoder<IDecodedMessage>>;
|
||||
let decoders: Map<string, IDecoder<IDecodedMessage>>;
|
||||
|
||||
const createMockPeerId = (id: string): PeerId =>
|
||||
({
|
||||
toString: () => id,
|
||||
equals: (other: PeerId) => other.toString() === id
|
||||
}) as PeerId;
|
||||
|
||||
beforeEach(() => {
|
||||
libp2p = {
|
||||
components: {
|
||||
events: {
|
||||
addEventListener: sinon.stub(),
|
||||
removeEventListener: sinon.stub()
|
||||
},
|
||||
connectionManager: {
|
||||
getConnections: sinon.stub().returns([])
|
||||
}
|
||||
}
|
||||
} as unknown as Libp2p;
|
||||
|
||||
mockStreamManager = {
|
||||
getStream: sinon.stub()
|
||||
} as unknown as sinon.SinonStubbedInstance<StreamManager>;
|
||||
|
||||
mockPeerId = createMockPeerId("12D3KooWTest1");
|
||||
|
||||
mockStream = {
|
||||
sink: sinon.stub(),
|
||||
source: []
|
||||
};
|
||||
|
||||
mockDecoder = {
|
||||
fromProtoObj: sinon.stub()
|
||||
} as unknown as sinon.SinonStubbedInstance<IDecoder<IDecodedMessage>>;
|
||||
|
||||
decoders = new Map([["test-topic", mockDecoder]]);
|
||||
|
||||
sinon
|
||||
.stub(StreamManager.prototype, "getStream")
|
||||
.callsFake(mockStreamManager.getStream);
|
||||
storeCore = new StoreCore(libp2p);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
describe("queryPerPage", () => {
|
||||
let queryOpts: QueryRequestParams;
|
||||
let mockStoreQueryRequest: any;
|
||||
let mockStoreQueryResponse: any;
|
||||
|
||||
beforeEach(() => {
|
||||
queryOpts = {
|
||||
pubsubTopic: "test-topic",
|
||||
contentTopics: ["test-topic"],
|
||||
paginationLimit: 10,
|
||||
includeData: true,
|
||||
paginationForward: true
|
||||
};
|
||||
|
||||
mockStoreQueryRequest = {
|
||||
encode: sinon.stub().returns(new Uint8Array([1, 2, 3]))
|
||||
};
|
||||
|
||||
mockStoreQueryResponse = {
|
||||
statusCode: 200,
|
||||
statusDesc: "OK",
|
||||
messages: [
|
||||
{
|
||||
messageHash: new Uint8Array([1]),
|
||||
message: {
|
||||
contentTopic: "test-topic"
|
||||
},
|
||||
pubsubTopic: "test-topic"
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
sinon.stub(StoreQueryRequest, "create").returns(mockStoreQueryRequest);
|
||||
sinon.stub(StoreQueryResponse, "decode").returns(mockStoreQueryResponse);
|
||||
});
|
||||
|
||||
it("throws if time range exceeds MAX_TIME_RANGE", async () => {
|
||||
queryOpts.timeStart = new Date();
|
||||
queryOpts.timeEnd = new Date(
|
||||
queryOpts.timeStart.getTime() + MAX_TIME_RANGE + 1000
|
||||
);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
try {
|
||||
await generator.next();
|
||||
expect.fail("Should have thrown an error");
|
||||
} catch (error) {
|
||||
expect((error as Error).message).to.equal("Time range bigger than 24h");
|
||||
}
|
||||
});
|
||||
|
||||
it("throws if decoders don't match content topics", async () => {
|
||||
const differentDecoders = new Map([["different-topic", mockDecoder]]);
|
||||
const generator = storeCore.queryPerPage(
|
||||
queryOpts,
|
||||
differentDecoders,
|
||||
mockPeerId
|
||||
);
|
||||
try {
|
||||
await generator.next();
|
||||
expect.fail("Should have thrown an error");
|
||||
} catch (error) {
|
||||
expect((error as Error).message).to.equal(
|
||||
"Internal error, the decoders should match the query's content topics"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
it("does not validate decoders for hash queries", async () => {
|
||||
queryOpts.messageHashes = [new Uint8Array([1, 2, 3])];
|
||||
queryOpts.contentTopics = [];
|
||||
const differentDecoders = new Map([["different-topic", mockDecoder]]);
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(
|
||||
queryOpts,
|
||||
differentDecoders,
|
||||
mockPeerId
|
||||
);
|
||||
const result = await generator.next();
|
||||
expect(result.done).to.be.false;
|
||||
});
|
||||
|
||||
it("ends if stream creation fails", async () => {
|
||||
mockStreamManager.getStream.resolves(undefined as any);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
const result = await generator.next();
|
||||
expect(result.done).to.be.true;
|
||||
});
|
||||
|
||||
it("throws if store query response has error status", async () => {
|
||||
mockStoreQueryResponse.statusCode = 400;
|
||||
mockStoreQueryResponse.statusDesc = "Bad Request";
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
try {
|
||||
await generator.next();
|
||||
expect.fail("Should have thrown an error");
|
||||
} catch (error) {
|
||||
expect((error as Error).message).to.equal(
|
||||
"Store query failed with status code: 400, description: Bad Request"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
it("ends if response has no messages", async () => {
|
||||
mockStoreQueryResponse.messages = [];
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
const result = await generator.next();
|
||||
expect(result.done).to.be.true;
|
||||
});
|
||||
|
||||
it("yields decoded messages", async () => {
|
||||
const mockDecodedMessage = {
|
||||
contentTopic: "test-topic"
|
||||
} as IDecodedMessage;
|
||||
mockDecoder.fromProtoObj.resolves(mockDecodedMessage);
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
const result = await generator.next();
|
||||
const decodedMessage = await result.value[0];
|
||||
expect(decodedMessage).to.equal(mockDecodedMessage);
|
||||
});
|
||||
|
||||
it("yields undefined for messages without content topic", async () => {
|
||||
mockStoreQueryResponse.messages[0].message.contentTopic = undefined;
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
const result = await generator.next();
|
||||
const decodedMessage = await result.value[0];
|
||||
expect(decodedMessage).to.be.undefined;
|
||||
});
|
||||
|
||||
it("yields undefined for messages without decoder", async () => {
|
||||
mockStoreQueryResponse.messages[0].message.contentTopic = "unknown-topic";
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
const result = await generator.next();
|
||||
const decodedMessage = await result.value[0];
|
||||
expect(decodedMessage).to.be.undefined;
|
||||
});
|
||||
|
||||
it("ends after yielding if response size indicates end", async () => {
|
||||
queryOpts.paginationLimit = MAX_PAGE_SIZE + 10;
|
||||
mockStoreQueryResponse.messages = new Array(MAX_PAGE_SIZE + 1).fill({
|
||||
messageHash: new Uint8Array([1]),
|
||||
message: { contentTopic: "test-topic" }
|
||||
});
|
||||
mockStreamManager.getStream.resolves(mockStream);
|
||||
const generator = storeCore.queryPerPage(queryOpts, decoders, mockPeerId);
|
||||
await generator.next();
|
||||
const second = await generator.next();
|
||||
expect(second.done).to.be.true;
|
||||
});
|
||||
});
|
||||
});
|
||||
@ -2,9 +2,7 @@ import type { PeerId } from "@libp2p/interface";
|
||||
import {
|
||||
IDecodedMessage,
|
||||
IDecoder,
|
||||
IStoreCore,
|
||||
Libp2p,
|
||||
PubsubTopic,
|
||||
QueryRequestParams
|
||||
} from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
@ -13,12 +11,13 @@ import * as lp from "it-length-prefixed";
|
||||
import { pipe } from "it-pipe";
|
||||
import { Uint8ArrayList } from "uint8arraylist";
|
||||
|
||||
import { BaseProtocol } from "../base_protocol.js";
|
||||
import { StreamManager } from "../stream_manager/index.js";
|
||||
import { toProtoMessage } from "../to_proto_message.js";
|
||||
|
||||
import {
|
||||
DEFAULT_PAGE_SIZE,
|
||||
MAX_PAGE_SIZE,
|
||||
MAX_TIME_RANGE,
|
||||
StoreQueryRequest,
|
||||
StoreQueryResponse
|
||||
} from "./rpc.js";
|
||||
@ -27,12 +26,21 @@ const log = new Logger("store");
|
||||
|
||||
export const StoreCodec = "/vac/waku/store-query/3.0.0";
|
||||
|
||||
export class StoreCore extends BaseProtocol implements IStoreCore {
|
||||
public constructor(
|
||||
public readonly pubsubTopics: PubsubTopic[],
|
||||
libp2p: Libp2p
|
||||
) {
|
||||
super(StoreCodec, libp2p.components, pubsubTopics);
|
||||
export class StoreCore {
|
||||
private readonly streamManager: StreamManager;
|
||||
|
||||
public readonly multicodec = StoreCodec;
|
||||
|
||||
public constructor(libp2p: Libp2p) {
|
||||
this.streamManager = new StreamManager(StoreCodec, libp2p.components);
|
||||
}
|
||||
|
||||
public stop(): void {
|
||||
this.streamManager.stop();
|
||||
}
|
||||
|
||||
public get maxTimeLimit(): number {
|
||||
return MAX_TIME_RANGE;
|
||||
}
|
||||
|
||||
public async *queryPerPage<T extends IDecodedMessage>(
|
||||
@ -40,9 +48,22 @@ export class StoreCore extends BaseProtocol implements IStoreCore {
|
||||
decoders: Map<string, IDecoder<T>>,
|
||||
peerId: PeerId
|
||||
): AsyncGenerator<Promise<T | undefined>[]> {
|
||||
if (queryOpts.timeStart && queryOpts.timeEnd) {
|
||||
const timeDiff =
|
||||
queryOpts.timeEnd.getTime() - queryOpts.timeStart.getTime();
|
||||
if (timeDiff > MAX_TIME_RANGE) {
|
||||
throw new Error("Time range bigger than 24h");
|
||||
}
|
||||
}
|
||||
|
||||
// Only validate decoder content topics for content-filtered queries
|
||||
const isHashQuery =
|
||||
queryOpts.messageHashes && queryOpts.messageHashes.length > 0;
|
||||
if (
|
||||
!isHashQuery &&
|
||||
queryOpts.contentTopics &&
|
||||
queryOpts.contentTopics.toString() !==
|
||||
Array.from(decoders.keys()).toString()
|
||||
Array.from(decoders.keys()).toString()
|
||||
) {
|
||||
throw new Error(
|
||||
"Internal error, the decoders should match the query's content topics"
|
||||
@ -51,26 +72,48 @@ export class StoreCore extends BaseProtocol implements IStoreCore {
|
||||
|
||||
let currentCursor = queryOpts.paginationCursor;
|
||||
while (true) {
|
||||
if (queryOpts.abortSignal?.aborted) {
|
||||
log.info("Store query aborted by signal");
|
||||
break;
|
||||
}
|
||||
|
||||
const storeQueryRequest = StoreQueryRequest.create({
|
||||
...queryOpts,
|
||||
paginationCursor: currentCursor
|
||||
});
|
||||
|
||||
let stream;
|
||||
try {
|
||||
stream = await this.getStream(peerId);
|
||||
} catch (e) {
|
||||
log.error("Failed to get stream", e);
|
||||
log.info("Sending store query request:", {
|
||||
hasMessageHashes: !!queryOpts.messageHashes?.length,
|
||||
messageHashCount: queryOpts.messageHashes?.length,
|
||||
pubsubTopic: queryOpts.pubsubTopic,
|
||||
contentTopics: queryOpts.contentTopics
|
||||
});
|
||||
|
||||
const stream = await this.streamManager.getStream(peerId);
|
||||
|
||||
if (!stream) {
|
||||
log.error(
|
||||
`Failed to get a stream for remote peer:${peerId.toString()}`
|
||||
);
|
||||
break;
|
||||
}
|
||||
|
||||
const res = await pipe(
|
||||
[storeQueryRequest.encode()],
|
||||
lp.encode,
|
||||
stream,
|
||||
lp.decode,
|
||||
async (source) => await all(source)
|
||||
);
|
||||
let res;
|
||||
try {
|
||||
res = await pipe(
|
||||
[storeQueryRequest.encode()],
|
||||
lp.encode,
|
||||
stream,
|
||||
lp.decode,
|
||||
async (source) => await all(source)
|
||||
);
|
||||
} catch (error) {
|
||||
if (error instanceof Error && error.name === "AbortError") {
|
||||
log.info(`Store query aborted for peer ${peerId.toString()}`);
|
||||
break;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
|
||||
const bytes = new Uint8ArrayList();
|
||||
res.forEach((chunk) => {
|
||||
@ -97,6 +140,11 @@ export class StoreCore extends BaseProtocol implements IStoreCore {
|
||||
`${storeQueryResponse.messages.length} messages retrieved from store`
|
||||
);
|
||||
|
||||
if (queryOpts.abortSignal?.aborted) {
|
||||
log.info("Store query aborted by signal before processing messages");
|
||||
break;
|
||||
}
|
||||
|
||||
const decodedMessages = storeQueryResponse.messages.map((protoMsg) => {
|
||||
if (!protoMsg.message) {
|
||||
return Promise.resolve(undefined);
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { Connection, Peer, PeerId, Stream } from "@libp2p/interface";
|
||||
import { Libp2pComponents } from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
@ -20,11 +21,14 @@ describe("StreamManager", () => {
|
||||
|
||||
beforeEach(() => {
|
||||
eventTarget = new EventTarget();
|
||||
streamManager = new StreamManager(
|
||||
MULTICODEC,
|
||||
() => [],
|
||||
eventTarget.addEventListener.bind(eventTarget)
|
||||
);
|
||||
streamManager = new StreamManager(MULTICODEC, {
|
||||
connectionManager: { getConnections: () => [] },
|
||||
events: eventTarget
|
||||
} as any as Libp2pComponents);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
sinon.restore();
|
||||
});
|
||||
|
||||
it("should return usable stream attached to connection", async () => {
|
||||
@ -34,7 +38,9 @@ describe("StreamManager", () => {
|
||||
createMockStream({ id: "1", protocol: MULTICODEC, writeStatus })
|
||||
];
|
||||
|
||||
streamManager["getConnections"] = (_peerId: PeerId | undefined) => [con1];
|
||||
streamManager["libp2p"]["connectionManager"]["getConnections"] = (
|
||||
_peerId: PeerId | undefined
|
||||
) => [con1];
|
||||
|
||||
const stream = await streamManager.getStream(mockPeer.id);
|
||||
|
||||
@ -43,19 +49,13 @@ describe("StreamManager", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("should throw if no connection provided", async () => {
|
||||
streamManager["getConnections"] = (_peerId: PeerId | undefined) => [];
|
||||
it("should return undefined if no connection provided", async () => {
|
||||
streamManager["libp2p"]["connectionManager"]["getConnections"] = (
|
||||
_peerId: PeerId | undefined
|
||||
) => [];
|
||||
|
||||
let error: Error | undefined;
|
||||
try {
|
||||
await streamManager.getStream(mockPeer.id);
|
||||
} catch (e) {
|
||||
error = e as Error;
|
||||
}
|
||||
|
||||
expect(error).not.to.be.undefined;
|
||||
expect(error?.message).to.include(mockPeer.id.toString());
|
||||
expect(error?.message).to.include(MULTICODEC);
|
||||
const stream = await streamManager.getStream(mockPeer.id);
|
||||
expect(stream).to.be.undefined;
|
||||
});
|
||||
|
||||
it("should create a new stream if no existing for protocol found", async () => {
|
||||
@ -74,7 +74,9 @@ describe("StreamManager", () => {
|
||||
);
|
||||
|
||||
con1.newStream = newStreamSpy;
|
||||
streamManager["getConnections"] = (_peerId: PeerId | undefined) => [con1];
|
||||
streamManager["libp2p"]["connectionManager"]["getConnections"] = (
|
||||
_peerId: PeerId | undefined
|
||||
) => [con1];
|
||||
|
||||
const stream = await streamManager.getStream(mockPeer.id);
|
||||
|
||||
@ -99,15 +101,20 @@ describe("StreamManager", () => {
|
||||
);
|
||||
|
||||
con1.newStream = newStreamSpy;
|
||||
streamManager["getConnections"] = (_peerId: PeerId | undefined) => [con1];
|
||||
streamManager["libp2p"]["connectionManager"]["getConnections"] = (
|
||||
_peerId: PeerId | undefined
|
||||
) => [con1];
|
||||
|
||||
const [stream1, stream2] = await Promise.all([
|
||||
streamManager.getStream(mockPeer.id),
|
||||
streamManager.getStream(mockPeer.id)
|
||||
]);
|
||||
|
||||
expect(stream1).to.not.be.undefined;
|
||||
expect(stream2).to.not.be.undefined;
|
||||
|
||||
const expected = ["1", "2"].toString();
|
||||
const actual = [stream1.id, stream2.id].sort().toString();
|
||||
const actual = [stream1?.id, stream2?.id].sort().toString();
|
||||
|
||||
expect(actual).to.be.eq(expected);
|
||||
});
|
||||
@ -116,7 +123,9 @@ describe("StreamManager", () => {
|
||||
const scheduleNewStreamSpy = sinon.spy();
|
||||
streamManager["scheduleNewStream"] = scheduleNewStreamSpy;
|
||||
eventTarget.dispatchEvent(
|
||||
new CustomEvent("peer:update", { detail: { peer: { protocols: [] } } })
|
||||
new CustomEvent("peer:update", {
|
||||
detail: { peer: { id: mockPeer.id, protocols: [] } }
|
||||
})
|
||||
);
|
||||
|
||||
expect(scheduleNewStreamSpy.calledOnce).to.be.false;
|
||||
@ -127,7 +136,7 @@ describe("StreamManager", () => {
|
||||
streamManager["scheduleNewStream"] = scheduleNewStreamSpy;
|
||||
eventTarget.dispatchEvent(
|
||||
new CustomEvent("peer:update", {
|
||||
detail: { peer: { protocols: [MULTICODEC] } }
|
||||
detail: { peer: { id: mockPeer.id, protocols: [MULTICODEC] } }
|
||||
})
|
||||
);
|
||||
|
||||
@ -143,14 +152,16 @@ describe("StreamManager", () => {
|
||||
writeStatus: "writable"
|
||||
})
|
||||
];
|
||||
streamManager["getConnections"] = (_id) => [con1];
|
||||
streamManager["libp2p"]["connectionManager"]["getConnections"] = (
|
||||
_id: PeerId | undefined
|
||||
) => [con1];
|
||||
|
||||
const scheduleNewStreamSpy = sinon.spy();
|
||||
streamManager["scheduleNewStream"] = scheduleNewStreamSpy;
|
||||
|
||||
eventTarget.dispatchEvent(
|
||||
new CustomEvent("peer:update", {
|
||||
detail: { peer: { protocols: [MULTICODEC] } }
|
||||
detail: { peer: { id: mockPeer.id, protocols: [MULTICODEC] } }
|
||||
})
|
||||
);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import type { Peer, PeerId, PeerUpdate, Stream } from "@libp2p/interface";
|
||||
import type { Libp2p } from "@waku/interfaces";
|
||||
import type { Libp2pComponents } from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
|
||||
import { selectOpenConnection } from "./utils.js";
|
||||
@ -13,47 +13,66 @@ export class StreamManager {
|
||||
private streamPool: Map<string, Promise<void>> = new Map();
|
||||
|
||||
public constructor(
|
||||
private multicodec: string,
|
||||
private getConnections: Libp2p["getConnections"],
|
||||
private addEventListener: Libp2p["addEventListener"]
|
||||
private readonly multicodec: string,
|
||||
private readonly libp2p: Libp2pComponents
|
||||
) {
|
||||
this.log = new Logger(`stream-manager:${multicodec}`);
|
||||
this.addEventListener("peer:update", this.handlePeerUpdateStreamPool);
|
||||
this.libp2p.events.addEventListener(
|
||||
"peer:update",
|
||||
this.handlePeerUpdateStreamPool
|
||||
);
|
||||
}
|
||||
|
||||
public async getStream(peerId: PeerId): Promise<Stream> {
|
||||
const peerIdStr = peerId.toString();
|
||||
const scheduledStream = this.streamPool.get(peerIdStr);
|
||||
public stop(): void {
|
||||
this.libp2p.events.removeEventListener(
|
||||
"peer:update",
|
||||
this.handlePeerUpdateStreamPool
|
||||
);
|
||||
this.streamPool.clear();
|
||||
this.ongoingCreation.clear();
|
||||
}
|
||||
|
||||
if (scheduledStream) {
|
||||
this.streamPool.delete(peerIdStr);
|
||||
await scheduledStream;
|
||||
}
|
||||
public async getStream(peerId: PeerId): Promise<Stream | undefined> {
|
||||
try {
|
||||
const peerIdStr = peerId.toString();
|
||||
const scheduledStream = this.streamPool.get(peerIdStr);
|
||||
|
||||
let stream = this.getOpenStreamForCodec(peerId);
|
||||
if (scheduledStream) {
|
||||
this.streamPool.delete(peerIdStr);
|
||||
await scheduledStream;
|
||||
}
|
||||
|
||||
const stream =
|
||||
this.getOpenStreamForCodec(peerId) || (await this.createStream(peerId));
|
||||
|
||||
if (!stream) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (stream) {
|
||||
this.log.info(
|
||||
`Found existing stream peerId=${peerIdStr} multicodec=${this.multicodec}`
|
||||
`Using stream for peerId=${peerIdStr} multicodec=${this.multicodec}`
|
||||
);
|
||||
|
||||
this.lockStream(peerIdStr, stream);
|
||||
return stream;
|
||||
} catch (error) {
|
||||
this.log.error(`Failed to getStream:`, error);
|
||||
return;
|
||||
}
|
||||
|
||||
stream = await this.createStream(peerId);
|
||||
this.lockStream(peerIdStr, stream);
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
private async createStream(peerId: PeerId, retries = 0): Promise<Stream> {
|
||||
const connections = this.getConnections(peerId);
|
||||
private async createStream(
|
||||
peerId: PeerId,
|
||||
retries = 0
|
||||
): Promise<Stream | undefined> {
|
||||
const connections = this.libp2p.connectionManager.getConnections(peerId);
|
||||
const connection = selectOpenConnection(connections);
|
||||
|
||||
if (!connection) {
|
||||
throw new Error(
|
||||
this.log.error(
|
||||
`Failed to get a connection to the peer peerId=${peerId.toString()} multicodec=${this.multicodec}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let lastError: unknown;
|
||||
@ -75,9 +94,10 @@ export class StreamManager {
|
||||
}
|
||||
|
||||
if (!stream) {
|
||||
throw new Error(
|
||||
this.log.error(
|
||||
`Failed to create a new stream for ${peerId.toString()} -- ` + lastError
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
return stream;
|
||||
@ -135,10 +155,13 @@ export class StreamManager {
|
||||
}
|
||||
|
||||
private getOpenStreamForCodec(peerId: PeerId): Stream | undefined {
|
||||
const connections = this.getConnections(peerId);
|
||||
const connections = this.libp2p.connectionManager.getConnections(peerId);
|
||||
const connection = selectOpenConnection(connections);
|
||||
|
||||
if (!connection) {
|
||||
this.log.info(
|
||||
`No open connection found for peerId=${peerId.toString()} multicodec=${this.multicodec}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -147,16 +170,27 @@ export class StreamManager {
|
||||
);
|
||||
|
||||
if (!stream) {
|
||||
this.log.info(
|
||||
`No open stream found for peerId=${peerId.toString()} multicodec=${this.multicodec}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const isStreamUnusable = ["done", "closed", "closing"].includes(
|
||||
stream.writeStatus || ""
|
||||
);
|
||||
|
||||
if (isStreamUnusable || this.isStreamLocked(stream)) {
|
||||
this.log.info(
|
||||
`Stream for peerId=${peerId.toString()} multicodec=${this.multicodec} is unusable`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
this.log.info(
|
||||
`Found open stream for peerId=${peerId.toString()} multicodec=${this.multicodec}`
|
||||
);
|
||||
|
||||
return stream;
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,119 @@
|
||||
# Changelog
|
||||
|
||||
## [0.0.14](https://github.com/logos-messaging/logos-messaging-js/compare/discovery-v0.0.13...discovery-v0.0.14) (2026-01-16)
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.40 to 0.0.41
|
||||
* @waku/enr bumped from 0.0.33 to 0.0.34
|
||||
* @waku/interfaces bumped from 0.0.34 to 0.0.35
|
||||
* @waku/utils bumped from 0.0.27 to 0.0.28
|
||||
|
||||
## [0.0.13](https://github.com/waku-org/js-waku/compare/discovery-v0.0.12...discovery-v0.0.13) (2025-10-31)
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.39 to 0.0.40
|
||||
* @waku/proto bumped from ^0.0.14 to ^0.0.15
|
||||
|
||||
## [0.0.12](https://github.com/waku-org/js-waku/compare/discovery-v0.0.11...discovery-v0.0.12) (2025-09-20)
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.38 to 0.0.39
|
||||
* @waku/enr bumped from 0.0.32 to 0.0.33
|
||||
* @waku/interfaces bumped from 0.0.33 to 0.0.34
|
||||
* @waku/proto bumped from ^0.0.13 to ^0.0.14
|
||||
* @waku/utils bumped from 0.0.26 to 0.0.27
|
||||
|
||||
## [0.0.11](https://github.com/waku-org/js-waku/compare/discovery-v0.0.10...discovery-v0.0.11) (2025-08-14)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* local peer discovery improvements ([#2557](https://github.com/waku-org/js-waku/issues/2557))
|
||||
* Introduce routing info concept
|
||||
|
||||
### Features
|
||||
|
||||
* Introduce routing info concept ([3842d84](https://github.com/waku-org/js-waku/commit/3842d84b55eb96728f6b05b9307ff823fac58a54))
|
||||
* Local peer discovery improvements ([#2557](https://github.com/waku-org/js-waku/issues/2557)) ([eab8ce8](https://github.com/waku-org/js-waku/commit/eab8ce81b431b11d79dcbec31aea759319853336))
|
||||
* Peer exchange discovery improvements ([#2537](https://github.com/waku-org/js-waku/issues/2537)) ([95da57a](https://github.com/waku-org/js-waku/commit/95da57a8705fa195529ef52a6c908642da5e120c))
|
||||
* Retrieve peers from all passed enrtree URLs ([25f884e](https://github.com/waku-org/js-waku/commit/25f884e05b430cebe3b6650c16026d771d1b7626))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* Do not limit DNS Peer Discovery on capability ([0dfe352](https://github.com/waku-org/js-waku/commit/0dfe35281c677e91c064557a83a50e6a1ca6d0ac))
|
||||
* Improve error handling for stream manager ([#2546](https://github.com/waku-org/js-waku/issues/2546)) ([ada2657](https://github.com/waku-org/js-waku/commit/ada265731acfeddc2bfe2e8e963bc2be37f13900))
|
||||
* Prevent setting shard info from PX if it exists ([#2561](https://github.com/waku-org/js-waku/issues/2561)) ([dfb2baf](https://github.com/waku-org/js-waku/commit/dfb2baf004a58c29f7afd0144c82a8d2e6710d5a))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.37 to 0.0.38
|
||||
* @waku/enr bumped from 0.0.31 to 0.0.32
|
||||
* @waku/interfaces bumped from 0.0.32 to 0.0.33
|
||||
* @waku/proto bumped from ^0.0.12 to ^0.0.13
|
||||
* @waku/utils bumped from 0.0.25 to 0.0.26
|
||||
|
||||
## [0.0.10](https://github.com/waku-org/js-waku/compare/discovery-v0.0.9...discovery-v0.0.10) (2025-07-18)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* re-architect connection manager ([#2445](https://github.com/waku-org/js-waku/issues/2445))
|
||||
|
||||
### Features
|
||||
|
||||
* Re-architect connection manager ([#2445](https://github.com/waku-org/js-waku/issues/2445)) ([c7682ea](https://github.com/waku-org/js-waku/commit/c7682ea67c54d2c26a68ce96208003fb1ffc915c))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.36 to 0.0.37
|
||||
* @waku/enr bumped from 0.0.30 to 0.0.31
|
||||
* @waku/interfaces bumped from 0.0.31 to 0.0.32
|
||||
* @waku/proto bumped from ^0.0.11 to ^0.0.12
|
||||
* @waku/utils bumped from 0.0.24 to 0.0.25
|
||||
|
||||
## [0.0.9](https://github.com/waku-org/js-waku/compare/discovery-v0.0.8...discovery-v0.0.9) (2025-06-23)
|
||||
|
||||
|
||||
### ⚠ BREAKING CHANGES
|
||||
|
||||
* upgrade libp2p, nodejs and typescript ([#2401](https://github.com/waku-org/js-waku/issues/2401))
|
||||
* remove IBaseProtocol and improve interface on PeerExchange ([#2422](https://github.com/waku-org/js-waku/issues/2422))
|
||||
|
||||
### Miscellaneous Chores
|
||||
|
||||
* Remove IBaseProtocol and improve interface on PeerExchange ([#2422](https://github.com/waku-org/js-waku/issues/2422)) ([7c8d107](https://github.com/waku-org/js-waku/commit/7c8d1073b0d076117fb33ce05452a88871259782))
|
||||
* Upgrade libp2p, nodejs and typescript ([#2401](https://github.com/waku-org/js-waku/issues/2401)) ([fcc6496](https://github.com/waku-org/js-waku/commit/fcc6496fef914c56f6a4d2d17c494c8b94caea3c))
|
||||
|
||||
|
||||
### Dependencies
|
||||
|
||||
* The following workspace dependencies were updated
|
||||
* dependencies
|
||||
* @waku/core bumped from 0.0.35 to 0.0.36
|
||||
* @waku/enr bumped from 0.0.29 to 0.0.30
|
||||
* @waku/interfaces bumped from 0.0.30 to 0.0.31
|
||||
* @waku/proto bumped from ^0.0.10 to ^0.0.11
|
||||
* @waku/utils bumped from 0.0.23 to 0.0.24
|
||||
|
||||
## [0.0.8](https://github.com/waku-org/js-waku/compare/discovery-v0.0.7...discovery-v0.0.8) (2025-04-23)
|
||||
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@waku/discovery",
|
||||
"version": "0.0.8",
|
||||
"version": "0.0.14",
|
||||
"description": "Contains various discovery mechanisms: DNS Discovery (EIP-1459, Peer Exchange, Local Peer Cache Discovery.",
|
||||
"types": "./dist/index.d.ts",
|
||||
"module": "./dist/index.js",
|
||||
@ -12,17 +12,18 @@
|
||||
},
|
||||
"type": "module",
|
||||
"author": "Waku Team",
|
||||
"homepage": "https://github.com/waku-org/js-waku/tree/master/packages/discovery#readme",
|
||||
"homepage": "https://github.com/logos-messaging/logos-messaging-js/tree/master/packages/discovery#readme",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/waku-org/js-waku.git"
|
||||
"url": "git+https://github.com/logos-messaging/logos-messaging-js.git"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/waku-org/js-waku/issues"
|
||||
"url": "https://github.com/logos-messaging/logos-messaging-js/issues"
|
||||
},
|
||||
"license": "MIT OR Apache-2.0",
|
||||
"keywords": [
|
||||
"waku",
|
||||
"logos-messaging",
|
||||
"decentralized",
|
||||
"secure",
|
||||
"communication",
|
||||
@ -48,34 +49,32 @@
|
||||
"test:browser": "NODE_ENV=test karma start karma.conf.cjs"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
"node": ">=22"
|
||||
},
|
||||
"dependencies": {
|
||||
"@waku/core": "0.0.35",
|
||||
"@waku/enr": "0.0.29",
|
||||
"@waku/interfaces": "0.0.30",
|
||||
"@waku/proto": "^0.0.10",
|
||||
"@waku/utils": "0.0.23",
|
||||
"@waku/core": "0.0.41",
|
||||
"@waku/enr": "0.0.34",
|
||||
"@waku/interfaces": "0.0.35",
|
||||
"@waku/proto": "^0.0.15",
|
||||
"@waku/utils": "0.0.28",
|
||||
"debug": "^4.3.4",
|
||||
"dns-over-http-resolver": "^3.0.8",
|
||||
"hi-base32": "^0.5.1",
|
||||
"uint8arrays": "^5.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@libp2p/interface": "^2.1.3",
|
||||
"@libp2p/peer-id": "5.0.1",
|
||||
"@libp2p/interface": "2.10.4",
|
||||
"@libp2p/peer-id": "5.1.7",
|
||||
"@multiformats/multiaddr": "^12.3.0",
|
||||
"@rollup/plugin-commonjs": "^25.0.7",
|
||||
"@rollup/plugin-json": "^6.0.0",
|
||||
"@rollup/plugin-node-resolve": "^15.2.3",
|
||||
"@types/chai": "^4.3.11",
|
||||
"@types/node-localstorage": "^1.3.3",
|
||||
"@waku/build-utils": "*",
|
||||
"chai": "^4.3.10",
|
||||
"chai-as-promised": "^7.1.1",
|
||||
"cspell": "^8.6.1",
|
||||
"mocha": "^10.3.0",
|
||||
"node-localstorage": "^3.0.5",
|
||||
"npm-run-all": "^4.1.5",
|
||||
"rollup": "^4.12.0",
|
||||
"sinon": "^18.0.0"
|
||||
|
||||
@ -3,7 +3,7 @@ import json from "@rollup/plugin-json";
|
||||
import { nodeResolve } from "@rollup/plugin-node-resolve";
|
||||
import { extractExports } from "@waku/build-utils";
|
||||
|
||||
import * as packageJson from "./package.json" assert { type: "json" };
|
||||
import * as packageJson from "./package.json" with { type: "json" };
|
||||
|
||||
const input = extractExports(packageJson);
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import type { NodeCapabilityCount } from "@waku/interfaces";
|
||||
import { Tags } from "@waku/interfaces";
|
||||
|
||||
/**
|
||||
* The ENR tree for the different fleets.
|
||||
@ -10,12 +10,6 @@ export const enrTree = {
|
||||
TEST: "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im"
|
||||
};
|
||||
|
||||
export const DEFAULT_BOOTSTRAP_TAG_NAME = "bootstrap";
|
||||
export const DEFAULT_BOOTSTRAP_TAG_NAME = Tags.BOOTSTRAP;
|
||||
export const DEFAULT_BOOTSTRAP_TAG_VALUE = 50;
|
||||
export const DEFAULT_BOOTSTRAP_TAG_TTL = 100_000_000;
|
||||
|
||||
export const DEFAULT_NODE_REQUIREMENTS: Partial<NodeCapabilityCount> = {
|
||||
store: 1,
|
||||
filter: 2,
|
||||
lightPush: 2
|
||||
};
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
import type { DnsClient } from "@waku/interfaces";
|
||||
import { expect } from "chai";
|
||||
import sinon from "sinon";
|
||||
|
||||
import { DnsNodeDiscovery } from "./dns.js";
|
||||
import testData from "./testdata.json" assert { type: "json" };
|
||||
import testData from "./testdata.json" with { type: "json" };
|
||||
|
||||
import { enrTree } from "./index.js";
|
||||
|
||||
@ -17,7 +18,6 @@ const branchDomainD = "D5SNLTAGWNQ34NTQTPHNZDECFU";
|
||||
const partialBranchA = "AAAA";
|
||||
const partialBranchB = "BBBB";
|
||||
const singleBranch = `enrtree-branch:${branchDomainA}`;
|
||||
const doubleBranch = `enrtree-branch:${branchDomainA},${branchDomainB}`;
|
||||
const multiComponentBranch = [
|
||||
`enrtree-branch:${branchDomainA},${partialBranchA}`,
|
||||
`${partialBranchB},${branchDomainB}`
|
||||
@ -34,10 +34,12 @@ const errorBranchB = `enrtree-branch:${branchDomainD}`;
|
||||
class MockDNS implements DnsClient {
|
||||
private fqdnRes: Map<string, string[]>;
|
||||
private fqdnThrows: string[];
|
||||
public hasThrown: boolean;
|
||||
|
||||
public constructor() {
|
||||
this.fqdnRes = new Map();
|
||||
this.fqdnThrows = [];
|
||||
this.hasThrown = false;
|
||||
}
|
||||
|
||||
public addRes(fqdn: string, res: string[]): void {
|
||||
@ -49,11 +51,17 @@ class MockDNS implements DnsClient {
|
||||
}
|
||||
|
||||
public resolveTXT(fqdn: string): Promise<string[]> {
|
||||
if (this.fqdnThrows.includes(fqdn)) throw "Mock DNS throws.";
|
||||
if (this.fqdnThrows.includes(fqdn)) {
|
||||
this.hasThrown = true;
|
||||
throw "Mock DNS throws.";
|
||||
}
|
||||
|
||||
const res = this.fqdnRes.get(fqdn);
|
||||
|
||||
if (!res) throw `Mock DNS could not resolve ${fqdn}`;
|
||||
if (!res) {
|
||||
this.hasThrown = true;
|
||||
throw `Mock DNS could not resolve ${fqdn}`;
|
||||
}
|
||||
|
||||
return Promise.resolve(res);
|
||||
}
|
||||
@ -72,9 +80,10 @@ describe("DNS Node Discovery", () => {
|
||||
mockDns.addRes(`${branchDomainA}.${host}`, [mockData.enrWithWaku2Relay]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(1);
|
||||
expect(peers[0].ip).to.eq("192.168.178.251");
|
||||
@ -88,9 +97,10 @@ describe("DNS Node Discovery", () => {
|
||||
mockDns.addRes(`${branchDomainA}.${host}`, [singleBranch]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(0);
|
||||
});
|
||||
@ -102,17 +112,21 @@ describe("DNS Node Discovery", () => {
|
||||
mockDns.addRes(`${branchDomainA}.${host}`, []);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
let peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peersA = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peersA.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(0);
|
||||
expect(peersA.length).to.eq(0);
|
||||
|
||||
// No TXT records case
|
||||
mockDns.addRes(`${branchDomainA}.${host}`, []);
|
||||
|
||||
peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], { relay: 1 });
|
||||
expect(peers.length).to.eq(0);
|
||||
const peersB = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peersB.push(peer);
|
||||
}
|
||||
expect(peersB.length).to.eq(0);
|
||||
});
|
||||
|
||||
it("ignores domain fetching errors", async function () {
|
||||
@ -120,18 +134,20 @@ describe("DNS Node Discovery", () => {
|
||||
mockDns.addThrow(`${branchDomainC}.${host}`);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
expect(peers.length).to.eq(0);
|
||||
});
|
||||
|
||||
it("ignores unrecognized TXT record formats", async function () {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrBranchBadPrefix]);
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
expect(peers.length).to.eq(0);
|
||||
});
|
||||
|
||||
@ -140,20 +156,23 @@ describe("DNS Node Discovery", () => {
|
||||
mockDns.addRes(`${branchDomainD}.${host}`, [mockData.enrWithWaku2Relay]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peersA = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peersA = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peersA.push(peer);
|
||||
}
|
||||
expect(peersA.length).to.eq(1);
|
||||
|
||||
// Specify that a subsequent network call retrieving the same peer should throw.
|
||||
// This test passes only if the peer is fetched from cache
|
||||
mockDns.addThrow(`${branchDomainD}.${host}`);
|
||||
|
||||
const peersB = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peersB = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peersB.push(peer);
|
||||
}
|
||||
expect(peersB.length).to.eq(1);
|
||||
expect(peersA[0].ip).to.eq(peersB[0].ip);
|
||||
expect(mockDns.hasThrown).to.be.false;
|
||||
});
|
||||
});
|
||||
|
||||
@ -169,9 +188,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2Relay]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(1);
|
||||
expect(peers[0].peerId?.toString()).to.eq(
|
||||
@ -183,10 +203,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2RelayStore]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
store: 1,
|
||||
relay: 1
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(1);
|
||||
expect(peers[0].peerId?.toString()).to.eq(
|
||||
@ -194,42 +214,29 @@ describe("DNS Node Discovery w/ capabilities", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("should only return 1 node with store capability", async () => {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, [mockData.enrWithWaku2Store]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
store: 1
|
||||
});
|
||||
|
||||
expect(peers.length).to.eq(1);
|
||||
expect(peers[0].peerId?.toString()).to.eq(
|
||||
"16Uiu2HAkv3La3ECgQpdYeEJfrX36EWdhkUDv4C9wvXM8TFZ9dNgd"
|
||||
);
|
||||
});
|
||||
|
||||
it("retrieves all peers (2) when cannot fulfill all requirements", async () => {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, [doubleBranch]);
|
||||
it("return first retrieved peers without further DNS queries", async function () {
|
||||
mockDns.addRes(`${rootDomain}.${host}`, multiComponentBranch);
|
||||
mockDns.addRes(`${branchDomainA}.${host}`, [
|
||||
mockData.enrWithWaku2RelayStore
|
||||
]);
|
||||
mockDns.addRes(`${branchDomainB}.${host}`, [mockData.enrWithWaku2Relay]);
|
||||
// The ENR Tree is such as there are more branches to be explored.
|
||||
// But they should not be explored if it isn't asked
|
||||
mockDns.addThrow(`${branchDomainB}.${host}`);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
store: 1,
|
||||
relay: 2,
|
||||
filter: 1
|
||||
});
|
||||
|
||||
expect(peers.length).to.eq(2);
|
||||
const peerIds = peers.map((p) => p.peerId?.toString());
|
||||
expect(peerIds).to.contain(
|
||||
"16Uiu2HAm2HyS6brcCspSbszG9i36re2bWBVjMe3tMdnFp1Hua34F"
|
||||
);
|
||||
expect(peerIds).to.contain(
|
||||
"16Uiu2HAmPsYLvfKafxgRsb6tioYyGnSvGXS2iuMigptHrqHPNPzx"
|
||||
);
|
||||
const randomStub = sinon.stub(Math, "random").returns(0);
|
||||
try {
|
||||
const iterator = dnsNodeDiscovery.getNextPeer([mockData.enrTree]);
|
||||
const { value: peer } = await iterator.next();
|
||||
|
||||
expect(peer.peerId?.toString()).to.eq(
|
||||
"16Uiu2HAm2HyS6brcCspSbszG9i36re2bWBVjMe3tMdnFp1Hua34F"
|
||||
);
|
||||
expect(mockDns.hasThrown).to.be.false;
|
||||
} finally {
|
||||
randomStub.restore();
|
||||
}
|
||||
});
|
||||
|
||||
it("retrieves all peers (3) when branch entries are composed of multiple strings", async function () {
|
||||
@ -243,10 +250,10 @@ describe("DNS Node Discovery w/ capabilities", () => {
|
||||
]);
|
||||
|
||||
const dnsNodeDiscovery = new DnsNodeDiscovery(mockDns);
|
||||
const peers = await dnsNodeDiscovery.getPeers([mockData.enrTree], {
|
||||
store: 2,
|
||||
relay: 2
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([mockData.enrTree])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(3);
|
||||
const peerIds = peers.map((p) => p.peerId?.toString());
|
||||
@ -275,12 +282,10 @@ describe("DNS Node Discovery [live data]", function () {
|
||||
this.timeout(10000);
|
||||
// Google's dns server address. Needs to be set explicitly to run in CI
|
||||
const dnsNodeDiscovery = await DnsNodeDiscovery.dnsOverHttp();
|
||||
const peers = await dnsNodeDiscovery.getPeers([enrTree.TEST], {
|
||||
relay: maxQuantity,
|
||||
store: maxQuantity,
|
||||
filter: maxQuantity,
|
||||
lightPush: maxQuantity
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([enrTree.TEST])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(maxQuantity);
|
||||
|
||||
@ -298,12 +303,10 @@ describe("DNS Node Discovery [live data]", function () {
|
||||
this.timeout(10000);
|
||||
// Google's dns server address. Needs to be set explicitly to run in CI
|
||||
const dnsNodeDiscovery = await DnsNodeDiscovery.dnsOverHttp();
|
||||
const peers = await dnsNodeDiscovery.getPeers([enrTree.SANDBOX], {
|
||||
relay: maxQuantity,
|
||||
store: maxQuantity,
|
||||
filter: maxQuantity,
|
||||
lightPush: maxQuantity
|
||||
});
|
||||
const peers = [];
|
||||
for await (const peer of dnsNodeDiscovery.getNextPeer([enrTree.SANDBOX])) {
|
||||
peers.push(peer);
|
||||
}
|
||||
|
||||
expect(peers.length).to.eq(maxQuantity);
|
||||
|
||||
|
||||
@ -1,25 +1,16 @@
|
||||
import { ENR, EnrDecoder } from "@waku/enr";
|
||||
import type {
|
||||
DnsClient,
|
||||
IEnr,
|
||||
NodeCapabilityCount,
|
||||
SearchContext
|
||||
} from "@waku/interfaces";
|
||||
import { Logger } from "@waku/utils";
|
||||
import type { DnsClient, IEnr, SearchContext } from "@waku/interfaces";
|
||||
import { Logger, shuffle } from "@waku/utils";
|
||||
|
||||
import { DnsOverHttps } from "./dns_over_https.js";
|
||||
import { ENRTree } from "./enrtree.js";
|
||||
import {
|
||||
fetchNodesUntilCapabilitiesFulfilled,
|
||||
yieldNodesUntilCapabilitiesFulfilled
|
||||
} from "./fetch_nodes.js";
|
||||
import { fetchNodes } from "./fetch_nodes.js";
|
||||
|
||||
const log = new Logger("discovery:dns");
|
||||
|
||||
export class DnsNodeDiscovery {
|
||||
private readonly dns: DnsClient;
|
||||
private readonly _DNSTreeCache: { [key: string]: string };
|
||||
private readonly _errorTolerance: number = 10;
|
||||
|
||||
public static async dnsOverHttp(
|
||||
dnsClient?: DnsClient
|
||||
@ -30,68 +21,29 @@ export class DnsNodeDiscovery {
|
||||
return new DnsNodeDiscovery(dnsClient);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a list of verified peers listed in an EIP-1459 DNS tree. Method may
|
||||
* return fewer peers than requested if @link wantedNodeCapabilityCount requires
|
||||
* larger quantity of peers than available or the number of errors/duplicate
|
||||
* peers encountered by randomized search exceeds the sum of the fields of
|
||||
* @link wantedNodeCapabilityCount plus the @link _errorTolerance factor.
|
||||
*/
|
||||
public async getPeers(
|
||||
enrTreeUrls: string[],
|
||||
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>
|
||||
): Promise<IEnr[]> {
|
||||
const networkIndex = Math.floor(Math.random() * enrTreeUrls.length);
|
||||
const { publicKey, domain } = ENRTree.parseTree(enrTreeUrls[networkIndex]);
|
||||
const context: SearchContext = {
|
||||
domain,
|
||||
publicKey,
|
||||
visits: {}
|
||||
};
|
||||
|
||||
const peers = await fetchNodesUntilCapabilitiesFulfilled(
|
||||
wantedNodeCapabilityCount,
|
||||
this._errorTolerance,
|
||||
() => this._search(domain, context)
|
||||
);
|
||||
log.info(
|
||||
"retrieved peers: ",
|
||||
peers.map((peer) => {
|
||||
return {
|
||||
id: peer.peerId?.toString(),
|
||||
multiaddrs: peer.multiaddrs?.map((ma) => ma.toString())
|
||||
};
|
||||
})
|
||||
);
|
||||
return peers;
|
||||
}
|
||||
|
||||
public constructor(dns: DnsClient) {
|
||||
this._DNSTreeCache = {};
|
||||
this.dns = dns;
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc getPeers}
|
||||
* Retrieve the next peers from the passed [[enrTreeUrls]],
|
||||
*/
|
||||
public async *getNextPeer(
|
||||
enrTreeUrls: string[],
|
||||
wantedNodeCapabilityCount: Partial<NodeCapabilityCount>
|
||||
): AsyncGenerator<IEnr> {
|
||||
const networkIndex = Math.floor(Math.random() * enrTreeUrls.length);
|
||||
const { publicKey, domain } = ENRTree.parseTree(enrTreeUrls[networkIndex]);
|
||||
const context: SearchContext = {
|
||||
domain,
|
||||
publicKey,
|
||||
visits: {}
|
||||
};
|
||||
public async *getNextPeer(enrTreeUrls: string[]): AsyncGenerator<IEnr> {
|
||||
// Shuffle the ENR Trees so that not all clients connect to same nodes first.
|
||||
for (const enrTreeUrl of shuffle(enrTreeUrls)) {
|
||||
const { publicKey, domain } = ENRTree.parseTree(enrTreeUrl);
|
||||
const context: SearchContext = {
|
||||
domain,
|
||||
publicKey,
|
||||
visits: {}
|
||||
};
|
||||
|
||||
for await (const peer of yieldNodesUntilCapabilitiesFulfilled(
|
||||
wantedNodeCapabilityCount,
|
||||
this._errorTolerance,
|
||||
() => this._search(domain, context)
|
||||
)) {
|
||||
yield peer;
|
||||
for await (const peer of fetchNodes(() =>
|
||||
this._search(domain, context)
|
||||
)) {
|
||||
yield peer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -165,7 +117,7 @@ export class DnsNodeDiscovery {
|
||||
throw new Error("Received empty result array while fetching TXT record");
|
||||
if (!response[0].length) throw new Error("Received empty TXT record");
|
||||
|
||||
// Branch entries can be an array of strings of comma delimited subdomains, with
|
||||
// Branch entries can be an array of strings of comma-delimited subdomains, with
|
||||
// some subdomain strings split across the array elements
|
||||
const result = response.join("");
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user