mirror of
https://github.com/logos-messaging/logos-messaging-go.git
synced 2026-01-11 02:13:08 +00:00
Compare commits
514 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f40bcd7e79 | ||
|
|
b0af7695bd | ||
|
|
4b28d08451 | ||
|
|
5635735da6 | ||
|
|
84a4b1be7a | ||
|
|
06c9af60f3 | ||
|
|
01bb2e1c49 | ||
|
|
070ff0d7c5 | ||
|
|
b2d4752dd1 | ||
|
|
0c3d6dc0a8 | ||
|
|
feac2604f5 | ||
|
|
6ceea038ff | ||
|
|
d6b9120de3 | ||
|
|
5dea6d3bce | ||
|
|
900b98812a | ||
|
|
f59588a970 | ||
|
|
fdf03de179 | ||
|
|
e68fcdb554 | ||
|
|
b53227f641 | ||
|
|
24932b529c | ||
|
|
4ef460cb95 | ||
|
|
c0afa070a3 | ||
|
|
6dcf177414 | ||
|
|
78b522db50 | ||
|
|
ffed0595ad | ||
|
|
9a243696d7 | ||
|
|
809dba5854 | ||
|
|
6550ff35bc | ||
|
|
0c594b3140 | ||
|
|
1608cf2b0b | ||
|
|
68a6faaf5c | ||
|
|
f98a17bacf | ||
|
|
96702e278b | ||
|
|
dd82c24e00 | ||
|
|
c78b09d4ca | ||
|
|
fdb3c3d0b3 | ||
|
|
6bdf125dd1 | ||
|
|
38be0dc169 | ||
|
|
37f936d747 | ||
|
|
76275f6fb8 | ||
|
|
0ed94ce0b1 | ||
|
|
15b4aee808 | ||
|
|
ae423936ed | ||
|
|
244bb176eb | ||
|
|
12abd041d6 | ||
|
|
8b0e03113d | ||
|
|
798c9c5d81 | ||
|
|
821481fec4 | ||
|
|
2800391204 | ||
|
|
f0acee4d1d | ||
|
|
991e872de9 | ||
|
|
bc2444ca46 | ||
|
|
2b61569558 | ||
|
|
1a96cd2271 | ||
|
|
bf2b7dce1a | ||
|
|
f9e7895202 | ||
|
|
3066ff10b1 | ||
|
|
99d2477035 | ||
|
|
690849c986 | ||
|
|
27d640e391 | ||
|
|
69e1b559bc | ||
|
|
3b5ec53bab | ||
|
|
949684092e | ||
|
|
4c3ec60da5 | ||
|
|
a4f0cae911 | ||
|
|
1472b17d39 | ||
|
|
8ff8779bb0 | ||
| c324e3df82 | |||
|
|
d3b5113059 | ||
|
|
8ab0764350 | ||
|
|
bc16c74f2e | ||
|
|
3b2cde8365 | ||
|
|
159635e21b | ||
|
|
92d62a7c38 | ||
|
|
3eab289abb | ||
|
|
c2e6320953 | ||
|
|
4f1d692413 | ||
|
|
240051b8b8 | ||
|
|
5aa11311f8 | ||
|
|
f3560ced3b | ||
|
|
d047df3859 | ||
|
|
0fc5bcc953 | ||
|
|
04a9af931f | ||
| a4009b70d1 | |||
| cd70fbc912 | |||
|
|
e1e136cc68 | ||
|
|
76d8fd687d | ||
|
|
a9be17fd48 | ||
|
|
58d9721026 | ||
|
|
75047cc9da | ||
|
|
f3da812b33 | ||
|
|
8afeb529df | ||
|
|
dacff8a6ae | ||
|
|
9fbb955b16 | ||
|
|
2f333c1e1c | ||
|
|
bb74e39ed9 | ||
|
|
9412af28dd | ||
|
|
3b0c8e9207 | ||
|
|
221cbf6599 | ||
|
|
7c13021a32 | ||
|
|
e7a5bd3c68 | ||
|
|
5b5ea977af | ||
|
|
e3d7ab1d58 | ||
|
|
7302eb05ac | ||
|
|
201d434d50 | ||
|
|
8d7c2f7bfa | ||
|
|
19a47a1ac1 | ||
|
|
ee33baa283 | ||
|
|
8303c592d3 | ||
|
|
93331b483e | ||
|
|
795322a196 | ||
|
|
a06208321e | ||
|
|
b3b8f709a5 | ||
|
|
32da07cad9 | ||
|
|
389b359e43 | ||
|
|
d2d2f5672e | ||
|
|
0e223591ed | ||
|
|
ad1b0948e3 | ||
|
|
349754056d | ||
|
|
269417c5e9 | ||
|
|
8115ec7013 | ||
|
|
5ceb61766c | ||
|
|
05d247d272 | ||
|
|
879bc08426 | ||
|
|
07d9fc9770 | ||
|
|
e59729766f | ||
|
|
95968a780f | ||
|
|
6e47bd1cf0 | ||
|
|
7028a0b1cb | ||
|
|
e8dc887c6f | ||
|
|
bc16421c74 | ||
|
|
febeb6c9c9 | ||
|
|
19d27befd9 | ||
|
|
a453c027b7 | ||
|
|
28c2a2704a | ||
|
|
8805f6cc45 | ||
|
|
46e48044da | ||
|
|
ea3f9d8d9d | ||
|
|
67bbbaf60d | ||
|
|
46b4efec56 | ||
|
|
714a310462 | ||
|
|
0260cfe954 | ||
|
|
3f69fb3776 | ||
|
|
e29cf0d191 | ||
|
|
53e0ea6ac6 | ||
|
|
b199b08ed6 | ||
|
|
83efe65f01 | ||
|
|
dcd802c027 | ||
|
|
327391a9b4 | ||
|
|
6f1280e704 | ||
|
|
826c7fb924 | ||
|
|
7d767c0105 | ||
|
|
afa124e1ca | ||
|
|
4b9e3635a2 | ||
|
|
3074cdb11c | ||
|
|
bdf44c0a23 | ||
|
|
32be835b5e | ||
|
|
bdf10a46e4 | ||
|
|
4d828bdf70 | ||
|
|
d4bda1255c | ||
|
|
0ba0f1fe26 | ||
|
|
0bdd3590f7 | ||
|
|
d65a836bb6 | ||
|
|
57cf95cd5c | ||
| 8e95f75a38 | |||
| 75a2aee295 | |||
|
|
4f232c40ca | ||
|
|
c09bd8383b | ||
|
|
2e7a82e130 | ||
|
|
0c27742f67 | ||
|
|
b647314846 | ||
|
|
0f00fb8d96 | ||
|
|
3be0edbf14 | ||
|
|
5fdd0da9ee | ||
|
|
190d8e8e08 | ||
|
|
0723ff9282 | ||
|
|
faf046e059 | ||
|
|
75f975ce7a | ||
|
|
82fc800b08 | ||
|
|
369a025b7c | ||
|
|
4e19d93da1 | ||
|
|
68c0ee2598 | ||
|
|
ff68934354 | ||
|
|
adda1cfd6d | ||
|
|
846183d515 | ||
|
|
a327e56377 | ||
|
|
6141f94b40 | ||
|
|
ec468e0a26 | ||
|
|
71aec6d37b | ||
|
|
bad57fcb0c | ||
|
|
b5068b4357 | ||
|
|
4f8ed170fe | ||
|
|
c00b218215 | ||
|
|
2a2e8dcc13 | ||
|
|
02e2c5926e | ||
|
|
4ef0c75ded | ||
|
|
b4ba7b75d4 | ||
|
|
5e3c9fdfa1 | ||
|
|
097123a30e | ||
|
|
0218169b5f | ||
|
|
5d1477d5b4 | ||
|
|
c403388ec2 | ||
|
|
0ba1b63066 | ||
|
|
e340337d64 | ||
|
|
fd4df9221e | ||
|
|
d7c7255aa4 | ||
|
|
48ab9e6ce7 | ||
|
|
6ab2cfb53b | ||
|
|
f0ed5e32d5 | ||
|
|
021265eba4 | ||
|
|
5a5ee51f4b | ||
|
|
13e2d7ac4b | ||
|
|
16d59f37d0 | ||
|
|
0b4df80b98 | ||
|
|
6bd85a1dc9 | ||
|
|
ac1a699171 | ||
|
|
b7105f9b9f | ||
|
|
e0be9857ef | ||
| d8d67d8eb5 | |||
|
|
251188d217 | ||
|
|
28107bd307 | ||
|
|
e1266b836b | ||
|
|
cf8c36f85d | ||
|
|
d7249fc123 | ||
|
|
b59a498606 | ||
|
|
fb49752f0f | ||
|
|
792b73b358 | ||
|
|
2af8cf7344 | ||
|
|
61e0c55e76 | ||
|
|
ad8f349817 | ||
|
|
e743069387 | ||
|
|
49593fd61d | ||
|
|
f0fbe62b8d | ||
|
|
c0aa5111a1 | ||
|
|
b02a663e1b | ||
|
|
9a30c78e1a | ||
|
|
e464131f89 | ||
|
|
ae61805152 | ||
|
|
f441f33c5f | ||
|
|
8122cf47a1 | ||
|
|
392558ec8e | ||
|
|
a5ce5dfaa4 | ||
|
|
73bcb2e78a | ||
|
|
b6d9e3d4be | ||
|
|
be9a2cce10 | ||
|
|
e6459df7ec | ||
|
|
684c7a46df | ||
|
|
10e32d1059 | ||
|
|
24879b2a0b | ||
|
|
5aa4d578aa | ||
|
|
ff94b1faf0 | ||
| 712febd32f | |||
|
|
3d217ed5ff | ||
|
|
70cb6de576 | ||
|
|
286f760c97 | ||
|
|
fab51beadf | ||
|
|
28c0cd5d8e | ||
|
|
43412c9da5 | ||
|
|
150ade6f33 | ||
|
|
3226def4cf | ||
|
|
2616d43c9d | ||
|
|
9315de8d8a | ||
|
|
25eb4d60a3 | ||
|
|
532a04013f | ||
|
|
a0bc53c679 | ||
|
|
67d57a36b8 | ||
|
|
d51c207a1f | ||
|
|
e0c6ab8ee1 | ||
|
|
02f2800b04 | ||
|
|
5dfbd98c74 | ||
|
|
36beb9de75 | ||
|
|
48acff4a5c | ||
|
|
cf82f66d12 | ||
|
|
4584bb4324 | ||
|
|
38202e7a2e | ||
|
|
ddf188bbf8 | ||
|
|
279752344f | ||
|
|
db222a24ef | ||
|
|
fc3b2f76d5 | ||
|
|
0868f5d4dd | ||
|
|
c58d0f51e4 | ||
|
|
0ba8b2caeb | ||
|
|
2701a38b2a | ||
|
|
18c16de94e | ||
|
|
94f18c537c | ||
|
|
fa51d10b4b | ||
|
|
a3c3aab44e | ||
|
|
077df2fbb6 | ||
|
|
3d69e78cf3 | ||
|
|
9b9fc634cb | ||
|
|
ab65c4869c | ||
|
|
9161c4f7fe | ||
|
|
519fa2977a | ||
|
|
19ba25ffcb | ||
|
|
a16d00624e | ||
|
|
4181655b7a | ||
|
|
b5be83a02e | ||
|
|
4af7e7a500 | ||
|
|
ac9d826b03 | ||
|
|
bfee9964f6 | ||
|
|
f6460efee9 | ||
|
|
d4abe15634 | ||
|
|
ee94581d0a | ||
|
|
9a0cf85ae1 | ||
|
|
6955d01498 | ||
|
|
2ef7e732dd | ||
|
|
b5802adf5b | ||
|
|
d96e1aedde | ||
|
|
5d0692b339 | ||
|
|
d268b2e403 | ||
|
|
2f9f304762 | ||
|
|
7826e31f14 | ||
|
|
5dcae1d190 | ||
|
|
e1417b364d | ||
|
|
3aa477cbc6 | ||
|
|
9017f9816a | ||
|
|
dcc828749f | ||
|
|
47c961dcbb | ||
|
|
dfd104dbac | ||
|
|
88d69ebccd | ||
|
|
7f466c1d99 | ||
|
|
388f56b43f | ||
|
|
d324234c81 | ||
|
|
16ec22596e | ||
|
|
e0ba66791d | ||
| 735c2fa0d7 | |||
|
|
9f4754dcae | ||
|
|
a84701abaa | ||
|
|
3254d28968 | ||
|
|
d317b294a0 | ||
| c132ee4303 | |||
| fcee52757e | |||
|
|
003c90fba8 | ||
|
|
054bdae1de | ||
|
|
bf90ab4d1b | ||
|
|
81638fe111 | ||
|
|
3d8d435502 | ||
|
|
9b05d48318 | ||
|
|
a650469fae | ||
|
|
be4601e8f1 | ||
|
|
dd5dc7a9c8 | ||
|
|
a5f9ee5ad8 | ||
|
|
fb2df14cb7 | ||
|
|
2060c8c837 | ||
|
|
22b097817e | ||
|
|
e1a84aab0e | ||
|
|
bfc3083fb1 | ||
|
|
55bc21c604 | ||
|
|
7beaa3f029 | ||
|
|
ab7e45c735 | ||
|
|
8df69279ee | ||
|
|
4b1c188cf0 | ||
|
|
2aea2f5ca5 | ||
|
|
75486f8226 | ||
|
|
dcc87cf24f | ||
|
|
db25b307e2 | ||
|
|
db3a2d53b3 | ||
|
|
cc28267951 | ||
|
|
18efd2c737 | ||
|
|
793c059ea7 | ||
|
|
be982d8617 | ||
|
|
8783cd2f45 | ||
|
|
08cabab41f | ||
|
|
092811658e | ||
|
|
7badb4a37b | ||
|
|
76b007163a | ||
|
|
def745cfba | ||
|
|
7fbb9ebe42 | ||
|
|
d13b1f0aa3 | ||
|
|
e66f0e3b9c | ||
|
|
f9179cd116 | ||
|
|
f9ed8d973c | ||
|
|
84fa332e1c | ||
|
|
5fcfbb9897 | ||
|
|
ab3f21f209 | ||
|
|
30422114f6 | ||
|
|
8aa1c4a39b | ||
|
|
e8bd38a023 | ||
|
|
215c7c46b6 | ||
|
|
9c0bebc859 | ||
|
|
0854edaf3d | ||
|
|
229fb7a970 | ||
|
|
accd9ff3e3 | ||
|
|
4a546d12d3 | ||
|
|
4c52149fac | ||
|
|
8ad08d6b04 | ||
|
|
6463dbeb70 | ||
|
|
67d0b77147 | ||
|
|
467d1b2ca5 | ||
|
|
09eb8ed19b | ||
|
|
041dc4070a | ||
|
|
8b73eb8ae3 | ||
|
|
bfbc50eb22 | ||
|
|
36464acacb | ||
|
|
44d3ef6d78 | ||
|
|
cb3f5da322 | ||
|
|
ddb08adbbd | ||
|
|
624fe1bb2f | ||
|
|
5422af8130 | ||
|
|
7e36f91a5a | ||
|
|
f62ba67a92 | ||
|
|
0b943caaa8 | ||
|
|
f088e49075 | ||
|
|
8cc92dfdef | ||
|
|
bc06867cc9 | ||
|
|
54c02213cd | ||
|
|
5d5fe3413e | ||
|
|
fc4840e1c9 | ||
|
|
78db78d5a9 | ||
|
|
8a9c4d68e3 | ||
|
|
ee17c23345 | ||
|
|
2818c3d394 | ||
|
|
387ee5f9ac | ||
|
|
287d7a014e | ||
|
|
770f41158f | ||
|
|
f263be4a74 | ||
| d860d825c1 | |||
|
|
9e6c09e680 | ||
|
|
06f027b1a9 | ||
|
|
419adcb6a8 | ||
|
|
c320b38cbe | ||
|
|
da45dab68e | ||
|
|
eb836d71e3 | ||
|
|
b498308cdd | ||
|
|
317798ac9f | ||
|
|
9e52e09dd5 | ||
|
|
e0e4a2fa87 | ||
|
|
e56f54252f | ||
|
|
05d8cfdc65 | ||
|
|
ee2af4646c | ||
|
|
b88907c5ee | ||
|
|
164c92554b | ||
|
|
9f45d271ac | ||
|
|
299801d4ad | ||
|
|
95fc8ff77b | ||
|
|
65f26978f9 | ||
|
|
b1a4d739ce | ||
|
|
df78d21dfd | ||
|
|
7c632e40c0 | ||
|
|
6429af9ca7 | ||
|
|
af7471d158 | ||
|
|
baeaa1dd55 | ||
|
|
20040f2e9b | ||
|
|
22398b2868 | ||
|
|
045fcf02d9 | ||
|
|
87a7501166 | ||
|
|
884f65ecdf | ||
|
|
42b2c0bc6d | ||
|
|
34de2941c7 | ||
|
|
33344c2ae0 | ||
|
|
a4b038730c | ||
|
|
2b494a384e | ||
|
|
e970c033cb | ||
|
|
1b1450e04f | ||
|
|
e6aaf9c852 | ||
|
|
55ec2347a8 | ||
|
|
8b3f42310d | ||
|
|
76961f2bd8 | ||
|
|
1abd476d3e | ||
|
|
88d52d6417 | ||
|
|
dc08c44804 | ||
|
|
ad9236fd6b | ||
|
|
a422add209 | ||
|
|
13a23a9c42 | ||
|
|
bc6a305759 | ||
|
|
cfe28d4698 | ||
|
|
a124c48b4e | ||
|
|
55a7d7b332 | ||
|
|
6d9018cb11 | ||
|
|
dbd94ebb81 | ||
|
|
7781e850d5 | ||
|
|
8cac406a4c | ||
|
|
ca297713fa | ||
|
|
b26859fc6a | ||
|
|
5ca26ef897 | ||
|
|
97f02361d4 | ||
|
|
6ece3c483b | ||
|
|
7dd02067f8 | ||
|
|
0822fdb280 | ||
|
|
49baff14cd | ||
|
|
47cdb86aaf | ||
|
|
fb3447f3d8 | ||
|
|
b8ab99ba08 | ||
|
|
06d303f8ab | ||
|
|
80eb17cfed | ||
|
|
cd358c7bd6 | ||
|
|
fff255d0ef | ||
|
|
cd335f8168 | ||
|
|
eba4aa43e5 | ||
|
|
0381b92531 | ||
|
|
f0f3543df8 | ||
|
|
be766a8636 | ||
|
|
c380faa6ae | ||
|
|
875d132c1e | ||
|
|
76186e5477 | ||
|
|
52ac8e3740 | ||
|
|
f6fe353e2e | ||
|
|
b0c094b0b6 | ||
|
|
1bffa35dfa | ||
|
|
1c75c89790 | ||
|
|
f18f219086 | ||
|
|
9207dbcb4a | ||
|
|
5d514ddd6a | ||
|
|
10c2e20910 | ||
|
|
2c17e20d99 | ||
|
|
0dbe4fd5ff | ||
|
|
39c97c5d89 | ||
|
|
96c8980828 | ||
|
|
0c3803b5d8 | ||
|
|
189b2ed120 | ||
|
|
a5abfa8710 | ||
|
|
b66c19ad8f | ||
|
|
898f433d2c | ||
|
|
8d6b2cd721 |
16
.codeclimate.yml
Normal file
16
.codeclimate.yml
Normal file
@ -0,0 +1,16 @@
|
||||
plugins:
|
||||
# golint:
|
||||
# enabled: true
|
||||
gofmt:
|
||||
enabled: true
|
||||
govet:
|
||||
enabled: true
|
||||
# golangci-lint:
|
||||
# enabled: true
|
||||
exclude_patterns:
|
||||
- "."
|
||||
- "**/*.pb.go"
|
||||
- "**/rln/contracts/*.go"
|
||||
- "**/bindata.go"
|
||||
- "./examples/waku-csharp"
|
||||
- "./examples/swift-waku"
|
||||
@ -2,6 +2,5 @@ README.md
|
||||
Dockerfile
|
||||
.*ignore
|
||||
LICENSE*
|
||||
tests
|
||||
examples
|
||||
*.db
|
||||
|
||||
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
@ -0,0 +1 @@
|
||||
* @richard-ramos @chaitanyaprem
|
||||
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: 'bug:'
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Desktop (please complete the following information):**
|
||||
- OS: [e.g. iOS]
|
||||
- Browser [e.g. chrome, safari]
|
||||
- Version [e.g. 22]
|
||||
|
||||
### go-waku version/commit hash
|
||||
State the version of `go-waku` where you've encountered the bug or, if built off a specific commit, the relevant commit hash. You can check the version by running `./build/waku --version`.
|
||||
- e.g. `v0.7.0` or `e21bdab`
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
||||
40
.github/ISSUE_TEMPLATE/epic.md
vendored
Normal file
40
.github/ISSUE_TEMPLATE/epic.md
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
name: Epic
|
||||
about: Track Epic
|
||||
title: "[Epic] "
|
||||
labels: epic
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Please ensure you are assigning the matching milestone label to the epic -->
|
||||
<!-- All _active_ (being worked on) epics MUST have an owner (GitHub assignee) -->
|
||||
|
||||
**Planned start date**:
|
||||
**Due date**:
|
||||
|
||||
# Summary
|
||||
|
||||
<!-- Provide a high level summary of the Epic -->
|
||||
|
||||
# Acceptance Criteria
|
||||
|
||||
<!-- describe the deliverable of this epic and its attributes in plain English -->
|
||||
|
||||
## Tasks
|
||||
|
||||
<!--
|
||||
Breakdown of the work
|
||||
- [ ] Task 1
|
||||
- [ ] Link to GitHub issue tracking task 2
|
||||
-->
|
||||
|
||||
# RAID (Risks, Assumptions, Issues and Dependencies)
|
||||
|
||||
<!-- List dependencies on other epics (avoid dependencies on tasks) -->
|
||||
|
||||
<!-- List dependencies on other teams -->
|
||||
|
||||
<!-- List any risks or assumptions that will be cleared as work progresses -->
|
||||
|
||||
<!-- List any GitHub issues that tracks any blocker or any of the items above -->
|
||||
23
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
23
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for go-waku implementation
|
||||
title: 'feat:'
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Problem
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
### Suggested solution
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Alternatives considered
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
### Additional context
|
||||
Add any other context or screenshots about the feature request here.
|
||||
|
||||
### Acceptance criteria
|
||||
A list of tasks that need to be done for the issue to be considered resolved.
|
||||
18
.github/ISSUE_TEMPLATE/improvement.md
vendored
Normal file
18
.github/ISSUE_TEMPLATE/improvement.md
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
---
|
||||
name: Improvement
|
||||
about: Suggest improvements to the codebase or processes. This includes refactoring,
|
||||
docs and any other chores
|
||||
title: 'chore:'
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Background
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]; There is a spelling error in [...]; It's difficult to read the code in module [...]
|
||||
|
||||
### Details
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Acceptance criteria
|
||||
A list of tasks that need to be done for the issue to be considered resolved.
|
||||
6
.github/docker-compose/ganache.yml
vendored
Normal file
6
.github/docker-compose/ganache.yml
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
services:
|
||||
ganache:
|
||||
image: "trufflesuite/ganache:latest"
|
||||
command: ["-m='swim relax risk shy chimney please usual search industry board music segment'"]
|
||||
ports:
|
||||
- "8545:8545"
|
||||
13
.github/docker-compose/nwaku.yml
vendored
Normal file
13
.github/docker-compose/nwaku.yml
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
services:
|
||||
nwaku:
|
||||
image: "harbor.status.im/wakuorg/nwaku:v0.35.1"
|
||||
command:
|
||||
[
|
||||
"--relay",
|
||||
"--store",
|
||||
"--nodekey=1122334455667788990011223344556677889900112233445566778899001122",
|
||||
"--cluster-id=99",
|
||||
"--shard=1",
|
||||
]
|
||||
ports:
|
||||
- "60000"
|
||||
7
.github/docker-compose/postgres.yml
vendored
Normal file
7
.github/docker-compose/postgres.yml
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
services:
|
||||
postgres:
|
||||
environment:
|
||||
- POSTGRES_HOST_AUTH_METHOD=trust
|
||||
image: "postgres:9.6-alpine"
|
||||
ports:
|
||||
- "5432:5432"
|
||||
31
.github/pull_request_template.md
vendored
Normal file
31
.github/pull_request_template.md
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
# Description
|
||||
<!--- Describe your changes to provide context for reviewrs -->
|
||||
|
||||
# Changes
|
||||
|
||||
<!-- List of detailed changes -->
|
||||
|
||||
- [ ] ...
|
||||
- [ ] ...
|
||||
|
||||
# Tests
|
||||
|
||||
<!-- List down any tests that were executed specifically for this pull-request -->
|
||||
|
||||
|
||||
|
||||
<!--
|
||||
## How to test
|
||||
|
||||
1.
|
||||
1.
|
||||
1.
|
||||
|
||||
-->
|
||||
|
||||
|
||||
<!--
|
||||
## Issue
|
||||
|
||||
closes #
|
||||
-->
|
||||
4
.github/workflows/add-action-project.yml
vendored
4
.github/workflows/add-action-project.yml
vendored
@ -11,7 +11,7 @@ jobs:
|
||||
name: Add issue to project
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/add-to-project@v0.3.0
|
||||
- uses: actions/add-to-project@v0.5.0
|
||||
with:
|
||||
project-url: https://github.com/orgs/waku-org/projects/2
|
||||
github-token: ${{ secrets.ADD_TO_PROJECT_PAT }}
|
||||
github-token: ${{ secrets.ADD_TO_PROJECT_20240815 }}
|
||||
|
||||
12
.github/workflows/auto_assign_pr.yml
vendored
Normal file
12
.github/workflows/auto_assign_pr.yml
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
name: Auto Assign PR to Creator
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
|
||||
jobs:
|
||||
assign_creator:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: toshimaru/auto-author-assign@v1.6.2
|
||||
78
.github/workflows/build_linux_pkgs.yml
vendored
Normal file
78
.github/workflows/build_linux_pkgs.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: build_linux_pkgs
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*' # "e.g. v0.4"
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
env:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: xom9ikk/dotenv@v2
|
||||
with:
|
||||
path: ".github/"
|
||||
- run: |
|
||||
echo "go_version=${{ env.GO_VERSION }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- run: |
|
||||
VERSION=$(cat ./VERSION)
|
||||
echo "waku_version=$VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
|
||||
build-linux:
|
||||
needs: env
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
ext: [deb, rpm]
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ needs.env.outputs.go_version }}
|
||||
cache: false
|
||||
|
||||
- name: Build
|
||||
id: build
|
||||
run: |
|
||||
make build
|
||||
mkdir ./build/linux
|
||||
cp ./build/waku ./build/linux/.
|
||||
strip --strip-unneeded ./build/linux/waku
|
||||
|
||||
- name: Package ${{ matrix.ext }}
|
||||
uses: bpicode/github-action-fpm@master
|
||||
with:
|
||||
fpm_args: ./build/linux/waku=/usr/bin/waku
|
||||
fpm_opts: '-p gowaku-${{ needs.env.outputs.waku_version }}-x86_64.${{ matrix.ext }} -n go-waku -t ${{ matrix.ext }} -s dir --license "MIT, Apache 2.0" --version ${{ needs.env.outputs.waku_version }} --architecture x86_64 --depends libc6 --description "Go implementation of Waku v2 protocols" --url "https://github.com/waku-org/go-waku" --maintainer "Richard Ramos <richard@status.im>"'
|
||||
|
||||
- name: Upload ${{ matrix.ext }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: gowaku-${{ needs.env.outputs.waku_version }}-x86_64.${{ matrix.ext }}
|
||||
path: ./gowaku-${{ needs.env.outputs.waku_version }}-x86_64.${{ matrix.ext }}
|
||||
if-no-files-found: error
|
||||
110
.github/workflows/build_mobile.yml
vendored
Normal file
110
.github/workflows/build_mobile.yml
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
name: build_mobile
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*' # "e.g. v0.4"
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
env:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: xom9ikk/dotenv@v2
|
||||
with:
|
||||
path: ".github/"
|
||||
- run: |
|
||||
echo "go_version=${{ env.GO_VERSION }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- run: |
|
||||
VERSION=$(cat ./VERSION)
|
||||
echo "waku_version=$VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
|
||||
build-android:
|
||||
needs: env
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ needs.env.outputs.go_version }}
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ needs.env.outputs.go_version }}
|
||||
cache: false
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make install-gomobile
|
||||
make mobile-android || make mobile-android
|
||||
cd ./build/lib
|
||||
tar -czvf gowaku-${{ needs.env.outputs.waku_version }}-android.tar.gz gowaku.aar gowaku-sources.jar
|
||||
|
||||
- name: Upload asset
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: gowaku-${{ needs.env.outputs.waku_version }}-android.tar.gz
|
||||
path: ./build/lib/gowaku-${{ needs.env.outputs.waku_version }}-android.tar.gz
|
||||
if-no-files-found: error
|
||||
|
||||
build-ios:
|
||||
needs: env
|
||||
runs-on: macos-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: ${{ needs.env.outputs.go_version }}
|
||||
cache: false
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
make install-gomobile
|
||||
make mobile-ios
|
||||
cd ./build/lib
|
||||
tar -czvf gowaku-${{ needs.env.outputs.waku_version }}-ios.tar.gz Gowaku.xcframework
|
||||
|
||||
- name: Upload asset
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: gowaku-${{ needs.env.outputs.waku_version }}-ios.tar.gz
|
||||
path: ./build/lib/gowaku-${{ needs.env.outputs.waku_version }}-ios.tar.gz
|
||||
if-no-files-found: error
|
||||
|
||||
169
.github/workflows/ci.yml
vendored
Normal file
169
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,169 @@
|
||||
name: ci
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
changes: # changes detection
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: read
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
name: Checkout code
|
||||
id: checkout
|
||||
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
common:
|
||||
- '.github/workflows/**'
|
||||
- 'Makefile'
|
||||
- 'libs/**'
|
||||
- 'go.mod'
|
||||
- 'go.sum'
|
||||
- 'flake.nix'
|
||||
- 'examples/**'
|
||||
|
||||
v2:
|
||||
- 'waku/**'
|
||||
- 'cmd/**'
|
||||
- 'library/**'
|
||||
- 'tests/**'
|
||||
|
||||
docker:
|
||||
- 'docker/**'
|
||||
- 'Dockerfile'
|
||||
|
||||
outputs:
|
||||
common: ${{ steps.filter.outputs.common }}
|
||||
v2: ${{ steps.filter.outputs.v2 }}
|
||||
docker: ${{ steps.filter.outputs.docker }}
|
||||
|
||||
env:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: |
|
||||
VERSION=$(cat ./VERSION)
|
||||
echo "waku_version=$VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
golangci:
|
||||
name: lint
|
||||
needs: [changes, env]
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
|
||||
- name: Execute golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: v1.64.6
|
||||
args: --timeout=5m
|
||||
|
||||
build:
|
||||
needs: [changes, env]
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: build-${{ matrix.os }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
|
||||
- name: Build binary
|
||||
run: make
|
||||
|
||||
- name: Build library
|
||||
run: make static-library dynamic-library
|
||||
|
||||
- name: Build examples
|
||||
run: make build-example
|
||||
|
||||
test:
|
||||
needs: [changes, env]
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
strategy:
|
||||
matrix:
|
||||
tests: [test-ci, test-with-race]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
name: ${{ matrix.tests }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
|
||||
- name: "Run tests"
|
||||
run: make ${{ matrix.tests }}
|
||||
|
||||
- name: "Run onchain-tests"
|
||||
run: |
|
||||
docker compose -f .github/docker-compose/ganache.yml up -d
|
||||
make test-onchain${{ matrix.tests == 'test-with-race' && '-with-race' || '' }}
|
||||
|
||||
- name: "Run storev3 tests"
|
||||
run: |
|
||||
docker compose -f .github/docker-compose/nwaku.yml up -d
|
||||
NWAKU_HOST=$(docker compose -f .github/docker-compose/nwaku.yml port nwaku 60000)
|
||||
NWAKU_PORT=$(echo $NWAKU_HOST | cut -d ":" -f 2)
|
||||
sleep 5
|
||||
make test-storev3 TEST_STOREV3_NODE="/ip4/127.0.0.1/tcp/${NWAKU_PORT}/p2p/16Uiu2HAmMGhfSTUzKbsjMWxc6T1X4wiTWSF1bEWSLjAukCm7KiHV"
|
||||
50
.github/workflows/container-image.yml
vendored
Normal file
50
.github/workflows/container-image.yml
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
name: container-image-build
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
image_tag:
|
||||
type: string
|
||||
default: ${{ github.event.number }}
|
||||
outputs:
|
||||
image:
|
||||
description: The resulting image link
|
||||
value: ${{ jobs.build-docker-image.outputs.image }}
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build-docker-image:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: docker-build-${{ matrix.os }}
|
||||
outputs:
|
||||
image: ${{ steps.build.outputs.image }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Build image
|
||||
id: build
|
||||
run: |
|
||||
|
||||
SHORT_REF=$(git rev-parse --short HEAD)
|
||||
|
||||
TAG=$([ "${PR_NUMBER}" == "" ] && echo "${SHORT_REF}" || echo "${PR_NUMBER}")
|
||||
IMAGE=quay.io/wakuorg/go-waku-pr:${TAG}
|
||||
|
||||
echo "image=${IMAGE}" >> $GITHUB_OUTPUT
|
||||
echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
docker login -u ${QUAY_USER} -p ${QUAY_PASSWORD} quay.io
|
||||
docker build -t ${IMAGE} -f docker/Dockerfile.test.amd64 --label quay.expires-after=7d .
|
||||
docker push ${IMAGE}
|
||||
env:
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
QUAY_USER: ${{ secrets.QUAY_USER }}
|
||||
PR_NUMBER: ${{ inputs.image_tag }}
|
||||
|
||||
43
.github/workflows/lint_pr.yml
vendored
Normal file
43
.github/workflows/lint_pr.yml
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
name: "Conventional Commits"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
jobs:
|
||||
main:
|
||||
name: Validate format
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: amannn/action-semantic-pull-request@v5
|
||||
id: lint_pr_title
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: richard-ramos/action-conventional-commits@v1.1.1
|
||||
id: lint_pr_commits
|
||||
- uses: marocchino/sticky-pull-request-comment@v2
|
||||
# When the previous steps fails, the workflow would stop. By adding this
|
||||
# condition you can continue the execution with the populated error message.
|
||||
if: always() && (steps.lint_pr_title.outputs.error_message != null || steps.lint_pr_commits.outputs.error_message != null )
|
||||
with:
|
||||
header: pr-title-lint-error
|
||||
message: |
|
||||
Thank you for opening this pull request!
|
||||
|
||||
We require pull request titles and commits to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your PR needs to be adjusted.
|
||||
|
||||
Details:
|
||||
|
||||
> ${{ steps.lint_pr_title.outputs.error_message }}
|
||||
> ${{ steps.lint_pr_commits.outputs.error_message }}
|
||||
|
||||
# Delete a previous comment when the issue has been resolved
|
||||
- if: ${{ steps.lint_pr_title.outputs.error_message == null && steps.lint_pr_commits.outputs.error_message == null }}
|
||||
uses: marocchino/sticky-pull-request-comment@v2
|
||||
with:
|
||||
header: pr-title-lint-error
|
||||
delete: true
|
||||
29
.gitignore
vendored
29
.gitignore
vendored
@ -1,9 +1,10 @@
|
||||
.codeclimate.yml
|
||||
nodekey
|
||||
rlnCredentials.txt
|
||||
rlnCredentials.json
|
||||
rlnKeystore.json
|
||||
test_onchain.json
|
||||
*.bkp
|
||||
*.log
|
||||
.vscode
|
||||
|
||||
# sqlite db
|
||||
*.db
|
||||
@ -26,18 +27,18 @@ pkg/*
|
||||
|
||||
# output binaries
|
||||
go-waku
|
||||
examples/basic-relay/build/basic-relay
|
||||
examples/filter2/build/filter2
|
||||
examples/noise/build/
|
||||
examples/noise/noise
|
||||
examples/basic-light-client/basic2
|
||||
examples/basic-relay/basic2
|
||||
examples/filter2/filter2
|
||||
examples/rln/rln
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
*.out.tmp
|
||||
coverage.html
|
||||
coverage.json
|
||||
|
||||
cc-test-reporter
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
# vendor/
|
||||
|
||||
@ -61,6 +62,12 @@ Icon
|
||||
# Nix
|
||||
result
|
||||
|
||||
# Solidity files
|
||||
waku/v2/protocol/rln/contracts/*.abi
|
||||
waku/v2/protocol/rln/contracts/*.sol
|
||||
waku/v2/protocol/rln/contracts/*.bin
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
@ -176,3 +183,5 @@ iOSInjectionProject/
|
||||
**/xcshareddata/WorkspaceSettings.xcsettings
|
||||
|
||||
# End of https://www.toptal.com/developers/gitignore/api/swift,xcode,Cobjective-c,osx
|
||||
|
||||
.idea
|
||||
|
||||
6
.gitmodules
vendored
6
.gitmodules
vendored
@ -0,0 +1,6 @@
|
||||
[submodule "libs/waku-rln-contract"]
|
||||
path = libs/waku-rln-contract
|
||||
url = https://github.com/waku-org/waku-rln-contract.git
|
||||
[submodule "waku/v2/protocol/waku-proto"]
|
||||
path = waku/v2/protocol/waku-proto
|
||||
url = https://github.com/waku-org/waku-proto
|
||||
8
.golangci.full.yaml
Normal file
8
.golangci.full.yaml
Normal file
@ -0,0 +1,8 @@
|
||||
issues:
|
||||
include:
|
||||
- EXC0012
|
||||
- EXC0014
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- revive
|
||||
@ -1,3 +0,0 @@
|
||||
run:
|
||||
build-tags:
|
||||
- gowaku_rln
|
||||
11
Dockerfile
11
Dockerfile
@ -1,10 +1,5 @@
|
||||
# BUILD IMAGE --------------------------------------------------------
|
||||
FROM golang:1.19-alpine3.16 as builder
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache build-base
|
||||
RUN apk add --no-cache bash
|
||||
RUN apk add --no-cache git
|
||||
FROM golang:1.23 as builder
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
@ -14,7 +9,7 @@ RUN make -j$(nproc) build
|
||||
|
||||
# ACTUAL IMAGE -------------------------------------------------------
|
||||
|
||||
FROM alpine:3.16
|
||||
FROM debian:12.1-slim
|
||||
|
||||
ARG GIT_COMMIT=unknown
|
||||
|
||||
@ -26,6 +21,8 @@ LABEL commit=$GIT_COMMIT
|
||||
# color, nocolor, json
|
||||
ENV GOLOG_LOG_FMT=nocolor
|
||||
|
||||
RUN apt update && apt install -y ca-certificates
|
||||
|
||||
# go-waku default ports
|
||||
EXPOSE 9000 30303 60000 60001 8008 8009
|
||||
|
||||
|
||||
144
Makefile
144
Makefile
@ -1,14 +1,11 @@
|
||||
|
||||
CC_TEST_REPORTER_ID := c09efa7c67c269bfdc6f8a356785d8f7ed55c9dc2b9a1d07b78c384f55c4e527
|
||||
GO_HTML_COV := ./coverage.html
|
||||
GO_TEST_OUTFILE := ./c.out
|
||||
CC_PREFIX := github.com/waku-org/go-waku
|
||||
|
||||
SHELL := bash # the shell used internally by Make
|
||||
|
||||
GOBIN ?= $(shell which go)
|
||||
GOCMD ?= $(shell which go)
|
||||
|
||||
.PHONY: all build lint test coverage build-example static-library dynamic-library test-c test-c-template mobile-android mobile-ios
|
||||
.PHONY: all build lint lint-full test build-example static-library dynamic-library test-c test-c-template mobile-android mobile-ios
|
||||
|
||||
ifeq ($(OS),Windows_NT) # is Windows_NT on XP, 2000, 7, Vista, 10...
|
||||
detected_OS := Windows
|
||||
@ -18,11 +15,13 @@ endif
|
||||
|
||||
ifeq ($(detected_OS),Darwin)
|
||||
GOBIN_SHARED_LIB_EXT := dylib
|
||||
TEST_REPORTER_URL := https://codeclimate.com/downloads/test-reporter/test-reporter-latest-darwin-amd64
|
||||
else ifeq ($(detected_OS),Windows)
|
||||
# on Windows need `--export-all-symbols` flag else expected symbols will not be found in libgowaku.dll
|
||||
GOBIN_SHARED_LIB_CGO_LDFLAGS := CGO_LDFLAGS="-Wl,--export-all-symbols"
|
||||
GOBIN_SHARED_LIB_EXT := dll
|
||||
else
|
||||
TEST_REPORTER_URL := https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64
|
||||
GOBIN_SHARED_LIB_EXT := so
|
||||
GOBIN_SHARED_LIB_CGO_LDFLAGS := CGO_LDFLAGS="-Wl,-soname,libgowaku.so.0"
|
||||
endif
|
||||
@ -40,68 +39,70 @@ BUILD_FLAGS ?= $(shell echo "-ldflags='\
|
||||
ANDROID_TARGET ?= 23
|
||||
|
||||
# control rln code compilation
|
||||
ifeq ($(RLN), true)
|
||||
BUILD_TAGS := gowaku_rln
|
||||
ifeq ($(NO_RLN), true)
|
||||
BUILD_TAGS := gowaku_no_rln
|
||||
endif
|
||||
|
||||
all: build
|
||||
|
||||
deps: lint-install
|
||||
|
||||
build-with-race:
|
||||
${GOCMD} build -race -tags="${BUILD_TAGS}" $(BUILD_FLAGS) -o build/waku ./cmd/waku
|
||||
|
||||
build:
|
||||
${GOBIN} build -tags="${BUILD_TAGS}" $(BUILD_FLAGS) -o build/waku ./cmd/waku
|
||||
${GOCMD} build -tags="${BUILD_TAGS}" $(BUILD_FLAGS) -o build/waku ./cmd/waku
|
||||
|
||||
chat2:
|
||||
pushd ./examples/chat2 && \
|
||||
${GOBIN} build -tags="gowaku_rln" -o ../../build/chat2 . && \
|
||||
${GOCMD} build -o ../../build/chat2 . && \
|
||||
popd
|
||||
|
||||
vendor:
|
||||
${GOBIN} mod tidy
|
||||
${GOCMD} mod tidy
|
||||
cd examples/basic-light-client && ${GOCMD} mod tidy
|
||||
cd examples/basic-relay && ${GOCMD} mod tidy
|
||||
cd examples/chat2-reliable && ${GOCMD} mod tidy
|
||||
cd examples/chat2 && ${GOCMD} mod tidy
|
||||
cd examples/filter2 && ${GOCMD} mod tidy
|
||||
cd examples/noise && ${GOCMD} mod tidy
|
||||
cd examples/rln && ${GOCMD} mod tidy
|
||||
|
||||
lint-install:
|
||||
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
|
||||
bash -s -- -b $(shell ${GOBIN} env GOPATH)/bin v1.52.2
|
||||
bash -s -- -b $(shell ${GOCMD} env GOPATH)/bin v1.59.1
|
||||
|
||||
lint:
|
||||
@echo "lint"
|
||||
@golangci-lint --exclude=SA1019 run ./... --deadline=5m
|
||||
@golangci-lint run ./...
|
||||
|
||||
lint-full:
|
||||
@echo "lint"
|
||||
@golangci-lint run ./... --config=./.golangci.full.yaml
|
||||
|
||||
test-with-race:
|
||||
${GOCMD} test -race -timeout 300s ./waku/... ./cmd/waku/server/...
|
||||
|
||||
test:
|
||||
${GOBIN} test -timeout 300s ./waku/... -coverprofile=${GO_TEST_OUTFILE}.tmp
|
||||
cat ${GO_TEST_OUTFILE}.tmp | grep -v ".pb.go" > ${GO_TEST_OUTFILE}
|
||||
${GOBIN} tool cover -html=${GO_TEST_OUTFILE} -o ${GO_HTML_COV}
|
||||
${GOCMD} test -timeout 300s ./waku/... ./cmd/waku/server/... ./...
|
||||
|
||||
COVERAGE_FILE := ./coverage/cc-test-reporter
|
||||
$(COVERAGE_FILE):
|
||||
curl -sfL https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 --output ./coverage/cc-test-reporter # TODO: support mac and windows
|
||||
chmod +x ./coverage/cc-test-reporter
|
||||
|
||||
_before-cc: $(COVERAGE_FILE)
|
||||
|
||||
CC_TEST_REPORTER_ID=${CC_TEST_REPORTER_ID} ./coverage/cc-test-reporter before-build
|
||||
|
||||
_after-cc:
|
||||
GIT_COMMIT=$(git log | grep -m1 -oE '[^ ]+$') CC_TEST_REPORTER_ID=${CC_TEST_REPORTER_ID} ./coverage/cc-test-reporter after-build --prefix ${CC_PREFIX}
|
||||
|
||||
test-ci: _before-cc test _after-cc
|
||||
test-ci: test
|
||||
|
||||
generate:
|
||||
${GOBIN} generate ./...
|
||||
|
||||
coverage:
|
||||
${GOBIN} test -count 1 -coverprofile=coverage.out ./...
|
||||
${GOBIN} tool cover -html=coverage.out -o=coverage.html
|
||||
${GOCMD} generate ./...
|
||||
|
||||
# build a docker image for the fleet
|
||||
docker-image: DOCKER_IMAGE_TAG ?= latest
|
||||
docker-image: DOCKER_IMAGE_NAME ?= statusteam/go-waku:$(DOCKER_IMAGE_TAG)
|
||||
docker-image: DOCKER_IMAGE_NAME ?= wakuorg/go-waku:$(DOCKER_IMAGE_TAG)
|
||||
docker-image:
|
||||
docker build --tag $(DOCKER_IMAGE_NAME) \
|
||||
--build-arg="GIT_COMMIT=$(shell git rev-parse HEAD)" .
|
||||
|
||||
build-example-basic2:
|
||||
cd examples/basic2 && $(MAKE)
|
||||
build-example-basic-relay:
|
||||
cd examples/basic-relay && $(MAKE)
|
||||
|
||||
build-example-basic-light-client:
|
||||
cd examples/basic-light-client && $(MAKE)
|
||||
|
||||
build-example-chat-2:
|
||||
cd examples/chat2 && $(MAKE)
|
||||
@ -112,28 +113,44 @@ build-example-filter2:
|
||||
build-example-c-bindings:
|
||||
cd examples/c-bindings && $(MAKE)
|
||||
|
||||
build-example: build-example-basic2 build-example-chat-2 build-example-filter2 build-example-c-bindings
|
||||
build-example-noise:
|
||||
cd examples/noise && $(MAKE)
|
||||
|
||||
build-example-rln:
|
||||
cd examples/rln && $(MAKE)
|
||||
|
||||
build-example: build-example-basic-relay build-example-basic-light-client build-example-chat-2 build-example-filter2 build-example-c-bindings build-example-noise build-example-rln
|
||||
|
||||
static-library:
|
||||
@echo "Building static library..."
|
||||
${GOBIN} build \
|
||||
${GOCMD} build \
|
||||
-buildmode=c-archive \
|
||||
-tags="${BUILD_TAGS}" \
|
||||
-tags="${BUILD_TAGS} gowaku_no_rln" \
|
||||
-o ./build/lib/libgowaku.a \
|
||||
./library/
|
||||
./library/c/
|
||||
@echo "Static library built:"
|
||||
ifeq ($(detected_OS),Darwin)
|
||||
sed -i '' -e "s/#include <cgo_utils.h>//gi" ./build/lib/libgowaku.h
|
||||
else
|
||||
sed -i "s/#include <cgo_utils.h>//gi" ./build/lib/libgowaku.h
|
||||
endif
|
||||
@ls -la ./build/lib/libgowaku.*
|
||||
|
||||
dynamic-library:
|
||||
@echo "Building shared library..."
|
||||
$(GOBIN_SHARED_LIB_CFLAGS) $(GOBIN_SHARED_LIB_CGO_LDFLAGS) ${GOBIN} build \
|
||||
rm -f ./build/lib/libgowaku.$(GOBIN_SHARED_LIB_EXT)*
|
||||
$(GOBIN_SHARED_LIB_CFLAGS) $(GOBIN_SHARED_LIB_CGO_LDFLAGS) ${GOCMD} build \
|
||||
-buildmode=c-shared \
|
||||
-tags="${BUILD_TAGS}" \
|
||||
-tags="${BUILD_TAGS} gowaku_no_rln" \
|
||||
-o ./build/lib/libgowaku.$(GOBIN_SHARED_LIB_EXT) \
|
||||
./library/
|
||||
./library/c/
|
||||
ifeq ($(detected_OS),Darwin)
|
||||
sed -i '' -e "s/#include <cgo_utils.h>//gi" ./build/lib/libgowaku.h
|
||||
else
|
||||
sed -i "s/#include <cgo_utils.h>//gi" ./build/lib/libgowaku.h
|
||||
endif
|
||||
ifeq ($(detected_OS),Linux)
|
||||
cd ./build/lib && \
|
||||
ls -lah . && \
|
||||
mv ./libgowaku.$(GOBIN_SHARED_LIB_EXT) ./libgowaku.$(GOBIN_SHARED_LIB_EXT).0 && \
|
||||
ln -s ./libgowaku.$(GOBIN_SHARED_LIB_EXT).0 ./libgowaku.$(GOBIN_SHARED_LIB_EXT)
|
||||
endif
|
||||
@ -143,30 +160,30 @@ endif
|
||||
mobile-android:
|
||||
@echo "Android target: ${ANDROID_TARGET} (override with ANDROID_TARGET var)"
|
||||
gomobile init && \
|
||||
${GOBIN} get -d golang.org/x/mobile/cmd/gomobile && \
|
||||
gomobile bind -v -target=android -androidapi=${ANDROID_TARGET} -ldflags="-s -w" -tags="${BUILD_TAGS}" $(BUILD_FLAGS) -o ./build/lib/gowaku.aar ./mobile
|
||||
${GOCMD} get -d golang.org/x/mobile/cmd/gomobile && \
|
||||
CGO=1 gomobile bind -v -target=android -androidapi=${ANDROID_TARGET} -ldflags="-s -w" -tags="${BUILD_TAGS} gowaku_no_rln" $(BUILD_FLAGS) -o ./build/lib/gowaku.aar ./library/mobile
|
||||
@echo "Android library built:"
|
||||
@ls -la ./build/lib/*.aar ./build/lib/*.jar
|
||||
|
||||
mobile-ios:
|
||||
gomobile init && \
|
||||
${GOBIN} get -d golang.org/x/mobile/cmd/gomobile && \
|
||||
gomobile bind -target=ios -ldflags="-s -w" -tags="nowatchdog ${BUILD_TAGS}" $(BUILD_FLAGS) -o ./build/lib/Gowaku.xcframework ./mobile
|
||||
${GOCMD} get -d golang.org/x/mobile/cmd/gomobile && \
|
||||
gomobile bind -target=ios -ldflags="-s -w" -tags="nowatchdog ${BUILD_TAGS} gowaku_no_rln" $(BUILD_FLAGS) -o ./build/lib/Gowaku.xcframework ./library/mobile
|
||||
@echo "IOS library built:"
|
||||
@ls -la ./build/lib/*.xcframework
|
||||
|
||||
install-xtools:
|
||||
${GOBIN} install golang.org/x/tools/...@v0.1.10
|
||||
${GOCMD} install golang.org/x/tools/...@v0.1.10
|
||||
|
||||
install-bindata:
|
||||
${GOBIN} install github.com/kevinburke/go-bindata/go-bindata@v3.13.0
|
||||
${GOCMD} install github.com/kevinburke/go-bindata/go-bindata@v3.13.0
|
||||
|
||||
install-gomobile: install-xtools
|
||||
${GOBIN} install golang.org/x/mobile/cmd/gomobile@v0.0.0-20220518205345-8578da9835fd
|
||||
${GOBIN} install golang.org/x/mobile/cmd/gobind@v0.0.0-20220518205345-8578da9835fd
|
||||
${GOCMD} install golang.org/x/mobile/cmd/gomobile@v0.0.0-20220518205345-8578da9835fd
|
||||
${GOCMD} install golang.org/x/mobile/cmd/gobind@v0.0.0-20220518205345-8578da9835fd
|
||||
|
||||
build-linux-pkg:
|
||||
docker build --build-arg UID=${UID} --build-arg GID=${GID} -f ./scripts/linux/Dockerfile -t statusteam/gowaku-linux-pkgs:latest .
|
||||
docker build --build-arg UID=${UID} --build-arg GID=${GID} -f ./scripts/linux/Dockerfile -t wakuorg/gowaku-linux-pkgs:latest .
|
||||
./scripts/linux/docker-run.sh
|
||||
ls -la ./build/*.rpm ./build/*.deb
|
||||
|
||||
@ -180,5 +197,24 @@ stop-ganache:
|
||||
|
||||
test-onchain: BUILD_TAGS += include_onchain_tests
|
||||
test-onchain:
|
||||
${GOBIN} test -v -count 1 -tags="${BUILD_TAGS}" github.com/waku-org/go-waku/waku/v2/protocol/rln
|
||||
${GOCMD} test -v -count 1 -tags="${BUILD_TAGS}" github.com/waku-org/go-waku/waku/v2/protocol/rln
|
||||
|
||||
test-onchain-with-race:
|
||||
${GOCMD} test -race -v -count 1 -tags="${BUILD_TAGS}" github.com/waku-org/go-waku/waku/v2/protocol/rln
|
||||
|
||||
test-postgres: PG_BUILD_TAGS = ${BUILD_TAGS} include_postgres_tests
|
||||
test-postgres:
|
||||
${GOCMD} test -p 1 -v -count 1 -tags="${PG_BUILD_TAGS}" github.com/waku-org/go-waku/waku/persistence/...
|
||||
|
||||
test-postgres-with-race:
|
||||
${GOCMD} test -race -p 1 -v -count 1 -tags="${PG_BUILD_TAGS}" github.com/waku-org/go-waku/waku/persistence/...
|
||||
|
||||
test-filter:
|
||||
${GOCMD} test -v github.com/waku-org/go-waku/waku/v2/protocol/filter -run TestFilterSuite -count=1
|
||||
|
||||
test-filter-api:
|
||||
${GOCMD} test -v github.com/waku-org/go-waku/waku/v2/api -run TestFilterApiSuite
|
||||
|
||||
TEST_STOREV3_NODE ?=
|
||||
test-storev3:
|
||||
TEST_STOREV3_NODE=${TEST_STOREV3_NODE} ${GOCMD} test -p 1 -v -count 1 -tags="${BUILD_TAGS} include_storev3_tests" github.com/waku-org/go-waku/waku/v2/protocol/store/...
|
||||
|
||||
18
README.md
18
README.md
@ -5,8 +5,7 @@ A Go implementation of the [Waku v2 protocol](https://rfc.vac.dev/spec/10).
|
||||
<p align="left">
|
||||
<a href="https://goreportcard.com/report/github.com/waku-org/go-waku"><img src="https://goreportcard.com/badge/github.com/waku-org/go-waku" /></a>
|
||||
<a href="https://godoc.org/github.com/waku-org/go-waku"><img src="http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.0-orange.svg?style=flat-square" /></a>
|
||||
<a href="https://lgtm.com/projects/g/waku-org/go-waku/alerts/"><img alt="Total alerts" src="https://img.shields.io/lgtm/alerts/g/waku-org/go-waku.svg?logo=lgtm&logoWidth=18"/></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.20.0-orange.svg?style=flat-square" /></a>
|
||||
<a href="https://codeclimate.com/github/waku-org/go-waku/maintainability"><img src="https://api.codeclimate.com/v1/badges/426bdff6a339ff4d536b/maintainability" /></a>
|
||||
<br>
|
||||
</p>
|
||||
@ -39,18 +38,19 @@ nix develop
|
||||
#### Docker
|
||||
```
|
||||
docker run -i -t -p 60000:60000 -p 9000:9000/udp \
|
||||
statusteam/go-waku:v0.5.2 \ # or, the image:tag of your choice
|
||||
--dns-discovery:true \
|
||||
--dns-discovery-url:enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@prod.waku.nodes.status.im \
|
||||
wakuorg/go-waku:latest \
|
||||
--dns-discovery \
|
||||
--dns-discovery-url enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
|
||||
--discv5-discovery
|
||||
```
|
||||
or use the [image:tag](https://hub.docker.com/r/wakuorg/go-waku/tags) of your choice.
|
||||
|
||||
or build and run the image with:
|
||||
|
||||
```
|
||||
docker build -t go-waku:latest .
|
||||
docker build -t wakuorg/go-waku:latest .
|
||||
|
||||
docker run go-waku:latest --help
|
||||
docker run wakuorg/go-waku:latest --help
|
||||
```
|
||||
|
||||
#### Building on windows
|
||||
@ -85,7 +85,7 @@ make mobile-ios
|
||||
- [Send messages using Waku Lightpush](docs/api/lightpush.md)
|
||||
- [Encrypting and decrypting Waku Messages](docs/api/encoding.md)
|
||||
- [Retrieve message history using Waku Store](docs/api/store.md)
|
||||
- [C Bindings](library/README.md)
|
||||
- [C Bindings](library/c/README.md)
|
||||
- [Waku Specs](https://rfc.vac.dev/spec), has information of [waku topics](https://rfc.vac.dev/spec/23/), wakuv1/[wakuv2](https://rfc.vac.dev/spec/14/) message, [rln relay](https://rfc.vac.dev/spec/58/) etc.
|
||||
- [Enr](https://eips.ethereum.org/EIPS/eip-778), [Enrtree](https://eips.ethereum.org/EIPS/eip-1459)
|
||||
- [devp2p](https://github.com/ethereum/go-ethereum/tree/master/cmd/devp2p) tool for playing with enr/entree sync tree. [Tutorial](https://geth.ethereum.org/docs/developers/geth-developer/dns-discovery-setup)
|
||||
@ -107,7 +107,7 @@ Thank you for considering to help out with the source code! We welcome contribut
|
||||
If you'd like to contribute to go-waku, please fork, fix, commit and send a pull request. If you wish to submit more complex changes though, please check up with the core devs first to ensure those changes are in line with the general philosophy of the project and/or get some early feedback which can make both your efforts much lighter as well as our review and merge procedures quick and simple.
|
||||
|
||||
To build and test this repository, you need:
|
||||
- [Go](https://golang.org/) (version 1.17 or later)
|
||||
- [Go](https://golang.org/) (version 1.20)
|
||||
- [protoc](https://grpc.io/docs/protoc-installation/)
|
||||
- [protoc-gen-go](https://protobuf.dev/getting-started/gotutorial/#compiling-protocol-buffers)
|
||||
|
||||
|
||||
99
ci/Jenkinsfile
vendored
99
ci/Jenkinsfile
vendored
@ -1,99 +0,0 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
|
||||
pipeline {
|
||||
agent { label 'linux' }
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableConcurrentBuilds()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 40, unit: 'MINUTES')
|
||||
/* Limit builds retained */
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '30',
|
||||
artifactNumToKeepStr: '10',
|
||||
))
|
||||
}
|
||||
|
||||
/* WARNING: Defining parameters here with the ?: trick causes them to remember last value. */
|
||||
parameters {
|
||||
booleanParam(
|
||||
name: 'PUBLISH',
|
||||
description: 'Trigger publishing of build results for nightly or release.',
|
||||
defaultValue: getPublishDefault(params.PUBLISH),
|
||||
)
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Build') {
|
||||
parallel {
|
||||
stage('iOS') { steps { script {
|
||||
ios = jenkins.Build('go-waku/platforms/ios')
|
||||
} } }
|
||||
stage('Android') { steps { script {
|
||||
android = jenkins.Build('go-waku/platforms/android')
|
||||
} } }
|
||||
stage('Linux') { steps { script {
|
||||
linux = jenkins.Build('go-waku/platforms/linux')
|
||||
} } }
|
||||
}
|
||||
}
|
||||
stage('Archive') {
|
||||
steps { script {
|
||||
sh('rm -f pkg/*')
|
||||
jenkins.copyArts(ios)
|
||||
jenkins.copyArts(android)
|
||||
jenkins.copyArts(linux)
|
||||
sha = "pkg/${utils.pkgFilename(ext: 'sha256')}"
|
||||
dir('pkg') {
|
||||
/* generate sha256 checksums for upload */
|
||||
sh "sha256sum * | tee ../${sha}"
|
||||
archiveArtifacts('*')
|
||||
}
|
||||
} }
|
||||
}
|
||||
stage('Upload') {
|
||||
steps { script {
|
||||
/* object for easier URLs handling */
|
||||
urls = [
|
||||
/* mobile */
|
||||
Android: utils.pkgUrl(android),
|
||||
iOS: utils.pkgUrl(ios),
|
||||
Linux: utils.pkgUrl(linux),
|
||||
/* upload the sha256 checksums file too */
|
||||
SHA: s3.uploadArtifact(sha),
|
||||
]
|
||||
/* add URLs to the build description */
|
||||
jenkins.setBuildDesc(urls)
|
||||
} }
|
||||
}
|
||||
stage('Publish') {
|
||||
when { expression { params.PUBLISH } }
|
||||
steps { script {
|
||||
github.publishReleaseFiles(repo: 'status-desktop');
|
||||
} }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Helper that generates list of available choices for a parameter
|
||||
* but re-orders them based on the currently set value. First is default. */
|
||||
def List genChoices(String previousChoice, List defaultChoices) {
|
||||
if (previousChoice == null) {
|
||||
return defaultChoices
|
||||
}
|
||||
choices = defaultChoices.minus(previousChoice)
|
||||
choices.add(0, previousChoice)
|
||||
return choices
|
||||
}
|
||||
|
||||
/* Helper that makes PUBLISH default to 'false' unless:
|
||||
* - The build is for a release branch
|
||||
* - A user explicitly specified a value
|
||||
* Since release builds create and re-create GitHub drafts every time. */
|
||||
def Boolean getPublishDefault(Boolean previousValue) {
|
||||
if (env.JOB_NAME.startsWith('go-waku/release')) { return true }
|
||||
if (previousValue != null) { return previousValue }
|
||||
return false
|
||||
}
|
||||
@ -1,82 +0,0 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
|
||||
pipeline {
|
||||
agent { label 'linux && nix-2.11 && x86_64' }
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableConcurrentBuilds()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
/* Go requires a certain directory structure */
|
||||
checkoutToSubdirectory('src/github.com/waku-org/go-waku')
|
||||
/* Limit builds retained */
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '20',
|
||||
artifactNumToKeepStr: '10',
|
||||
))
|
||||
/* Allows combined build to copy */
|
||||
copyArtifactPermission('/go-waku/*')
|
||||
}
|
||||
|
||||
environment {
|
||||
CC = "gcc-10"
|
||||
/* Other stuff */
|
||||
TARGET = 'android'
|
||||
REPO = "${env.WORKSPACE}/src/github.com/waku-org/go-waku"
|
||||
GOCACHE = "${env.WORKSPACE_TMP}/go-build"
|
||||
GOPATH = "${env.WORKSPACE}"
|
||||
PATH = "${env.PATH}:${env.GOPATH}/bin"
|
||||
/* Android SDK */
|
||||
ANDROID_HOME = '/usr/lib/android-sdk'
|
||||
ANDROID_SDK_ROOT = '/usr/lib/android-sdk'
|
||||
/* gomobile requires a specific NDK version */
|
||||
ANDROID_NDK = "/opt/android-ndk-r23c"
|
||||
ANDROID_NDK_HOME = "/opt/android-ndk-r23c"
|
||||
}
|
||||
|
||||
stages {
|
||||
|
||||
stage('Prep') { steps { script { dir(env.REPO) {
|
||||
env.ARTIFACT = "${env.REPO}/pkg/" + utils.pkgFilename(
|
||||
name: "go-waku",
|
||||
type: "android",
|
||||
ext: "tar.gz"
|
||||
)
|
||||
|
||||
nix.develop('make install-gomobile', pure: false)
|
||||
} } } }
|
||||
|
||||
stage('Build') { steps { script { dir(env.REPO) {
|
||||
/* First gomobile run always fails.
|
||||
* https://github.com/golang/go/issues/37372 */
|
||||
nix.develop('make mobile-android || make mobile-android', pure: false)
|
||||
dir('build/lib') {
|
||||
sh 'tar -czvf gowaku-android.tar.gz gowaku.aar gowaku-sources.jar'
|
||||
sh "cp gowaku-android.tar.gz ${env.ARTIFACT}"
|
||||
}
|
||||
} } } }
|
||||
|
||||
stage('Parallel Upload') {
|
||||
parallel {
|
||||
stage('Archive') {
|
||||
steps { script {
|
||||
archiveArtifacts(env.ARTIFACT.minus("${env.WORKSPACE}/"))
|
||||
} }
|
||||
}
|
||||
stage('Upload') {
|
||||
steps { script {
|
||||
env.PKG_URL = s3.uploadArtifact(env.ARTIFACT)
|
||||
jenkins.setBuildDesc(android: env.PKG_URL)
|
||||
} }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
success { script { github.notifyPR(true) } }
|
||||
failure { script { github.notifyPR(false) } }
|
||||
always { cleanWs() }
|
||||
}
|
||||
}
|
||||
@ -11,18 +11,30 @@ pipeline {
|
||||
)
|
||||
string(
|
||||
name: 'IMAGE_NAME',
|
||||
defaultValue: 'statusteam/go-waku',
|
||||
description: 'Docker image name.',
|
||||
defaultValue: params.IMAGE_NAME ?: 'waku-org/go-waku',
|
||||
)
|
||||
string(
|
||||
name: 'IMAGE_TAG',
|
||||
defaultValue: env.JOB_BASE_NAME == 'release' ? 'stable' : 'deploy-test',
|
||||
description: 'Docker image tag.',
|
||||
defaultValue: getDefaultImageTag(params.IMAGE_TAG)
|
||||
)
|
||||
string(
|
||||
name: 'DOCKER_CRED',
|
||||
description: 'Name of Docker Registry credential.',
|
||||
defaultValue: params.DOCKER_CRED ?: 'harbor-wakuorg-robot',
|
||||
)
|
||||
string(
|
||||
name: 'DOCKER_REGISTRY_URL',
|
||||
description: 'URL of the Docker Registry',
|
||||
defaultValue: params.DOCKER_REGISTRY_URL ?: 'https://harbor.status.im'
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableRestartFromStage()
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '30',
|
||||
@ -32,83 +44,38 @@ pipeline {
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps { script {
|
||||
def imageTag = GIT_COMMIT.take(8)
|
||||
if (env.JOB_BASE_NAME == 'release') {
|
||||
imageTag = params.GIT_REF
|
||||
}
|
||||
image = docker.build(
|
||||
"${params.IMAGE_NAME}:${imageTag}",
|
||||
"${params.IMAGE_NAME}:${params.IMAGE_TAG ?: GIT_COMMIT.take(8)}",
|
||||
"--build-arg='GIT_COMMIT=${GIT_COMMIT.take(8)}' ."
|
||||
)
|
||||
} }
|
||||
}
|
||||
|
||||
stage('Push') {
|
||||
when { expression { params.IMAGE_TAG != '' } }
|
||||
steps { script {
|
||||
withDockerRegistry([
|
||||
credentialsId: "dockerhub-statusteam-auto", url: ""
|
||||
credentialsId: params.DOCKER_CRED, url: params.DOCKER_REGISTRY_URL
|
||||
]) {
|
||||
image.push()
|
||||
}
|
||||
} }
|
||||
}
|
||||
|
||||
stage('Deploy') {
|
||||
steps { script {
|
||||
withDockerRegistry([
|
||||
credentialsId: "dockerhub-statusteam-auto", url: ""
|
||||
]) {
|
||||
image.push(env.IMAGE_TAG)
|
||||
/* If Git ref is a tag push it as Docker tag too. */
|
||||
if (params.GIT_REF ==~ /v\d+\.\d+\.\d+.*/) {
|
||||
image.push(params.GIT_REF)
|
||||
}
|
||||
}
|
||||
} }
|
||||
}
|
||||
}
|
||||
post {
|
||||
success { script {
|
||||
discordNotify(
|
||||
header: 'Go-Waku build successful!',
|
||||
cred: 'discord-waku-deployments-webhook',
|
||||
)
|
||||
} }
|
||||
always { cleanWs() }
|
||||
}
|
||||
}
|
||||
|
||||
def discordNotify(Map args=[:]) {
|
||||
def opts = [
|
||||
header: args.header ?: 'Deployment successful!',
|
||||
cred: args.cred ?: null,
|
||||
]
|
||||
def repo = [
|
||||
url: GIT_URL.minus('.git'),
|
||||
branch: GIT_BRANCH.minus('origin/'),
|
||||
commit: GIT_COMMIT.take(8),
|
||||
prev: (
|
||||
env.GIT_PREVIOUS_SUCCESSFUL_COMMIT ?: env.GIT_PREVIOUS_COMMIT ?: 'master'
|
||||
).take(8),
|
||||
]
|
||||
wrap([$class: 'BuildUser']) {
|
||||
BUILD_USER_ID = env.BUILD_USER_ID
|
||||
}
|
||||
withCredentials([
|
||||
string(
|
||||
credentialsId: opts.cred,
|
||||
variable: 'DISCORD_WEBHOOK',
|
||||
),
|
||||
]) {
|
||||
discordSend(
|
||||
link: env.BUILD_URL,
|
||||
result: currentBuild.currentResult,
|
||||
webhookURL: env.DISCORD_WEBHOOK,
|
||||
title: "${env.JOB_NAME}#${env.BUILD_NUMBER}",
|
||||
description: """
|
||||
${opts.header}
|
||||
Image: [`${IMAGE_NAME}:${IMAGE_TAG}`](https://hub.docker.com/r/${IMAGE_NAME}/tags?name=${IMAGE_TAG})
|
||||
Branch: [`${repo.branch}`](${repo.url}/commits/${repo.branch})
|
||||
Commit: [`${repo.commit}`](${repo.url}/commit/${repo.commit})
|
||||
Diff: [`${repo.prev}...${repo.commit}`](${repo.url}/compare/${repo.prev}...${repo.commit})
|
||||
By: [`${BUILD_USER_ID}`](${repo.url}/commits?author=${BUILD_USER_ID})
|
||||
""",
|
||||
)
|
||||
def getDefaultImageTag(currentValue) {
|
||||
switch (env.JOB_BASE_NAME) {
|
||||
case 'docker-latest': return 'latest'
|
||||
case 'docker-release': return 'stable'
|
||||
case 'docker-manual': return ''
|
||||
default: return currentValue
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,78 +0,0 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
|
||||
pipeline {
|
||||
agent { label 'macos && nix-2.11 && aarch64' }
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
/* Go requires a certain directory structure */
|
||||
checkoutToSubdirectory('src/github.com/waku-org/go-waku')
|
||||
/* Limit builds retained */
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '20',
|
||||
artifactNumToKeepStr: '10',
|
||||
))
|
||||
/* Allows combined build to copy */
|
||||
copyArtifactPermission('/go-waku/*')
|
||||
}
|
||||
|
||||
environment {
|
||||
TARGET = 'ios'
|
||||
REPO = "${env.WORKSPACE}/src/github.com/waku-org/go-waku"
|
||||
GOCACHE = "${env.WORKSPACE_TMP}/go-build"
|
||||
GOPATH = "${env.WORKSPACE}"
|
||||
PATH = "${env.PATH}:${env.GOPATH}/bin"
|
||||
}
|
||||
|
||||
stages {
|
||||
|
||||
stage('Prep') { steps { script { dir(env.REPO) {
|
||||
env.ARTIFACT = "${env.REPO}/pkg/" + utils.pkgFilename(
|
||||
name: "go-waku",
|
||||
type: "ios",
|
||||
ext: "tar.gz"
|
||||
)
|
||||
sh 'make install-gomobile'
|
||||
} } } }
|
||||
|
||||
stage('Build') {
|
||||
steps { script { dir(env.REPO) {
|
||||
nix.develop('which xcodebuild', pure: false)
|
||||
nix.develop('make mobile-ios', pure: false)
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Package') {
|
||||
steps { dir(env.REPO) {
|
||||
dir('build/lib') {
|
||||
sh 'tar -czvf gowaku-ios.tar.gz Gowaku.xcframework'
|
||||
sh "cp gowaku-ios.tar.gz ${env.ARTIFACT}"
|
||||
}
|
||||
} }
|
||||
}
|
||||
|
||||
stage('Parallel Upload') {
|
||||
parallel {
|
||||
stage('Archive') {
|
||||
steps { script {
|
||||
archiveArtifacts(env.ARTIFACT.minus("${env.WORKSPACE}/"))
|
||||
} }
|
||||
}
|
||||
stage('Upload') {
|
||||
steps { script {
|
||||
env.PKG_URL = s3.uploadArtifact(env.ARTIFACT)
|
||||
jenkins.setBuildDesc(ios: env.PKG_URL)
|
||||
} }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
success { script { github.notifyPR(true) } }
|
||||
failure { script { github.notifyPR(false) } }
|
||||
always { cleanWs() }
|
||||
}
|
||||
}
|
||||
@ -1,83 +0,0 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
label 'linux && nix-2.11 && x86_64'
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableConcurrentBuilds()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
/* Go requires a certain directory structure */
|
||||
checkoutToSubdirectory('src/github.com/waku-org/go-waku')
|
||||
/* Limit builds retained */
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '20',
|
||||
artifactNumToKeepStr: '10',
|
||||
))
|
||||
/* Allows combined build to copy */
|
||||
copyArtifactPermission('/go-waku/*')
|
||||
}
|
||||
|
||||
environment {
|
||||
TARGET = 'linux'
|
||||
REPO = "${env.WORKSPACE}/src/github.com/waku-org/go-waku"
|
||||
GOCACHE = "${env.WORKSPACE_TMP}/go-build"
|
||||
GOPATH = "${env.WORKSPACE}"
|
||||
PATH = "${env.PATH}:${env.GOPATH}/bin"
|
||||
}
|
||||
|
||||
stages {
|
||||
|
||||
stage('Prep') {
|
||||
steps { script { dir(env.REPO) {
|
||||
env.DEB_ARTIFACT = "${env.REPO}/pkg/" + utils.pkgFilename(
|
||||
name: "go-waku",
|
||||
type: "x86_64",
|
||||
ext: "deb"
|
||||
)
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Build') {
|
||||
steps { script { dir(env.REPO) {
|
||||
nix.develop('make build')
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Package') {
|
||||
steps { script { dir(env.REPO) {
|
||||
dir('./scripts/linux') {
|
||||
nix.develop('./fpm-build.sh', attr: 'fpm')
|
||||
}
|
||||
dir('build') {
|
||||
sh "cp gowaku*.deb ${env.DEB_ARTIFACT}"
|
||||
}
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Parallel Upload') {
|
||||
parallel {
|
||||
stage('Archive') {
|
||||
steps { script {
|
||||
archiveArtifacts(env.DEB_ARTIFACT.minus("${env.WORKSPACE}/"))
|
||||
} }
|
||||
}
|
||||
stage('Upload') {
|
||||
steps { script {
|
||||
env.PKG_URL = s3.uploadArtifact(env.DEB_ARTIFACT)
|
||||
jenkins.setBuildDesc(x86_64_deb: env.PKG_URL)
|
||||
} }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
success { script { github.notifyPR(true) } }
|
||||
failure { script { github.notifyPR(false) } }
|
||||
always { cleanWs() }
|
||||
}
|
||||
}
|
||||
@ -1,12 +1,13 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
library 'status-jenkins-lib@v1.9.26'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
label 'linux && nix-2.11 && x86_64'
|
||||
label 'linux && nix-2.24 && x86_64'
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableRestartFromStage()
|
||||
disableConcurrentBuilds()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
@ -27,10 +28,7 @@ pipeline {
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps { script {
|
||||
sh("""#!/usr/bin/env bash
|
||||
${nix._sourceProfileInline()}
|
||||
nix build --print-out-paths .#node
|
||||
""")
|
||||
nix.flake('node')
|
||||
} }
|
||||
}
|
||||
stage('Check') {
|
||||
@ -45,15 +43,12 @@ pipeline {
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps { script {
|
||||
sh("""#!/usr/bin/env bash
|
||||
${nix._sourceProfileInline()}
|
||||
nix build --print-out-paths .#library
|
||||
""")
|
||||
nix.flake('static-library')
|
||||
} }
|
||||
}
|
||||
stage('Check') {
|
||||
steps {
|
||||
sh 'ldd ./result/bin/library'
|
||||
sh 'readelf -h ./result/bin/libgowaku.a'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,80 +0,0 @@
|
||||
library 'status-jenkins-lib@v1.7.0'
|
||||
|
||||
pipeline {
|
||||
agent {
|
||||
label 'linux && nix-2.11 && x86_64'
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableConcurrentBuilds()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
/* Go requires a certain directory structure */
|
||||
checkoutToSubdirectory('src/github.com/waku-org/go-waku')
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '30',
|
||||
))
|
||||
}
|
||||
|
||||
environment {
|
||||
TARGET = 'tests'
|
||||
REPO = "${env.WORKSPACE}/src/github.com/waku-org/go-waku"
|
||||
GOCACHE = "${env.WORKSPACE_TMP}/go-build"
|
||||
GOPATH = "${env.WORKSPACE}/go"
|
||||
PATH = "${env.PATH}:${env.GOPATH}/bin"
|
||||
/* Necesary to avoid cache poisoning by other builds. */
|
||||
GOLANGCI_LINT_CACHE = "${env.WORKSPACE_TMP}/golangci-lint"
|
||||
/* Ganache config */
|
||||
GANACHE_RPC_PORT = "${8989 + env.EXECUTOR_NUMBER.toInteger()}"
|
||||
GANACHE_MNEMONIC = 'swim relax risk shy chimney please usual search industry board music segment'
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Lint') {
|
||||
steps { script { dir(env.REPO) {
|
||||
nix.develop('make lint', pure: false)
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Test') {
|
||||
steps { script { dir(env.REPO) {
|
||||
nix.develop('make test-ci', pure: false)
|
||||
} } }
|
||||
}
|
||||
|
||||
stage('Ganache') {
|
||||
steps { script {
|
||||
ganache = docker.image(
|
||||
'trufflesuite/ganache:v7.4.1'
|
||||
).run(
|
||||
"-p 127.0.0.1:${env.GANACHE_RPC_PORT}:8545",
|
||||
"-m='${GANACHE_MNEMONIC}'"
|
||||
)
|
||||
} }
|
||||
}
|
||||
|
||||
stage('On-chain tests') {
|
||||
environment {
|
||||
GANACHE_NETWORK_RPC_URL = "ws://localhost:${env.GANACHE_RPC_PORT}"
|
||||
}
|
||||
steps { script { dir(env.REPO) {
|
||||
nix.develop('make test-onchain', pure: false)
|
||||
} } }
|
||||
}
|
||||
}
|
||||
post {
|
||||
always { script { /* No artifact but a PKG_URL is necessary. */
|
||||
env.PKG_URL = "${currentBuild.absoluteUrl}consoleText"
|
||||
} }
|
||||
success { script { github.notifyPR(true) } }
|
||||
failure { script { github.notifyPR(false) } }
|
||||
cleanup { script {
|
||||
cleanWs()
|
||||
catchError {
|
||||
ganache.stop()
|
||||
}
|
||||
} }
|
||||
}
|
||||
}
|
||||
@ -6,6 +6,7 @@ import (
|
||||
cli "github.com/urfave/cli/v2"
|
||||
"github.com/urfave/cli/v2/altsrc"
|
||||
"github.com/waku-org/go-waku/waku/cliutils"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -25,6 +26,19 @@ var (
|
||||
Destination: &options.Address,
|
||||
EnvVars: []string{"WAKUNODE2_ADDRESS"},
|
||||
})
|
||||
MaxPeerConnections = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "max-connections",
|
||||
Value: 50,
|
||||
Usage: "Maximum allowed number of libp2p connections.",
|
||||
Destination: &options.MaxPeerConnections,
|
||||
EnvVars: []string{"WAKUNODE2_MAX_CONNECTIONS"},
|
||||
})
|
||||
PeerStoreCapacity = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "peer-store-capacity",
|
||||
Usage: "Maximum stored peers in the peerstore.",
|
||||
Destination: &options.PeerStoreCapacity,
|
||||
EnvVars: []string{"WAKUNODE2_PEERSTORE_CAPACITY"},
|
||||
})
|
||||
WebsocketSupport = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "websocket-support",
|
||||
Aliases: []string{"ws"},
|
||||
@ -79,11 +93,11 @@ var (
|
||||
Destination: &options.Websocket.CertPath,
|
||||
EnvVars: []string{"WAKUNODE2_WEBSOCKET_SECURE_CERT_PATH"},
|
||||
})
|
||||
Dns4DomainName = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
DNS4DomainName = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "dns4-domain-name",
|
||||
Value: "",
|
||||
Usage: "The domain name resolving to the node's public IPv4 address",
|
||||
Destination: &options.Dns4DomainName,
|
||||
Destination: &options.DNS4DomainName,
|
||||
EnvVars: []string{"WAKUNODE2_WEBSOCKET_DNS4_DOMAIN_NAME"},
|
||||
})
|
||||
NodeKey = cliutils.NewGenericFlagSingleValue(&cli.GenericFlag{
|
||||
@ -108,15 +122,12 @@ var (
|
||||
Destination: &options.KeyPasswd,
|
||||
EnvVars: []string{"WAKUNODE2_KEY_PASSWORD"},
|
||||
})
|
||||
GenerateKey = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "generate-key",
|
||||
Usage: "Generate private key file at path specified in --key-file with the password defined by --key-password",
|
||||
Destination: &options.GenerateKey,
|
||||
})
|
||||
Overwrite = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "overwrite",
|
||||
Usage: "When generating a keyfile, overwrite the nodekey file if it already exists",
|
||||
Destination: &options.Overwrite,
|
||||
ClusterID = altsrc.NewUintFlag(&cli.UintFlag{
|
||||
Name: "cluster-id",
|
||||
Value: 0,
|
||||
Usage: "Cluster id that the node is running in. Node in a different cluster id is disconnected.",
|
||||
Destination: &options.ClusterID,
|
||||
EnvVars: []string{"WAKUNODE2_CLUSTER_ID"},
|
||||
})
|
||||
StaticNode = cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
|
||||
Name: "staticnode",
|
||||
@ -174,6 +185,28 @@ var (
|
||||
Destination: &options.CircuitRelay,
|
||||
EnvVars: []string{"WAKUNODE2_CIRCUIT_RELAY"},
|
||||
})
|
||||
ForceReachability = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "force-reachability",
|
||||
Usage: "Force the node reachability. WARNING: This flag is created for testing circuit relay and is not meant to be used in production. Use 'public' or 'private'",
|
||||
Value: "",
|
||||
Hidden: true,
|
||||
Destination: &options.ForceReachability,
|
||||
EnvVars: []string{"WAKUNODE2_REACHABILITY"},
|
||||
})
|
||||
ResourceScalingMemoryPercent = altsrc.NewFloat64Flag(&cli.Float64Flag{
|
||||
Name: "resource-scaling-memory-percentage",
|
||||
Usage: "Determines the percentage of total accessible memory that wil be dedicated to go-waku. A dedicated node with a lot of RAM could allocate 25% or more memory to go-waku",
|
||||
Value: 25,
|
||||
Destination: &options.ResourceScalingMemoryPercent,
|
||||
EnvVars: []string{"WAKUNODE2_RESOURCE_MEMORY_PERCENTAGE"},
|
||||
})
|
||||
ResourceScalingFDPercent = altsrc.NewFloat64Flag(&cli.Float64Flag{
|
||||
Name: "resource-scaling-fd-percentage",
|
||||
Usage: "Determines the percentage of total file descriptors that wil be dedicated to go-waku.",
|
||||
Value: 50,
|
||||
Destination: &options.ResourceScalingFDPercent,
|
||||
EnvVars: []string{"WAKUNODE2_RESOURCE_FD_PERCENTAGE"},
|
||||
})
|
||||
LogLevel = cliutils.NewGenericFlagSingleValue(&cli.GenericFlag{
|
||||
Name: "log-level",
|
||||
Aliases: []string{"l"},
|
||||
@ -202,11 +235,18 @@ var (
|
||||
})
|
||||
AgentString = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "agent-string",
|
||||
Value: "go-waku",
|
||||
Value: node.UserAgent,
|
||||
Usage: "client id to advertise",
|
||||
Destination: &options.UserAgent,
|
||||
EnvVars: []string{"WAKUNODE2_AGENT_STRING"},
|
||||
})
|
||||
IPColocationLimit = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "ip-colocation-limit",
|
||||
Value: node.DefaultMaxConnectionsPerIP,
|
||||
Usage: "max number of allowed peers from the same IP. Set it to 0 to remove the limitation.",
|
||||
Destination: &options.IPColocationLimit,
|
||||
EnvVars: []string{"WAKUNODE2_IP_COLOCATION_LIMIT"},
|
||||
})
|
||||
Relay = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "relay",
|
||||
Value: true,
|
||||
@ -215,11 +255,23 @@ var (
|
||||
EnvVars: []string{"WAKUNODE2_RELAY"},
|
||||
})
|
||||
Topics = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "topics",
|
||||
Usage: "List of topics to listen",
|
||||
Name: "topic",
|
||||
Usage: "Default topic to subscribe to. Argument may be repeated. Deprecated! Please use pubsub-topic and/or content-topic instead.",
|
||||
Destination: &options.Relay.Topics,
|
||||
EnvVars: []string{"WAKUNODE2_TOPICS"},
|
||||
})
|
||||
PubSubTopics = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "pubsub-topic",
|
||||
Usage: "Default pubsub topic to subscribe to. Argument may be repeated.",
|
||||
Destination: &options.Relay.PubSubTopics,
|
||||
EnvVars: []string{"WAKUNODE2_PUBSUB_TOPICS"},
|
||||
})
|
||||
ContentTopics = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "content-topic",
|
||||
Usage: "Default content topic to subscribe to. Argument may be repeated.",
|
||||
Destination: &options.Relay.ContentTopics,
|
||||
EnvVars: []string{"WAKUNODE2_CONTENT_TOPICS"},
|
||||
})
|
||||
ProtectedTopics = cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
|
||||
Name: "protected-topic",
|
||||
Usage: "Topics and its public key to be used for message validation, topic:pubkey. Argument may be repeated.",
|
||||
@ -242,6 +294,13 @@ var (
|
||||
Destination: &options.Relay.MinRelayPeersToPublish,
|
||||
EnvVars: []string{"WAKUNODE2_MIN_RELAY_PEERS_TO_PUBLISH"},
|
||||
})
|
||||
MaxRelayMsgSize = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "max-msg-size",
|
||||
Value: "150KB",
|
||||
Usage: "Maximum message size. Supported formats are B, KiB, KB, MiB. If no suffix, default is bytes",
|
||||
Destination: &options.Relay.MaxMsgSize,
|
||||
EnvVars: []string{"WAKUNODE2_MAX_RELAY_MSG_SIZE"},
|
||||
})
|
||||
StoreNodeFlag = cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
|
||||
Name: "storenode",
|
||||
Usage: "Multiaddr of a peer that supports store protocol. Option may be repeated",
|
||||
@ -277,13 +336,12 @@ var (
|
||||
Destination: &options.Store.DatabaseURL,
|
||||
EnvVars: []string{"WAKUNODE2_STORE_MESSAGE_DB_URL"},
|
||||
})
|
||||
StoreResumePeer = cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
|
||||
Name: "store-resume-peer",
|
||||
Usage: "Peer multiaddress to resume the message store at boot. Option may be repeated",
|
||||
Value: &cliutils.MultiaddrSlice{
|
||||
Values: &options.Store.ResumeNodes,
|
||||
},
|
||||
EnvVars: []string{"WAKUNODE2_STORE_RESUME_PEER"},
|
||||
StoreMessageDBMigration = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "store-message-db-migration",
|
||||
Usage: "Enable database migration at start.",
|
||||
Destination: &options.Store.Migration,
|
||||
Value: true,
|
||||
EnvVars: []string{"WAKUNODE2_STORE_MESSAGE_DB_MIGRATION"},
|
||||
})
|
||||
FilterFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "filter",
|
||||
@ -306,26 +364,6 @@ var (
|
||||
Destination: &options.Filter.Timeout,
|
||||
EnvVars: []string{"WAKUNODE2_FILTER_TIMEOUT"},
|
||||
})
|
||||
FilterLegacyFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "legacy-filter",
|
||||
Usage: "Use filter protocol (legacy)",
|
||||
Destination: &options.Filter.UseV1,
|
||||
EnvVars: []string{"WAKUNODE2_USE_LEGACY_FILTER"},
|
||||
})
|
||||
FilterLegacyLightClient = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "legacy-filter-light-client",
|
||||
Usage: "Don't accept legacy filter subscribers",
|
||||
Destination: &options.Filter.DisableFullNode,
|
||||
EnvVars: []string{"WAKUNODE2_LEGACY_FILTER_LIGHT_CLIENT"},
|
||||
})
|
||||
FilterLegacyNode = cliutils.NewGenericFlagMultiValue(&cli.GenericFlag{
|
||||
Name: "legacy-filternode",
|
||||
Usage: "Multiaddr of a peer that supports legacy filter protocol. Option may be repeated",
|
||||
Value: &cliutils.MultiaddrSlice{
|
||||
Values: &options.Filter.NodesV1,
|
||||
},
|
||||
EnvVars: []string{"WAKUNODE2_LEGACY_FILTERNODE"},
|
||||
})
|
||||
LightPush = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "lightpush",
|
||||
Usage: "Enable lightpush protocol",
|
||||
@ -381,8 +419,8 @@ var (
|
||||
})
|
||||
RendezvousServer = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rendezvous-server",
|
||||
Usage: "Enable rendezvous protocol server so other peers can use this node for discovery",
|
||||
Destination: &options.Rendezvous.Server,
|
||||
Usage: "Enable rendezvous protocol so other peers can use this node for discovery",
|
||||
Destination: &options.Rendezvous.Enable,
|
||||
EnvVars: []string{"WAKUNODE2_RENDEZVOUS_SERVER"},
|
||||
})
|
||||
PeerExchange = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
@ -407,7 +445,7 @@ var (
|
||||
})
|
||||
DNSDiscoveryUrl = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
|
||||
Name: "dns-discovery-url",
|
||||
Usage: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
|
||||
Usage: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'. Option may be repeated",
|
||||
Destination: &options.DNSDiscovery.URLs,
|
||||
EnvVars: []string{"WAKUNODE2_DNS_DISCOVERY_URL"},
|
||||
})
|
||||
@ -441,47 +479,6 @@ var (
|
||||
Destination: &options.Metrics.Port,
|
||||
EnvVars: []string{"WAKUNODE2_METRICS_SERVER_PORT"},
|
||||
})
|
||||
RPCFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rpc",
|
||||
Usage: "Enable the rpc server",
|
||||
Destination: &options.RPCServer.Enable,
|
||||
EnvVars: []string{"WAKUNODE2_RPC"},
|
||||
})
|
||||
RPCPort = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "rpc-port",
|
||||
Value: 8545,
|
||||
Usage: "Listening port of the rpc server",
|
||||
Destination: &options.RPCServer.Port,
|
||||
EnvVars: []string{"WAKUNODE2_RPC_PORT"},
|
||||
})
|
||||
RPCAddress = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "rpc-address",
|
||||
Value: "127.0.0.1",
|
||||
Usage: "Listening address of the rpc server",
|
||||
Destination: &options.RPCServer.Address,
|
||||
EnvVars: []string{"WAKUNODE2_RPC_ADDRESS"},
|
||||
})
|
||||
RPCRelayCacheCapacity = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "rpc-relay-cache-capacity",
|
||||
Value: 30,
|
||||
Usage: "Capacity of the Relay REST API message cache",
|
||||
Destination: &options.RPCServer.RelayCacheCapacity,
|
||||
EnvVars: []string{"WAKUNODE2_RPC_RELAY_CACHE_CAPACITY"},
|
||||
})
|
||||
RPCAdmin = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rpc-admin",
|
||||
Value: false,
|
||||
Usage: "Enable access to JSON-RPC Admin API",
|
||||
Destination: &options.RPCServer.Admin,
|
||||
EnvVars: []string{"WAKUNODE2_RPC_ADMIN"},
|
||||
})
|
||||
RPCPrivate = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rpc-private",
|
||||
Value: false,
|
||||
Usage: "Enable access to JSON-RPC Private API",
|
||||
Destination: &options.RPCServer.Private,
|
||||
EnvVars: []string{"WAKUNODE2_RPC_PRIVATE"},
|
||||
})
|
||||
RESTFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rest",
|
||||
Usage: "Enable Waku REST HTTP server",
|
||||
@ -504,11 +501,18 @@ var (
|
||||
})
|
||||
RESTRelayCacheCapacity = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "rest-relay-cache-capacity",
|
||||
Value: 30,
|
||||
Value: 1000,
|
||||
Usage: "Capacity of the Relay REST API message cache",
|
||||
Destination: &options.RESTServer.RelayCacheCapacity,
|
||||
EnvVars: []string{"WAKUNODE2_REST_RELAY_CACHE_CAPACITY"},
|
||||
})
|
||||
RESTFilterCacheCapacity = altsrc.NewIntFlag(&cli.IntFlag{
|
||||
Name: "rest-filter-cache-capacity",
|
||||
Value: 30,
|
||||
Usage: "Capacity of the Filter REST API message cache",
|
||||
Destination: &options.RESTServer.FilterCacheCapacity,
|
||||
EnvVars: []string{"WAKUNODE2_REST_FILTER_CACHE_CAPACITY"},
|
||||
})
|
||||
RESTAdmin = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rest-admin",
|
||||
Value: false,
|
||||
@ -516,13 +520,6 @@ var (
|
||||
Destination: &options.RESTServer.Admin,
|
||||
EnvVars: []string{"WAKUNODE2_REST_ADMIN"},
|
||||
})
|
||||
RESTPrivate = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "rest-private",
|
||||
Value: false,
|
||||
Usage: "Enable access to REST HTTP Private API",
|
||||
Destination: &options.RESTServer.Private,
|
||||
EnvVars: []string{"WAKUNODE2_REST_PRIVATE"},
|
||||
})
|
||||
PProf = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "pprof",
|
||||
Usage: "provides runtime profiling data at /debug/pprof in both REST and RPC servers if they're enabled",
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
//go:build !gowaku_rln
|
||||
// +build !gowaku_rln
|
||||
//go:build gowaku_no_rln
|
||||
// +build gowaku_no_rln
|
||||
|
||||
package main
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
//go:build gowaku_rln
|
||||
// +build gowaku_rln
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package main
|
||||
|
||||
@ -16,23 +16,12 @@ func rlnFlags() []cli.Flag {
|
||||
Usage: "Enable spam protection through rln-relay",
|
||||
Destination: &options.RLNRelay.Enable,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "rln-relay-membership-index",
|
||||
Value: 0,
|
||||
Usage: "(experimental) the index of node in the rln-relay group: a value between 0-99 inclusive",
|
||||
Destination: &options.RLNRelay.MembershipIndex,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "rln-relay-pubsub-topic",
|
||||
Value: "/waku/2/default-waku/proto",
|
||||
Usage: "the pubsub topic for which rln-relay gets enabled",
|
||||
Destination: &options.RLNRelay.PubsubTopic,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "rln-relay-content-topic",
|
||||
Value: "/toy-chat/2/luzhou/proto",
|
||||
Usage: "the content topic for which rln-relay gets enabled",
|
||||
Destination: &options.RLNRelay.ContentTopic,
|
||||
&cli.GenericFlag{
|
||||
Name: "rln-relay-cred-index",
|
||||
Usage: "the index of the onchain commitment to use",
|
||||
Value: &wcli.OptionalUint{
|
||||
Value: &options.RLNRelay.MembershipIndex,
|
||||
},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "rln-relay-dynamic",
|
||||
@ -51,14 +40,11 @@ func rlnFlags() []cli.Flag {
|
||||
Usage: "Password for encrypting RLN credentials",
|
||||
Destination: &options.RLNRelay.CredentialsPassword,
|
||||
},
|
||||
// TODO: this is a good candidate option for subcommands
|
||||
// TODO: consider accepting a private key file and passwd
|
||||
&cli.GenericFlag{
|
||||
Name: "rln-relay-eth-account-private-key",
|
||||
Usage: "Ethereum account private key used for registering in member contract",
|
||||
Value: &wcli.PrivateKeyValue{
|
||||
Value: &options.RLNRelay.ETHPrivateKey,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "rln-relay-tree-path",
|
||||
Value: "",
|
||||
Usage: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
|
||||
Destination: &options.RLNRelay.TreePath,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "rln-relay-eth-client-address",
|
||||
@ -68,7 +54,7 @@ func rlnFlags() []cli.Flag {
|
||||
},
|
||||
&cli.GenericFlag{
|
||||
Name: "rln-relay-eth-contract-address",
|
||||
Usage: "Address of membership contract ",
|
||||
Usage: "Address of membership contract",
|
||||
Value: &wcli.AddressValue{
|
||||
Value: &options.RLNRelay.MembershipContractAddress,
|
||||
},
|
||||
80
cmd/waku/keygen/command.go
Normal file
80
cmd/waku/keygen/command.go
Normal file
@ -0,0 +1,80 @@
|
||||
package keygen
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Command generates a key file used to generate the node's peerID, encrypted with an optional password
|
||||
var Command = cli.Command{
|
||||
Name: "generate-key",
|
||||
Usage: "Generate private key file at path specified in --key-file with the password defined by --key-password",
|
||||
Action: func(cCtx *cli.Context) error {
|
||||
if err := generateKeyFile(Options.KeyFile, []byte(Options.KeyPasswd), Options.Overwrite); err != nil {
|
||||
utils.Logger().Fatal("could not write keyfile", zap.Error(err))
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
KeyFile,
|
||||
KeyPassword,
|
||||
Overwrite,
|
||||
},
|
||||
}
|
||||
|
||||
func checkForFileExistence(path string, overwrite bool) error {
|
||||
_, err := os.Stat(path)
|
||||
|
||||
if err == nil && !overwrite {
|
||||
return fmt.Errorf("%s already exists. Use --overwrite to overwrite the file", path)
|
||||
}
|
||||
|
||||
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generatePrivateKey() ([]byte, error) {
|
||||
key, err := crypto.GenerateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return key.D.Bytes(), nil
|
||||
}
|
||||
|
||||
func writeKeyFile(path string, key []byte, passwd []byte) error {
|
||||
encryptedK, err := keystore.EncryptDataV3(key, passwd, keystore.StandardScryptN, keystore.StandardScryptP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
output, err := json.Marshal(encryptedK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(path, output, 0600)
|
||||
}
|
||||
|
||||
func generateKeyFile(path string, passwd []byte, overwrite bool) error {
|
||||
if err := checkForFileExistence(path, overwrite); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key, err := generatePrivateKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return writeKeyFile(path, key, passwd)
|
||||
}
|
||||
35
cmd/waku/keygen/flags.go
Normal file
35
cmd/waku/keygen/flags.go
Normal file
@ -0,0 +1,35 @@
|
||||
package keygen
|
||||
|
||||
import (
|
||||
cli "github.com/urfave/cli/v2"
|
||||
"github.com/urfave/cli/v2/altsrc"
|
||||
)
|
||||
|
||||
// Options contain the settings used for generating a key file
|
||||
var Options GenerateKeyOptions
|
||||
|
||||
var (
|
||||
// KeyFile is a flag that contains the path where the node key will be written
|
||||
KeyFile = altsrc.NewPathFlag(&cli.PathFlag{
|
||||
Name: "key-file",
|
||||
Value: "./nodekey",
|
||||
Usage: "Path to a file containing the private key for the P2P node",
|
||||
Destination: &Options.KeyFile,
|
||||
EnvVars: []string{"WAKUNODE2_KEY_FILE"},
|
||||
})
|
||||
// KeyPassword is a flag to set the password used to encrypt the file
|
||||
KeyPassword = altsrc.NewStringFlag(&cli.StringFlag{
|
||||
Name: "key-password",
|
||||
Value: "secret",
|
||||
Usage: "Password used for the private key file",
|
||||
Destination: &Options.KeyPasswd,
|
||||
EnvVars: []string{"WAKUNODE2_KEY_PASSWORD"},
|
||||
})
|
||||
// Overwrite is a flag used to overwrite an existing key file
|
||||
Overwrite = altsrc.NewBoolFlag(&cli.BoolFlag{
|
||||
Name: "overwrite",
|
||||
Value: false,
|
||||
Usage: "Overwrite the nodekey file if it already exists",
|
||||
Destination: &Options.Overwrite,
|
||||
})
|
||||
)
|
||||
9
cmd/waku/keygen/options.go
Normal file
9
cmd/waku/keygen/options.go
Normal file
@ -0,0 +1,9 @@
|
||||
package keygen
|
||||
|
||||
// GenerateKeyOptions contains all the settings that can be used when generating
|
||||
// a keyfile with the generate-key command
|
||||
type GenerateKeyOptions struct {
|
||||
KeyFile string
|
||||
KeyPasswd string
|
||||
Overwrite bool
|
||||
}
|
||||
@ -5,11 +5,14 @@ import (
|
||||
|
||||
cli "github.com/urfave/cli/v2"
|
||||
"github.com/urfave/cli/v2/altsrc"
|
||||
"github.com/waku-org/go-waku/waku"
|
||||
"github.com/waku-org/go-waku/cmd/waku/keygen"
|
||||
"github.com/waku-org/go-waku/cmd/waku/rlngenerate"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var options waku.Options
|
||||
var options NodeOptions
|
||||
|
||||
func main() {
|
||||
// Defaults
|
||||
@ -20,6 +23,8 @@ func main() {
|
||||
&cli.StringFlag{Name: "config-file", Usage: "loads configuration from a TOML file (cmd-line parameters take precedence)"},
|
||||
TcpPort,
|
||||
Address,
|
||||
MaxPeerConnections,
|
||||
PeerStoreCapacity,
|
||||
WebsocketSupport,
|
||||
WebsocketPort,
|
||||
WebsocketSecurePort,
|
||||
@ -27,12 +32,11 @@ func main() {
|
||||
WebsocketSecureSupport,
|
||||
WebsocketSecureKeyPath,
|
||||
WebsocketSecureCertPath,
|
||||
Dns4DomainName,
|
||||
DNS4DomainName,
|
||||
NodeKey,
|
||||
KeyFile,
|
||||
KeyPassword,
|
||||
GenerateKey,
|
||||
Overwrite,
|
||||
ClusterID,
|
||||
StaticNode,
|
||||
KeepAlive,
|
||||
PersistPeers,
|
||||
@ -41,27 +45,31 @@ func main() {
|
||||
ExtMultiaddresses,
|
||||
ShowAddresses,
|
||||
CircuitRelay,
|
||||
ForceReachability,
|
||||
ResourceScalingMemoryPercent,
|
||||
ResourceScalingFDPercent,
|
||||
IPColocationLimit,
|
||||
LogLevel,
|
||||
LogEncoding,
|
||||
LogOutput,
|
||||
AgentString,
|
||||
Relay,
|
||||
Topics,
|
||||
ContentTopics,
|
||||
PubSubTopics,
|
||||
ProtectedTopics,
|
||||
RelayPeerExchange,
|
||||
MinRelayPeersToPublish,
|
||||
MaxRelayMsgSize,
|
||||
StoreNodeFlag,
|
||||
StoreFlag,
|
||||
StoreMessageDBURL,
|
||||
StoreMessageRetentionTime,
|
||||
StoreMessageRetentionCapacity,
|
||||
StoreResumePeer,
|
||||
StoreMessageDBMigration,
|
||||
FilterFlag,
|
||||
FilterNode,
|
||||
FilterTimeout,
|
||||
FilterLegacyFlag,
|
||||
FilterLegacyNode,
|
||||
FilterLegacyLightClient,
|
||||
LightPush,
|
||||
LightPushNode,
|
||||
Discv5Discovery,
|
||||
@ -79,18 +87,12 @@ func main() {
|
||||
MetricsServer,
|
||||
MetricsServerAddress,
|
||||
MetricsServerPort,
|
||||
RPCFlag,
|
||||
RPCPort,
|
||||
RPCAddress,
|
||||
RPCRelayCacheCapacity,
|
||||
RPCAdmin,
|
||||
RPCPrivate,
|
||||
RESTFlag,
|
||||
RESTAddress,
|
||||
RESTPort,
|
||||
RESTRelayCacheCapacity,
|
||||
RESTFilterCacheCapacity,
|
||||
RESTAdmin,
|
||||
RESTPrivate,
|
||||
PProf,
|
||||
}
|
||||
|
||||
@ -108,9 +110,22 @@ func main() {
|
||||
Before: altsrc.InitInputSourceWithContext(cliFlags, altsrc.NewTomlSourceFromFlagFunc("config-file")),
|
||||
Flags: cliFlags,
|
||||
Action: func(c *cli.Context) error {
|
||||
waku.Execute(options)
|
||||
err := Execute(options)
|
||||
if err != nil {
|
||||
utils.Logger().Error("failure while executing wakunode", zap.Error(err))
|
||||
switch e := err.(type) {
|
||||
case cli.ExitCoder:
|
||||
return e
|
||||
case error:
|
||||
return cli.Exit(err.Error(), 1)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Commands: []*cli.Command{
|
||||
&keygen.Command,
|
||||
&rlngenerate.Command,
|
||||
},
|
||||
}
|
||||
|
||||
err := app.Run(os.Args)
|
||||
|
||||
582
cmd/waku/node.go
Normal file
582
cmd/waku/node.go
Normal file
@ -0,0 +1,582 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
|
||||
"github.com/pbnjay/memory"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
dbutils "github.com/waku-org/go-waku/waku/persistence/utils"
|
||||
"github.com/waku-org/go-waku/waku/v2/dnsdisc"
|
||||
wakupeerstore "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/rendezvous"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
dssql "github.com/ipfs/go-ds-sql"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/config"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" // nolint: staticcheck
|
||||
ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/cmd/waku/server/rest"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/metrics"
|
||||
"github.com/waku-org/go-waku/waku/persistence"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
wprotocol "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/peer_exchange"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
func requiresDB(options NodeOptions) bool {
|
||||
return options.Store.Enable || options.Rendezvous.Enable
|
||||
}
|
||||
|
||||
func scalePerc(value float64) float64 {
|
||||
if value > 100 {
|
||||
return 100
|
||||
}
|
||||
|
||||
if value < 0.1 {
|
||||
return 0.1
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
const dialTimeout = 7 * time.Second
|
||||
|
||||
func nonRecoverErrorMsg(format string, a ...any) error {
|
||||
err := fmt.Errorf(format, a...)
|
||||
return nonRecoverError(err)
|
||||
}
|
||||
|
||||
func nonRecoverError(err error) error {
|
||||
return cli.Exit(err.Error(), 166)
|
||||
}
|
||||
|
||||
// Execute starts a go-waku node with settings determined by the Options parameter
|
||||
func Execute(options NodeOptions) error {
|
||||
// Set encoding for logs (console, json, ...)
|
||||
// Note that libp2p reads the encoding from GOLOG_LOG_FMT env var.
|
||||
lvl, err := zapcore.ParseLevel(options.LogLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
utils.InitLogger(options.LogEncoding, options.LogOutput, "gowaku", lvl)
|
||||
|
||||
hostAddr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:%d", options.Address, options.Port))
|
||||
if err != nil {
|
||||
return nonRecoverErrorMsg("invalid host address: %w", err)
|
||||
}
|
||||
|
||||
prvKey, err := getPrivKey(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p2pPrvKey := utils.EcdsaPrivKeyToSecp256k1PrivKey(prvKey)
|
||||
id, err := peer.IDFromPublicKey(p2pPrvKey.GetPublic())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger := utils.Logger().With(logging.HostID("node", id))
|
||||
|
||||
var db *sql.DB
|
||||
var migrationFn func(*sql.DB, *zap.Logger) error
|
||||
if requiresDB(options) && options.Store.Migration {
|
||||
dbSettings := dbutils.DBSettings{}
|
||||
db, migrationFn, err = dbutils.ParseURL(options.Store.DatabaseURL, dbSettings, logger)
|
||||
if err != nil {
|
||||
return nonRecoverErrorMsg("could not connect to DB: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var metricsServer *metrics.Server
|
||||
if options.Metrics.Enable {
|
||||
metricsServer = metrics.NewMetricsServer(options.Metrics.Address, options.Metrics.Port, logger)
|
||||
go metricsServer.Start()
|
||||
}
|
||||
|
||||
nodeOpts := []node.WakuNodeOption{
|
||||
node.WithLogger(logger),
|
||||
node.WithLogLevel(lvl),
|
||||
node.WithPrivateKey(prvKey),
|
||||
node.WithHostAddress(hostAddr),
|
||||
node.WithKeepAlive(10*time.Second, options.KeepAlive),
|
||||
node.WithMaxPeerConnections(options.MaxPeerConnections),
|
||||
node.WithPrometheusRegisterer(prometheus.DefaultRegisterer),
|
||||
node.WithPeerStoreCapacity(options.PeerStoreCapacity),
|
||||
node.WithMaxConnectionsPerIP(options.IPColocationLimit),
|
||||
node.WithClusterID(uint16(options.ClusterID)),
|
||||
}
|
||||
if len(options.AdvertiseAddresses) != 0 {
|
||||
nodeOpts = append(nodeOpts, node.WithAdvertiseAddresses(options.AdvertiseAddresses...))
|
||||
}
|
||||
|
||||
if options.ExtIP != "" {
|
||||
ip := net.ParseIP(options.ExtIP)
|
||||
if ip == nil {
|
||||
return nonRecoverErrorMsg("could not set external IP address: invalid IP")
|
||||
}
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithExternalIP(ip))
|
||||
}
|
||||
|
||||
if options.DNS4DomainName != "" {
|
||||
nodeOpts = append(nodeOpts, node.WithDNS4Domain(options.DNS4DomainName))
|
||||
}
|
||||
|
||||
libp2pOpts := node.DefaultLibP2POptions
|
||||
|
||||
libp2pOpts = append(libp2pOpts, libp2p.PrometheusRegisterer(prometheus.DefaultRegisterer))
|
||||
|
||||
memPerc := scalePerc(options.ResourceScalingMemoryPercent)
|
||||
fdPerc := scalePerc(options.ResourceScalingFDPercent)
|
||||
limits := rcmgr.DefaultLimits // Default memory limit: 1/8th of total memory, minimum 128MB, maximum 1GB
|
||||
scaledLimits := limits.Scale(int64(float64(memory.TotalMemory())*memPerc/100), int(float64(getNumFDs())*fdPerc/100))
|
||||
resourceManager, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(scaledLimits))
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not set resource limits: %w", err)
|
||||
}
|
||||
|
||||
libp2pOpts = append(libp2pOpts, libp2p.ResourceManager(resourceManager))
|
||||
libp2p.SetDefaultServiceLimits(&limits)
|
||||
|
||||
if len(options.AdvertiseAddresses) == 0 {
|
||||
libp2pOpts = append(libp2pOpts, libp2p.NATPortMap()) // Attempt to open ports using uPNP for NATed hosts.)
|
||||
}
|
||||
|
||||
// Node can be a circuit relay server
|
||||
if options.CircuitRelay {
|
||||
libp2pOpts = append(libp2pOpts, libp2p.EnableRelayService())
|
||||
}
|
||||
|
||||
if options.ForceReachability != "" {
|
||||
libp2pOpts = append(libp2pOpts, libp2p.EnableRelay())
|
||||
nodeOpts = append(nodeOpts, node.WithCircuitRelayParams(2*time.Second, 2*time.Second))
|
||||
if options.ForceReachability == "private" {
|
||||
logger.Warn("node forced to be unreachable!")
|
||||
libp2pOpts = append(libp2pOpts, libp2p.ForceReachabilityPrivate())
|
||||
} else if options.ForceReachability == "public" {
|
||||
logger.Warn("node forced to be publicly reachable!")
|
||||
libp2pOpts = append(libp2pOpts, libp2p.ForceReachabilityPublic())
|
||||
} else {
|
||||
return nonRecoverErrorMsg("invalid reachability value")
|
||||
}
|
||||
}
|
||||
|
||||
if options.UserAgent != "" {
|
||||
libp2pOpts = append(libp2pOpts, libp2p.UserAgent(options.UserAgent))
|
||||
}
|
||||
|
||||
if options.Websocket.Enable {
|
||||
nodeOpts = append(nodeOpts, node.WithWebsockets(options.Websocket.Address, options.Websocket.WSPort))
|
||||
}
|
||||
|
||||
if options.Websocket.Secure {
|
||||
nodeOpts = append(nodeOpts, node.WithSecureWebsockets(options.Websocket.Address, options.Websocket.WSSPort, options.Websocket.CertPath, options.Websocket.KeyPath))
|
||||
}
|
||||
|
||||
if options.ShowAddresses {
|
||||
printListeningAddresses(ctx, nodeOpts, options)
|
||||
return nil
|
||||
}
|
||||
|
||||
if options.Store.Enable && options.PersistPeers {
|
||||
// Create persistent peerstore
|
||||
queries, err := dbutils.NewQueries("peerstore", db)
|
||||
if err != nil {
|
||||
return nonRecoverErrorMsg("could not setup persistent peerstore database: %w", err)
|
||||
|
||||
}
|
||||
|
||||
datastore := dssql.NewDatastore(db, queries)
|
||||
opts := pstoreds.DefaultOpts()
|
||||
peerStore, err := pstoreds.NewPeerstore(ctx, datastore, opts)
|
||||
if err != nil {
|
||||
return nonRecoverErrorMsg("could not create persistent peerstore: %w", err)
|
||||
}
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithPeerStore(peerStore))
|
||||
}
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithLibP2POptions(libp2pOpts...))
|
||||
nodeOpts = append(nodeOpts, node.WithNTP())
|
||||
|
||||
maxMsgSize := parseMsgSizeConfig(options.Relay.MaxMsgSize)
|
||||
|
||||
if options.Relay.Enable {
|
||||
var wakurelayopts []pubsub.Option
|
||||
wakurelayopts = append(wakurelayopts, pubsub.WithPeerExchange(options.Relay.PeerExchange))
|
||||
wakurelayopts = append(wakurelayopts, pubsub.WithMaxMessageSize(maxMsgSize))
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithWakuRelayAndMinPeers(options.Relay.MinRelayPeersToPublish, wakurelayopts...))
|
||||
nodeOpts = append(nodeOpts, node.WithMaxMsgSize(maxMsgSize))
|
||||
}
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithWakuFilterLightNode())
|
||||
|
||||
if options.Filter.Enable {
|
||||
nodeOpts = append(nodeOpts, node.WithWakuFilterFullNode(filter.WithTimeout(options.Filter.Timeout)))
|
||||
}
|
||||
|
||||
var dbStore *persistence.DBStore
|
||||
if requiresDB(options) {
|
||||
dbOptions := []persistence.DBOption{
|
||||
persistence.WithDB(db),
|
||||
persistence.WithRetentionPolicy(options.Store.RetentionMaxMessages, options.Store.RetentionTime),
|
||||
}
|
||||
|
||||
if options.Store.Migration {
|
||||
dbOptions = append(dbOptions, persistence.WithMigrations(migrationFn)) // TODO: refactor migrations out of DBStore, or merge DBStore with rendezvous DB
|
||||
}
|
||||
|
||||
dbStore, err = persistence.NewDBStore(prometheus.DefaultRegisterer, logger, dbOptions...)
|
||||
if err != nil {
|
||||
return nonRecoverErrorMsg("error setting up db store: %w", err)
|
||||
}
|
||||
|
||||
nodeOpts = append(nodeOpts, node.WithMessageProvider(dbStore))
|
||||
}
|
||||
|
||||
if options.Store.Enable {
|
||||
nodeOpts = append(nodeOpts, node.WithWakuStore())
|
||||
nodeOpts = append(nodeOpts, node.WithMessageProvider(dbStore))
|
||||
}
|
||||
|
||||
if options.LightPush.Enable {
|
||||
nodeOpts = append(nodeOpts, node.WithLightPush())
|
||||
}
|
||||
|
||||
if options.PeerExchange.Enable {
|
||||
nodeOpts = append(nodeOpts, node.WithPeerExchange())
|
||||
}
|
||||
|
||||
if options.Rendezvous.Enable {
|
||||
rdb := rendezvous.NewDB(db, logger)
|
||||
nodeOpts = append(nodeOpts, node.WithRendezvous(rdb))
|
||||
}
|
||||
|
||||
utils.Logger().Info("Version details ", zap.String("version", node.Version), zap.String("commit", node.GitCommit))
|
||||
|
||||
if err = checkForRLN(logger, options, &nodeOpts); err != nil {
|
||||
return nonRecoverError(err)
|
||||
}
|
||||
|
||||
var discoveredNodes []dnsdisc.DiscoveredNode
|
||||
if options.DNSDiscovery.Enable {
|
||||
if len(options.DNSDiscovery.URLs.Value()) == 0 {
|
||||
return nonRecoverErrorMsg("DNS discovery URL is required")
|
||||
}
|
||||
discoveredNodes = node.GetNodesFromDNSDiscovery(logger, ctx, options.DNSDiscovery.Nameserver, options.DNSDiscovery.URLs.Value())
|
||||
}
|
||||
if options.DiscV5.Enable {
|
||||
discv5Opts, err := node.GetDiscv5Option(discoveredNodes, options.DiscV5.Nodes.Value(), options.DiscV5.Port, options.DiscV5.AutoUpdate)
|
||||
if err != nil {
|
||||
logger.Fatal("parsing ENR", zap.Error(err))
|
||||
}
|
||||
nodeOpts = append(nodeOpts, discv5Opts)
|
||||
}
|
||||
|
||||
//Process pubSub and contentTopics specified and arrive at all corresponding pubSubTopics
|
||||
pubSubTopicMap, err := processTopics(options)
|
||||
if err != nil {
|
||||
return nonRecoverError(err)
|
||||
}
|
||||
|
||||
pubSubTopicMapKeys := make([]string, 0, len(pubSubTopicMap))
|
||||
for k := range pubSubTopicMap {
|
||||
pubSubTopicMapKeys = append(pubSubTopicMapKeys, k)
|
||||
}
|
||||
|
||||
rs, err := wprotocol.TopicsToRelayShards(pubSubTopicMapKeys...)
|
||||
if err == nil {
|
||||
if len(rs) == 1 {
|
||||
nodeOpts = append(nodeOpts, node.WithShards(rs[0].ShardIDs))
|
||||
} else {
|
||||
logger.Warn("could not set ENR shard info", zap.String("error", "invalid number of clusters found"), zap.Int("numClusters", len(rs)))
|
||||
}
|
||||
} else {
|
||||
logger.Warn("could not obtain list of shards", zap.Error(err))
|
||||
}
|
||||
|
||||
wakuNode, err := node.New(nodeOpts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not instantiate waku: %w", err)
|
||||
}
|
||||
|
||||
if err = wakuNode.Start(ctx); err != nil {
|
||||
return nonRecoverError(err)
|
||||
}
|
||||
|
||||
for _, d := range discoveredNodes {
|
||||
wakuNode.AddDiscoveredPeer(d.PeerID, d.PeerInfo.Addrs, wakupeerstore.DNSDiscovery, nil, d.ENR, true)
|
||||
}
|
||||
|
||||
//For now assuming that static peers added support/listen on all topics specified via commandLine.
|
||||
staticPeers := map[protocol.ID][]multiaddr.Multiaddr{
|
||||
legacy_store.StoreID_v20beta4: options.Store.Nodes,
|
||||
lightpush.LightPushID_v20beta1: options.LightPush.Nodes,
|
||||
rendezvous.RendezvousID: options.Rendezvous.Nodes,
|
||||
filter.FilterSubscribeID_v20beta1: options.Filter.Nodes,
|
||||
}
|
||||
for protocolID, peers := range staticPeers {
|
||||
if err = addStaticPeers(wakuNode, peers, pubSubTopicMapKeys, protocolID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
if options.Relay.Enable {
|
||||
if err = handleRelayTopics(ctx, &wg, wakuNode, pubSubTopicMap); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, n := range options.StaticNodes {
|
||||
go func(ctx context.Context, node multiaddr.Multiaddr) {
|
||||
ctx, cancel := context.WithTimeout(ctx, dialTimeout)
|
||||
defer cancel()
|
||||
err = wakuNode.DialPeerWithMultiAddress(ctx, node)
|
||||
if err != nil {
|
||||
logger.Error("dialing peer", zap.Error(err))
|
||||
}
|
||||
}(ctx, n)
|
||||
}
|
||||
|
||||
if options.DiscV5.Enable {
|
||||
if err = wakuNode.DiscV5().Start(ctx); err != nil {
|
||||
logger.Fatal("starting discovery v5", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// retrieve and connect to peer exchange peers
|
||||
if options.PeerExchange.Enable && options.PeerExchange.Node != nil {
|
||||
logger.Info("retrieving peer info via peer exchange protocol")
|
||||
|
||||
peerID, err := wakuNode.AddPeer([]multiaddr.Multiaddr{*options.PeerExchange.Node}, wakupeerstore.Static,
|
||||
pubSubTopicMapKeys, peer_exchange.PeerExchangeID_v20alpha1)
|
||||
if err != nil {
|
||||
logger.Error("adding peer exchange peer", logging.MultiAddrs("node", *options.PeerExchange.Node), zap.Error(err))
|
||||
} else {
|
||||
desiredOutDegree := wakuNode.Relay().Params().D
|
||||
if err = wakuNode.PeerExchange().Request(ctx, desiredOutDegree, peer_exchange.WithPeer(peerID)); err != nil {
|
||||
logger.Error("requesting peers via peer exchange", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var restServer *rest.WakuRest
|
||||
if options.RESTServer.Enable {
|
||||
wg.Add(1)
|
||||
restConfig := rest.RestConfig{Address: options.RESTServer.Address,
|
||||
Port: uint(options.RESTServer.Port),
|
||||
EnablePProf: options.PProf,
|
||||
EnableAdmin: options.RESTServer.Admin,
|
||||
RelayCacheCapacity: uint(options.RESTServer.RelayCacheCapacity),
|
||||
FilterCacheCapacity: uint(options.RESTServer.FilterCacheCapacity)}
|
||||
|
||||
restServer = rest.NewWakuRest(wakuNode, restConfig, logger)
|
||||
restServer.Start(ctx, &wg)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
logger.Info("Node setup complete")
|
||||
|
||||
// Wait for a SIGINT or SIGTERM signal
|
||||
ch := make(chan os.Signal, 1)
|
||||
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-ch
|
||||
logger.Info("Received signal, shutting down...")
|
||||
|
||||
// shut the node down
|
||||
wakuNode.Stop()
|
||||
|
||||
if options.RESTServer.Enable {
|
||||
if err := restServer.Stop(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if options.Metrics.Enable {
|
||||
if err = metricsServer.Stop(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if db != nil {
|
||||
if err = db.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processTopics(options NodeOptions) (map[string][]string, error) {
|
||||
|
||||
//Using a map to avoid duplicate pub-sub topics that can result from autosharding
|
||||
// or same-topic being passed twice.
|
||||
pubSubTopicMap := make(map[string][]string)
|
||||
|
||||
for _, topic := range options.Relay.Topics.Value() {
|
||||
pubSubTopicMap[topic] = []string{}
|
||||
}
|
||||
|
||||
for _, topic := range options.Relay.PubSubTopics.Value() {
|
||||
pubSubTopicMap[topic] = []string{}
|
||||
}
|
||||
|
||||
//Get pubSub topics from contentTopics if they are as per autosharding
|
||||
for _, cTopic := range options.Relay.ContentTopics.Value() {
|
||||
contentTopic, err := wprotocol.StringToContentTopic(cTopic)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pTopic := wprotocol.GetShardFromContentTopic(contentTopic, wprotocol.GenerationZeroShardsCount)
|
||||
if _, ok := pubSubTopicMap[pTopic.String()]; !ok {
|
||||
pubSubTopicMap[pTopic.String()] = []string{}
|
||||
}
|
||||
pubSubTopicMap[pTopic.String()] = append(pubSubTopicMap[pTopic.String()], cTopic)
|
||||
}
|
||||
//If no topics are passed, then use default waku topic.
|
||||
if len(pubSubTopicMap) == 0 && options.ClusterID == 0 {
|
||||
pubSubTopicMap[relay.DefaultWakuTopic] = []string{}
|
||||
}
|
||||
|
||||
return pubSubTopicMap, nil
|
||||
}
|
||||
|
||||
func addStaticPeers(wakuNode *node.WakuNode, addresses []multiaddr.Multiaddr, pubSubTopics []string, protocols ...protocol.ID) error {
|
||||
for _, addr := range addresses {
|
||||
_, err := wakuNode.AddPeer([]multiaddr.Multiaddr{addr}, wakupeerstore.Static, pubSubTopics, protocols...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not add static peer: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadPrivateKeyFromFile(path string, passwd string) (*ecdsa.PrivateKey, error) {
|
||||
src, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encryptedK keystore.CryptoJSON
|
||||
err = json.Unmarshal(src, &encryptedK)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pKey, err := keystore.DecryptDataV3(encryptedK, passwd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return crypto.ToECDSA(pKey)
|
||||
}
|
||||
|
||||
func getPrivKey(options NodeOptions) (*ecdsa.PrivateKey, error) {
|
||||
var prvKey *ecdsa.PrivateKey
|
||||
// get private key from nodeKey or keyFile
|
||||
if options.NodeKey != nil {
|
||||
prvKey = options.NodeKey
|
||||
} else {
|
||||
if _, err := os.Stat(options.KeyFile); err == nil {
|
||||
if prvKey, err = loadPrivateKeyFromFile(options.KeyFile, options.KeyPasswd); err != nil {
|
||||
return nil, fmt.Errorf("could not read keyfile: %w", err)
|
||||
}
|
||||
} else {
|
||||
if os.IsNotExist(err) {
|
||||
if prvKey, err = crypto.GenerateKey(); err != nil {
|
||||
return nil, fmt.Errorf("error generating key: %w", err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("could not read keyfile: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return prvKey, nil
|
||||
}
|
||||
|
||||
// printListeningAddresses constructs a short-lived libp2p host from the
// already-assembled node options and prints each listen multiaddress (with
// the peer ID encapsulated) to stdout, one per line. It is used by the
// show-addresses flow before the real node is started, and panics on any
// setup error since there is no node to recover.
// NOTE(review): the ctx parameter is unused in this body — presumably kept
// for signature symmetry; confirm before relying on cancellation here.
func printListeningAddresses(ctx context.Context, nodeOpts []node.WakuNodeOption, options NodeOptions) {
	// Apply every option to an empty parameter struct to recover the node
	// identity and configured multiaddresses without starting a node.
	params := new(node.WakuNodeParameters)
	for _, opt := range nodeOpts {
		err := opt(params)
		if err != nil {
			panic(err)
		}
	}

	var libp2pOpts []config.Option
	libp2pOpts = append(libp2pOpts,
		params.Identity(),
		libp2p.ListenAddrs(params.MultiAddresses()...),
	)

	if options.Websocket.Secure {
		// Secure websockets require wiring the TLS config into the ws
		// transport explicitly (alongside the plain TCP transport).
		transports := libp2p.ChainOptions(
			libp2p.Transport(tcp.NewTCPTransport),
			libp2p.Transport(ws.New, ws.WithTLSConfig(params.TLSConfig())),
		)
		libp2pOpts = append(libp2pOpts, transports)
	}

	// Honor any custom address factory (e.g. advertise-address rewriting).
	addrFactory := params.AddressFactory()
	if addrFactory != nil {
		libp2pOpts = append(libp2pOpts, libp2p.AddrsFactory(addrFactory))
	}

	h, err := libp2p.New(libp2pOpts...)
	if err != nil {
		panic(err)
	}

	// Append /p2p/<peerID> to each listen address before printing.
	hostAddrs := utils.EncapsulatePeerID(h.ID(), h.Addrs()...)
	for _, addr := range hostAddrs {
		fmt.Println(addr)
	}

}
|
||||
|
||||
func parseMsgSizeConfig(msgSizeConfig string) int {
|
||||
|
||||
msgSize, err := humanize.ParseBytes(msgSizeConfig)
|
||||
if err != nil {
|
||||
msgSize = 0
|
||||
}
|
||||
return int(msgSize)
|
||||
}
|
||||
14
cmd/waku/node_no_rln.go
Normal file
14
cmd/waku/node_no_rln.go
Normal file
@ -0,0 +1,14 @@
|
||||
//go:build gowaku_no_rln
|
||||
// +build gowaku_no_rln
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// checkForRLN is the stub used when go-waku is built with the gowaku_no_rln
// tag: RLN support is compiled out, so there is nothing to validate or wire
// into nodeOpts. The parameters are kept to match the RLN build's signature.
func checkForRLN(logger *zap.Logger, options NodeOptions, nodeOpts *[]node.WakuNodeOption) error {
	// Do nothing
	return nil
}
|
||||
38
cmd/waku/node_rln.go
Normal file
38
cmd/waku/node_rln.go
Normal file
@ -0,0 +1,38 @@
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// checkForRLN validates the RLN relay configuration and, when enabled,
// appends the appropriate RLN node option (static or dynamic group
// management) to nodeOpts. Built only without the gowaku_no_rln tag.
// NOTE(review): the logger parameter is unused here — kept for signature
// parity with the no-RLN stub.
func checkForRLN(logger *zap.Logger, options NodeOptions, nodeOpts *[]node.WakuNodeOption) error {
	if options.RLNRelay.Enable {
		// RLN rate-limits relay traffic, so plain relay must be enabled too.
		if !options.Relay.Enable {
			return errors.New("waku relay is required to enable RLN relay")
		}

		if !options.RLNRelay.Dynamic {
			// Static membership: only the membership index is needed; the
			// nil argument is a spam-handler hook left unset here.
			*nodeOpts = append(*nodeOpts, node.WithStaticRLNRelay((*rln.MembershipIndex)(options.RLNRelay.MembershipIndex), nil))
		} else {
			// TODO: too many parameters in this function
			// consider passing a config struct instead
			*nodeOpts = append(*nodeOpts, node.WithDynamicRLNRelay(
				options.RLNRelay.CredentialsPath,
				options.RLNRelay.CredentialsPassword,
				options.RLNRelay.TreePath,
				options.RLNRelay.MembershipContractAddress,
				options.RLNRelay.MembershipIndex,
				nil,
				options.RLNRelay.ETHClientAddress,
			))
		}
	}

	return nil
}
|
||||
@ -1,4 +1,4 @@
|
||||
package waku
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
@ -26,19 +26,23 @@ type RelayOptions struct {
|
||||
Enable bool
|
||||
Topics cli.StringSlice
|
||||
ProtectedTopics []cliutils.ProtectedTopic
|
||||
PubSubTopics cli.StringSlice
|
||||
ContentTopics cli.StringSlice
|
||||
PeerExchange bool
|
||||
MinRelayPeersToPublish int
|
||||
MaxMsgSize string
|
||||
}
|
||||
|
||||
// RLNRelayOptions are settings used to enable RLN Relay. This is a protocol
|
||||
// used to rate limit messages and penalize those attempting to send more than
|
||||
// N messages per epoch
|
||||
type RLNRelayOptions struct {
|
||||
Enable bool
|
||||
CredentialsPath string
|
||||
CredentialsPassword string
|
||||
MembershipIndex int
|
||||
PubsubTopic string
|
||||
ContentTopic string
|
||||
TreePath string
|
||||
MembershipIndex *uint
|
||||
Dynamic bool
|
||||
ETHPrivateKey *ecdsa.PrivateKey
|
||||
ETHClientAddress string
|
||||
MembershipContractAddress common.Address
|
||||
}
|
||||
@ -49,10 +53,8 @@ type RLNRelayOptions struct {
|
||||
// restricted devices.
|
||||
type FilterOptions struct {
|
||||
Enable bool
|
||||
UseV1 bool
|
||||
DisableFullNode bool
|
||||
Nodes []multiaddr.Multiaddr
|
||||
NodesV1 []multiaddr.Multiaddr
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
@ -75,8 +77,9 @@ type StoreOptions struct {
|
||||
DatabaseURL string
|
||||
RetentionTime time.Duration
|
||||
RetentionMaxMessages int
|
||||
ResumeNodes []multiaddr.Multiaddr
|
||||
Nodes []multiaddr.Multiaddr
|
||||
//ResumeNodes []multiaddr.Multiaddr
|
||||
Nodes []multiaddr.Multiaddr
|
||||
Migration bool
|
||||
}
|
||||
|
||||
// DNSDiscoveryOptions are settings used for enabling DNS-based discovery
|
||||
@ -96,24 +99,14 @@ type MetricsOptions struct {
|
||||
Port int
|
||||
}
|
||||
|
||||
// RPCServerOptions are settings used to start a json rpc server
|
||||
type RPCServerOptions struct {
|
||||
Enable bool
|
||||
Port int
|
||||
Address string
|
||||
Admin bool
|
||||
Private bool
|
||||
RelayCacheCapacity int
|
||||
}
|
||||
|
||||
// RESTServerOptions are settings used to start a rest http server
|
||||
type RESTServerOptions struct {
|
||||
Enable bool
|
||||
Port int
|
||||
Address string
|
||||
Admin bool
|
||||
Private bool
|
||||
RelayCacheCapacity int
|
||||
Enable bool
|
||||
Port int
|
||||
Address string
|
||||
Admin bool
|
||||
RelayCacheCapacity int
|
||||
FilterCacheCapacity int
|
||||
}
|
||||
|
||||
// WSOptions are settings used for enabling websockets and secure websockets
|
||||
@ -134,36 +127,41 @@ type PeerExchangeOptions struct {
|
||||
Node *multiaddr.Multiaddr
|
||||
}
|
||||
|
||||
// RendezvousOptions are settings used with the rendezvous protocol
|
||||
type RendezvousOptions struct {
|
||||
Enable bool
|
||||
Server bool
|
||||
Nodes []multiaddr.Multiaddr
|
||||
}
|
||||
|
||||
// Options contains all the available features and settings that can be
|
||||
// NodeOptions contains all the available features and settings that can be
|
||||
// configured via flags when executing go-waku as a service.
|
||||
type Options struct {
|
||||
Port int
|
||||
Address string
|
||||
Dns4DomainName string
|
||||
NodeKey *ecdsa.PrivateKey
|
||||
KeyFile string
|
||||
KeyPasswd string
|
||||
GenerateKey bool
|
||||
Overwrite bool
|
||||
StaticNodes []multiaddr.Multiaddr
|
||||
KeepAlive time.Duration
|
||||
AdvertiseAddresses []multiaddr.Multiaddr
|
||||
ShowAddresses bool
|
||||
CircuitRelay bool
|
||||
LogLevel string
|
||||
LogEncoding string
|
||||
LogOutput string
|
||||
NAT string
|
||||
ExtIP string
|
||||
PersistPeers bool
|
||||
UserAgent string
|
||||
PProf bool
|
||||
type NodeOptions struct {
|
||||
Port int
|
||||
Address string
|
||||
ClusterID uint
|
||||
DNS4DomainName string
|
||||
NodeKey *ecdsa.PrivateKey
|
||||
KeyFile string
|
||||
KeyPasswd string
|
||||
StaticNodes []multiaddr.Multiaddr
|
||||
KeepAlive time.Duration
|
||||
AdvertiseAddresses []multiaddr.Multiaddr
|
||||
ShowAddresses bool
|
||||
CircuitRelay bool
|
||||
ForceReachability string
|
||||
ResourceScalingMemoryPercent float64
|
||||
ResourceScalingFDPercent float64
|
||||
LogLevel string
|
||||
LogEncoding string
|
||||
LogOutput string
|
||||
NAT string
|
||||
ExtIP string
|
||||
PersistPeers bool
|
||||
UserAgent string
|
||||
PProf bool
|
||||
MaxPeerConnections int
|
||||
PeerStoreCapacity int
|
||||
IPColocationLimit int
|
||||
|
||||
PeerExchange PeerExchangeOptions
|
||||
Websocket WSOptions
|
||||
@ -176,6 +174,5 @@ type Options struct {
|
||||
DNSDiscovery DNSDiscoveryOptions
|
||||
Rendezvous RendezvousOptions
|
||||
Metrics MetricsOptions
|
||||
RPCServer RPCServerOptions
|
||||
RESTServer RESTServerOptions
|
||||
}
|
||||
82
cmd/waku/relay.go
Normal file
82
cmd/waku/relay.go
Normal file
@ -0,0 +1,82 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
wprotocol "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/rendezvous"
|
||||
)
|
||||
|
||||
// handleRelayTopics subscribes the relay to every pubsub topic in
// pubSubTopicMap (with its mapped content topics) and, when rendezvous nodes
// are configured (package-level `options`), launches two background
// goroutines per topic: one that periodically re-registers the node at the
// rendezvous points, and one that periodically discovers peers via
// rendezvous whenever the gossipsub mesh is below its desired out-degree D.
// Both goroutines are tracked on wg and exit when ctx is cancelled.
// Finally it installs signed-topic validators for protected topics.
// Returns on the first subscription or validator error.
func handleRelayTopics(ctx context.Context, wg *sync.WaitGroup, wakuNode *node.WakuNode, pubSubTopicMap map[string][]string) error {
	for nodeTopic, cTopics := range pubSubTopicMap {
		// Re-bind the loop variable so each goroutine captures its own copy.
		nodeTopic := nodeTopic
		_, err := wakuNode.Relay().Subscribe(ctx, wprotocol.NewContentFilter(nodeTopic, cTopics...), relay.WithoutConsumer())
		if err != nil {
			return err
		}

		if len(options.Rendezvous.Nodes) != 0 {
			// Register the node in rendezvous point
			iter := rendezvous.NewRendezvousPointIterator(options.Rendezvous.Nodes)

			wg.Add(1)
			go func(nodeTopic string) {
				// Re-register before the previous registration's TTL lapses.
				t := time.NewTicker(rendezvous.RegisterDefaultTTL)
				defer t.Stop()
				defer wg.Done()

				for {
					select {
					case <-ctx.Done():
						return
					case <-t.C:
						// Register in rendezvous points periodically
						wakuNode.Rendezvous().RegisterWithNamespace(ctx, nodeTopic, iter.RendezvousPoints())
					}
				}
			}(nodeTopic)

			wg.Add(1)
			go func(nodeTopic string) {
				defer wg.Done()
				// Target mesh size comes from the gossipsub parameter D.
				desiredOutDegree := wakuNode.Relay().Params().D
				t := time.NewTicker(7 * time.Second)
				defer t.Stop()
				for {
					select {
					case <-ctx.Done():
						return
					case <-t.C:
						peerCnt := len(wakuNode.Relay().PubSub().ListPeers(nodeTopic))
						peersToFind := desiredOutDegree - peerCnt
						if peersToFind <= 0 {
							continue
						}

						// Round-robin over the configured rendezvous points;
						// a nil result means none is available right now.
						rp := <-iter.Next(ctx)
						if rp == nil {
							continue
						}
						// Bound each discovery attempt to 7s.
						ctx, cancel := context.WithTimeout(ctx, 7*time.Second)
						wakuNode.Rendezvous().DiscoverWithNamespace(ctx, nodeTopic, rp, peersToFind)
						cancel()
					}
				}
			}(nodeTopic)

		}
	}

	// Protected topics
	for _, protectedTopic := range options.Relay.ProtectedTopics {
		if err := wakuNode.Relay().AddSignedTopicValidator(protectedTopic.Topic, protectedTopic.PublicKey); err != nil {
			return nonRecoverErrorMsg("could not add signed topic validator: %w", err)
		}
	}

	return nil
}
|
||||
19
cmd/waku/rlngenerate/command_no_rln.go
Normal file
19
cmd/waku/rlngenerate/command_no_rln.go
Normal file
@ -0,0 +1,19 @@
|
||||
//go:build gowaku_no_rln
|
||||
// +build gowaku_no_rln
|
||||
|
||||
package rlngenerate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
cli "github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
// Command is the gowaku_no_rln stub of the generate-rln-credentials
// subcommand: it keeps the command name/usage visible in the CLI but always
// returns an error explaining that RLN support was not compiled in.
// NOTE(review): the original comment ("generates a key file used to generate
// the node's peerID") looked copy-pasted from the keygen command; this
// command is about RLN credentials.
var Command = cli.Command{
	Name:  "generate-rln-credentials",
	Usage: "Generate credentials for usage with RLN",
	Action: func(cCtx *cli.Context) error {
		return errors.New("not available. Execute `make RLN=true` to add RLN support to go-waku")
	},
}
|
||||
115
cmd/waku/rlngenerate/command_rln.go
Normal file
115
cmd/waku/rlngenerate/command_rln.go
Normal file
@ -0,0 +1,115 @@
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package rlngenerate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
cli "github.com/urfave/cli/v2"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/group_manager/dynamic"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/keystore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// options holds this subcommand's flag values; it is populated through the
// Destination fields of the cli flags defined in flags.go.
var options Options

// logger is a named child logger dedicated to RLN credential generation.
var logger = utils.Logger().Named("rln-credentials")

// Command is the generate-rln-credentials subcommand (RLN build): it
// validates that an Ethereum private key was supplied, then runs execute()
// to register a membership and persist the credentials. Failures exit the
// CLI with code 1.
var Command = cli.Command{
	Name:  "generate-rln-credentials",
	Usage: "Generate credentials for usage with RLN",
	Action: func(cCtx *cli.Context) error {
		// The on-chain registration transaction requires a signing key.
		if options.ETHPrivateKey == nil {
			err := errors.New("a private key must be specified")
			logger.Error("validating option flags", zap.Error(err))
			return cli.Exit(err, 1)
		}

		err := execute(context.Background())
		if err != nil {
			logger.Error("registering RLN credentials", zap.Error(err))
			return cli.Exit(err, 1)
		}

		return nil
	},
	Flags: flags,
}
|
||||
|
||||
// execute runs the full RLN credential generation flow: create an RLN
// instance, connect to the Ethereum client and membership contract,
// generate a membership key pair, register its commitment on-chain, and
// persist the resulting credentials to the encrypted keystore. Secret parts
// of the credential are only logged at debug level.
func execute(ctx context.Context) error {
	rlnInstance, err := rln.NewRLN()
	if err != nil {
		return err
	}

	// Web3 config carries the ETH client connection, chain ID and the
	// membership contract binding.
	web3Config, err := web3.BuildConfig(ctx, options.ETHClientAddress, options.MembershipContractAddress)
	if err != nil {
		return err
	}

	// prepare rln membership key pair
	logger.Info("generating rln credential")
	identityCredential, err := rlnInstance.MembershipKeyGen()
	if err != nil {
		return err
	}

	// register the rln-relay peer to the membership contract
	membershipIndex, err := register(ctx, web3Config, identityCredential.IDCommitment)
	if err != nil {
		return err
	}

	// TODO: clean private key from memory

	// Persist before logging so a log failure cannot lose the credentials.
	err = persistCredentials(identityCredential, membershipIndex, web3Config.ChainID)
	if err != nil {
		return err
	}

	if logger.Level() == zap.DebugLevel {
		// Debug builds log the full credential, including secret material —
		// keep this out of production log levels.
		logger.Info("registered credentials into the membership contract",
			logging.HexBytes("IDCommitment", identityCredential.IDCommitment[:]),
			logging.HexBytes("IDNullifier", identityCredential.IDNullifier[:]),
			logging.HexBytes("IDSecretHash", identityCredential.IDSecretHash[:]),
			logging.HexBytes("IDTrapDoor", identityCredential.IDTrapdoor[:]),
			zap.Uint("index", membershipIndex),
		)
	} else {
		logger.Info("registered credentials into the membership contract", logging.HexBytes("idCommitment", identityCredential.IDCommitment[:]), zap.Uint("index", membershipIndex))
	}

	web3Config.ETHClient.Close()

	return nil
}
|
||||
|
||||
func persistCredentials(identityCredential *rln.IdentityCredential, treeIndex rln.MembershipIndex, chainID *big.Int) error {
|
||||
appKeystore, err := keystore.New(options.CredentialsPath, dynamic.RLNAppInfo, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
membershipCredential := keystore.MembershipCredentials{
|
||||
IdentityCredential: identityCredential,
|
||||
TreeIndex: treeIndex,
|
||||
MembershipContractInfo: keystore.NewMembershipContractInfo(chainID, options.MembershipContractAddress),
|
||||
}
|
||||
|
||||
err = appKeystore.AddMembershipCredentials(membershipCredential, options.CredentialsPassword)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to persist credentials: %w", err)
|
||||
}
|
||||
|
||||
logger.Info("persisted credentials succesfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
74
cmd/waku/rlngenerate/flags.go
Normal file
74
cmd/waku/rlngenerate/flags.go
Normal file
@ -0,0 +1,74 @@
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package rlngenerate
|
||||
|
||||
import (
|
||||
cli "github.com/urfave/cli/v2"
|
||||
wcli "github.com/waku-org/go-waku/waku/cliutils"
|
||||
)
|
||||
|
||||
var flags = []cli.Flag{
|
||||
&cli.PathFlag{
|
||||
Name: "cred-path",
|
||||
Usage: "RLN relay membership credentials file",
|
||||
Value: "./rlnKeystore.json",
|
||||
Destination: &options.CredentialsPath,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "cred-password",
|
||||
Value: "password",
|
||||
Usage: "Password for encrypting RLN credentials",
|
||||
Destination: &options.CredentialsPassword,
|
||||
},
|
||||
&cli.GenericFlag{
|
||||
Name: "eth-account-private-key",
|
||||
Usage: "Ethereum account private key used for registering in member contract",
|
||||
Value: &wcli.PrivateKeyValue{
|
||||
Value: &options.ETHPrivateKey,
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "eth-client-address",
|
||||
Usage: "Ethereum testnet client address",
|
||||
Value: "ws://localhost:8545",
|
||||
Destination: &options.ETHClientAddress,
|
||||
},
|
||||
&cli.GenericFlag{
|
||||
Name: "eth-contract-address",
|
||||
Usage: "Address of membership contract",
|
||||
Value: &wcli.AddressValue{
|
||||
Value: &options.MembershipContractAddress,
|
||||
},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "eth-nonce",
|
||||
Value: "",
|
||||
Usage: "Set an specific ETH transaction nonce. Leave empty to calculate the nonce automatically",
|
||||
Destination: &options.ETHNonce,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "eth-gas-limit",
|
||||
Value: 0,
|
||||
Usage: "Gas limit to set for the transaction execution (0 = estimate)",
|
||||
Destination: &options.ETHGasLimit,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "eth-gas-price",
|
||||
Value: "",
|
||||
Usage: "Gas price in wei to use for the transaction execution (empty = gas price oracle)",
|
||||
Destination: &options.ETHGasPrice,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "eth-gas-fee-cap",
|
||||
Value: "",
|
||||
Usage: "Gas fee cap in wei to use for the 1559 transaction execution (empty = gas price oracle)",
|
||||
Destination: &options.ETHGasFeeCap,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "eth-gas-tip-cap",
|
||||
Value: "",
|
||||
Usage: "Gas priority fee cap in wei to use for the 1559 transaction execution (empty = gas price oracle)",
|
||||
Destination: &options.ETHGasTipCap,
|
||||
},
|
||||
}
|
||||
21
cmd/waku/rlngenerate/options.go
Normal file
21
cmd/waku/rlngenerate/options.go
Normal file
@ -0,0 +1,21 @@
|
||||
package rlngenerate
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// Options are settings used to create RLN credentials.
|
||||
type Options struct {
|
||||
CredentialsPath string
|
||||
CredentialsPassword string
|
||||
ETHPrivateKey *ecdsa.PrivateKey
|
||||
ETHClientAddress string
|
||||
MembershipContractAddress common.Address
|
||||
ETHGasLimit uint64
|
||||
ETHNonce string
|
||||
ETHGasPrice string
|
||||
ETHGasFeeCap string
|
||||
ETHGasTipCap string
|
||||
}
|
||||
134
cmd/waku/rlngenerate/web3.go
Normal file
134
cmd/waku/rlngenerate/web3.go
Normal file
@ -0,0 +1,134 @@
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package rlngenerate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln/web3"
|
||||
"github.com/waku-org/go-zerokit-rln/rln"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func getMembershipFee(ctx context.Context, rlnContract web3.RLNContract) (*big.Int, error) {
|
||||
return rlnContract.MEMBERSHIPDEPOSIT(&bind.CallOpts{Context: ctx})
|
||||
}
|
||||
|
||||
func buildTransactor(ctx context.Context, membershipFee *big.Int, chainID *big.Int) (*bind.TransactOpts, error) {
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(options.ETHPrivateKey, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth.Value = membershipFee
|
||||
auth.Context = ctx
|
||||
auth.GasLimit = options.ETHGasLimit
|
||||
|
||||
var ok bool
|
||||
|
||||
if options.ETHNonce != "" {
|
||||
nonce := &big.Int{}
|
||||
auth.Nonce, ok = nonce.SetString(options.ETHNonce, 10)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid nonce value")
|
||||
}
|
||||
}
|
||||
|
||||
if options.ETHGasFeeCap != "" {
|
||||
gasFeeCap := &big.Int{}
|
||||
auth.GasFeeCap, ok = gasFeeCap.SetString(options.ETHGasFeeCap, 10)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid gas fee cap value")
|
||||
}
|
||||
}
|
||||
|
||||
if options.ETHGasTipCap != "" {
|
||||
gasTipCap := &big.Int{}
|
||||
auth.GasTipCap, ok = gasTipCap.SetString(options.ETHGasTipCap, 10)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid gas tip cap value")
|
||||
}
|
||||
}
|
||||
|
||||
if options.ETHGasPrice != "" {
|
||||
gasPrice := &big.Int{}
|
||||
auth.GasPrice, ok = gasPrice.SetString(options.ETHGasPrice, 10)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid gas price value")
|
||||
}
|
||||
}
|
||||
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
func register(ctx context.Context, web3Config *web3.Config, idComm rln.IDCommitment) (rln.MembershipIndex, error) {
|
||||
// check if the contract exists by calling a static function
|
||||
membershipFee, err := getMembershipFee(ctx, web3Config.RLNContract)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
auth, err := buildTransactor(ctx, membershipFee, web3Config.ChainID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
log.Debug("registering an id commitment", zap.Binary("idComm", idComm[:]))
|
||||
|
||||
// registers the idComm into the membership contract whose address is in rlnPeer.membershipContractAddress
|
||||
tx, err := web3Config.RegistryContract.Register(auth, web3Config.RLNContract.StorageIndex, rln.Bytes32ToBigInt(idComm))
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("transaction error: %w", err)
|
||||
}
|
||||
|
||||
explorerURL := ""
|
||||
switch web3Config.ChainID.Int64() {
|
||||
case 1:
|
||||
explorerURL = "https://etherscan.io"
|
||||
case 5:
|
||||
explorerURL = "https://goerli.etherscan.io"
|
||||
case 11155111:
|
||||
explorerURL = "https://sepolia.etherscan.io"
|
||||
}
|
||||
|
||||
if explorerURL != "" {
|
||||
logger.Info(fmt.Sprintf("transaction broadcasted, find details of your registration transaction in %s/tx/%s", explorerURL, tx.Hash()))
|
||||
} else {
|
||||
logger.Info("transaction broadcasted.", zap.String("transactionHash", tx.Hash().String()))
|
||||
}
|
||||
|
||||
logger.Warn("waiting for transaction to be mined...")
|
||||
|
||||
txReceipt, err := bind.WaitMined(ctx, web3Config.ETHClient, tx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("transaction error: %w", err)
|
||||
}
|
||||
|
||||
if txReceipt.Status != types.ReceiptStatusSuccessful {
|
||||
return 0, errors.New("transaction reverted")
|
||||
}
|
||||
|
||||
// the receipt topic holds the hash of signature of the raised events
|
||||
evt, err := web3Config.RLNContract.ParseMemberRegistered(*txReceipt.Logs[0])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var eventIDComm rln.IDCommitment = rln.BigIntToBytes32(evt.IdCommitment)
|
||||
|
||||
log.Debug("information extracted from tx log", zap.Uint64("blockNumber", evt.Raw.BlockNumber), logging.HexBytes("idCommitment", eventIDComm[:]), zap.Uint64("index", evt.Index.Uint64()))
|
||||
|
||||
if eventIDComm != idComm {
|
||||
return 0, errors.New("invalid id commitment key")
|
||||
}
|
||||
|
||||
return rln.MembershipIndex(uint(evt.Index.Int64())), nil
|
||||
}
|
||||
13
cmd/waku/server/no_rln.go
Normal file
13
cmd/waku/server/no_rln.go
Normal file
@ -0,0 +1,13 @@
|
||||
//go:build gowaku_no_rln
|
||||
// +build gowaku_no_rln
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
)
|
||||
|
||||
func AppendRLNProof(node *node.WakuNode, msg *pb.WakuMessage) error {
|
||||
return nil
|
||||
}
|
||||
136
cmd/waku/server/rest/admin.go
Normal file
136
cmd/waku/server/rest/admin.go
Normal file
@ -0,0 +1,136 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/cmd/waku/server"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
waku_proto "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type AdminService struct {
|
||||
node *node.WakuNode
|
||||
mux *chi.Mux
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
type WakuPeer struct {
|
||||
ID string `json:"id"`
|
||||
MultiAddrs []string `json:"multiaddrs"`
|
||||
Protocols []string `json:"protocols"`
|
||||
Connected bool `json:"connected"`
|
||||
PubsubTopics []string `json:"pubsubTopics"`
|
||||
}
|
||||
|
||||
type WakuPeerInfo struct {
|
||||
MultiAddr string `json:"multiaddr"`
|
||||
Shards []int `json:"shards"`
|
||||
Protocols []string `json:"protocols"`
|
||||
}
|
||||
|
||||
const routeAdminV1Peers = "/admin/v1/peers"
|
||||
|
||||
func NewAdminService(node *node.WakuNode, m *chi.Mux, log *zap.Logger) *AdminService {
|
||||
d := &AdminService{
|
||||
node: node,
|
||||
mux: m,
|
||||
log: log,
|
||||
}
|
||||
|
||||
m.Get(routeAdminV1Peers, d.getV1Peers)
|
||||
m.Post(routeAdminV1Peers, d.postV1Peer)
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
func (a *AdminService) getV1Peers(w http.ResponseWriter, req *http.Request) {
|
||||
peers, err := a.node.Peers()
|
||||
if err != nil {
|
||||
a.log.Error("failed to fetch peers", zap.Error(err))
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
a.log.Info("fetched peers", zap.Int("count", len(peers)))
|
||||
|
||||
response := make([]WakuPeer, 0)
|
||||
for _, peer := range peers {
|
||||
if peer.ID.String() == a.node.Host().ID().String() {
|
||||
//Skip own node id
|
||||
continue
|
||||
}
|
||||
wPeer := WakuPeer{
|
||||
ID: peer.ID.String(),
|
||||
Connected: peer.Connected,
|
||||
}
|
||||
|
||||
for _, addr := range peer.Addrs {
|
||||
wPeer.MultiAddrs = append(wPeer.MultiAddrs, addr.String())
|
||||
}
|
||||
for _, proto := range peer.Protocols {
|
||||
if !server.IsWakuProtocol(proto) {
|
||||
a.log.Debug("skipping protocol as it is a non-waku protocol", logging.HostID("peer", peer.ID), zap.String("protocol", string(proto)))
|
||||
continue
|
||||
}
|
||||
wPeer.Protocols = append(wPeer.Protocols, string(proto))
|
||||
}
|
||||
wPeer.PubsubTopics = peer.PubsubTopics
|
||||
response = append(response, wPeer)
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, nil, response)
|
||||
}
|
||||
|
||||
func (a *AdminService) postV1Peer(w http.ResponseWriter, req *http.Request) {
|
||||
var pInfo WakuPeerInfo
|
||||
var topics []string
|
||||
var protos []protocol.ID
|
||||
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&pInfo); err != nil {
|
||||
a.log.Error("failed to decode request", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
addr, err := ma.NewMultiaddr(pInfo.MultiAddr)
|
||||
if err != nil {
|
||||
a.log.Error("building multiaddr", zap.Error(err))
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
for _, shard := range pInfo.Shards {
|
||||
topic := waku_proto.NewStaticShardingPubsubTopic(a.node.ClusterID(), uint16(shard))
|
||||
topics = append(topics, topic.String())
|
||||
}
|
||||
|
||||
for _, proto := range pInfo.Protocols {
|
||||
protos = append(protos, protocol.ID(proto))
|
||||
}
|
||||
|
||||
id, err := a.node.AddPeer([]multiaddr.Multiaddr{addr}, peerstore.Static, topics, protos...)
|
||||
if err != nil {
|
||||
a.log.Error("failed to add peer", zap.Error(err))
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
a.log.Info("add peer successful", logging.HostID("peerID", id))
|
||||
pi := peer.AddrInfo{ID: id, Addrs: []ma.Multiaddr{addr}}
|
||||
err = a.node.Host().Connect(req.Context(), pi)
|
||||
if err != nil {
|
||||
a.log.Error("failed to connect to peer", logging.HostID("peerID", id), zap.Error(err))
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
writeErrOrResponse(w, nil, nil)
|
||||
}
|
||||
92
cmd/waku/server/rest/admin_api.yaml
Normal file
92
cmd/waku/server/rest/admin_api.yaml
Normal file
@ -0,0 +1,92 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
url: https://forum.vac.dev/
|
||||
|
||||
tags:
|
||||
- name: admin
|
||||
description: Admin REST API for WakuV2 node
|
||||
|
||||
paths:
|
||||
/admin/v1/peers:
|
||||
get:
|
||||
summary: Get connected peers info
|
||||
description: Retrieve information about connected peers.
|
||||
operationId: getPeerInfo
|
||||
tags:
|
||||
- admin
|
||||
responses:
|
||||
'200':
|
||||
description: Information about a Waku v2 node.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/WakuPeer'
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
post:
|
||||
summary: Adds new peer(s) to connect with
|
||||
description: Adds new peer(s) to connect with.
|
||||
operationId: postPeerInfo
|
||||
tags:
|
||||
- admin
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
items:
|
||||
$ref: '#/components/schemas/WakuPeerInfo'
|
||||
responses:
|
||||
'200':
|
||||
description: Ok
|
||||
'400':
|
||||
description: Cannot connect to one or more peers.
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
components:
|
||||
schemas:
|
||||
WakuPeerInfo:
|
||||
type: object
|
||||
required:
|
||||
- multiaddr
|
||||
- shards
|
||||
- protocols
|
||||
protocols:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
shards:
|
||||
type: array
|
||||
items:
|
||||
type: integer
|
||||
WakuPeer:
|
||||
type: object
|
||||
required:
|
||||
- id
|
||||
- addrs
|
||||
- protocols
|
||||
- connected
|
||||
properties:
|
||||
connected:
|
||||
type: string
|
||||
addrs:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
protocols:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
connected:
|
||||
type: boolean
|
||||
pubsubTopics:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
@ -20,8 +20,8 @@ type InfoReply struct {
|
||||
ListenAddresses []string `json:"listenAddresses,omitempty"`
|
||||
}
|
||||
|
||||
const ROUTE_DEBUG_INFOV1 = "/debug/v1/info"
|
||||
const ROUTE_DEBUG_VERSIONV1 = "/debug/v1/info"
|
||||
const routeDebugInfoV1 = "/debug/v1/info"
|
||||
const routeDebugVersionV1 = "/debug/v1/version"
|
||||
|
||||
func NewDebugService(node *node.WakuNode, m *chi.Mux) *DebugService {
|
||||
d := &DebugService{
|
||||
@ -29,15 +29,15 @@ func NewDebugService(node *node.WakuNode, m *chi.Mux) *DebugService {
|
||||
mux: m,
|
||||
}
|
||||
|
||||
m.Get(ROUTE_DEBUG_INFOV1, d.getV1Info)
|
||||
m.Get(ROUTE_DEBUG_VERSIONV1, d.getV1Version)
|
||||
m.Get(routeDebugInfoV1, d.getV1Info)
|
||||
m.Get(routeDebugVersionV1, d.getV1Version)
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
type VersionResponse string
|
||||
|
||||
func (d *DebugService) getV1Info(w http.ResponseWriter, r *http.Request) {
|
||||
func (d *DebugService) getV1Info(w http.ResponseWriter, req *http.Request) {
|
||||
response := new(InfoReply)
|
||||
response.ENRUri = d.node.ENR().String()
|
||||
for _, addr := range d.node.ListenAddresses() {
|
||||
@ -46,7 +46,7 @@ func (d *DebugService) getV1Info(w http.ResponseWriter, r *http.Request) {
|
||||
writeErrOrResponse(w, nil, response)
|
||||
}
|
||||
|
||||
func (d *DebugService) getV1Version(w http.ResponseWriter, r *http.Request) {
|
||||
func (d *DebugService) getV1Version(w http.ResponseWriter, req *http.Request) {
|
||||
response := VersionResponse(node.GetVersionInfo().String())
|
||||
writeErrOrResponse(w, nil, response)
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
title: Waku V2 node Debug REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
@ -22,7 +22,7 @@ func TestGetV1Info(t *testing.T) {
|
||||
node: wakuNode1,
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(http.MethodPost, ROUTE_DEBUG_INFOV1, bytes.NewReader([]byte("")))
|
||||
request, err := http.NewRequest(http.MethodPost, routeDebugInfoV1, bytes.NewReader([]byte("")))
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
380
cmd/waku/server/rest/filter.go
Normal file
380
cmd/waku/server/rest/filter.go
Normal file
@ -0,0 +1,380 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/waku-org/go-waku/logging"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/peermanager"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const filterV2Subscriptions = "/filter/v2/subscriptions"
|
||||
const filterv2Messages = "/filter/v2/messages"
|
||||
|
||||
// FilterService represents the REST service for Filter client
|
||||
type FilterService struct {
|
||||
node *node.WakuNode
|
||||
cancel context.CancelFunc
|
||||
|
||||
log *zap.Logger
|
||||
|
||||
cache *filterCache
|
||||
runner *runnerService
|
||||
}
|
||||
|
||||
// Start starts the RelayService
|
||||
func (s *FilterService) Start(ctx context.Context) {
|
||||
|
||||
for _, sub := range s.node.FilterLightnode().Subscriptions() {
|
||||
s.cache.subscribe(sub.ContentFilter)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
s.cancel = cancel
|
||||
s.runner.Start(ctx)
|
||||
}
|
||||
|
||||
// Stop stops the RelayService
|
||||
func (r *FilterService) Stop() {
|
||||
if r.cancel == nil {
|
||||
return
|
||||
}
|
||||
r.cancel()
|
||||
}
|
||||
|
||||
// NewFilterService returns an instance of FilterService
|
||||
func NewFilterService(node *node.WakuNode, m *chi.Mux, cacheCapacity int, log *zap.Logger) *FilterService {
|
||||
logger := log.Named("filter")
|
||||
|
||||
s := &FilterService{
|
||||
node: node,
|
||||
log: logger,
|
||||
cache: newFilterCache(cacheCapacity, logger),
|
||||
}
|
||||
|
||||
m.Route(filterV2Subscriptions, func(r chi.Router) {
|
||||
r.Get("/", s.ping)
|
||||
r.Get("/{requestId}", s.ping)
|
||||
r.Post("/", s.subscribe)
|
||||
r.Delete("/", s.unsubscribe)
|
||||
r.Delete("/all", s.unsubscribeAll)
|
||||
})
|
||||
|
||||
m.Route(filterv2Messages, func(r chi.Router) {
|
||||
r.Get("/{contentTopic}", s.getMessagesByContentTopic)
|
||||
r.Get("/{pubsubTopic}/{contentTopic}", s.getMessagesByPubsubTopic)
|
||||
})
|
||||
|
||||
s.runner = newRunnerService(node.Broadcaster(), s.cache.addMessage)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func convertFilterErrorToHttpStatus(err error) (int, string) {
|
||||
code := http.StatusInternalServerError
|
||||
statusDesc := "ping request failed"
|
||||
|
||||
filterErrorCode := filter.ExtractCodeFromFilterError(err.Error())
|
||||
switch filterErrorCode {
|
||||
case 404:
|
||||
code = http.StatusNotFound
|
||||
statusDesc = "peer has no subscription"
|
||||
case 300:
|
||||
case 400:
|
||||
code = http.StatusBadRequest
|
||||
statusDesc = "bad request format"
|
||||
case 504:
|
||||
code = http.StatusGatewayTimeout
|
||||
case 503:
|
||||
code = http.StatusServiceUnavailable
|
||||
}
|
||||
return code, statusDesc
|
||||
}
|
||||
|
||||
// 400 for bad requestId
|
||||
// 404 when request failed or no suitable peers
|
||||
// 200 when ping successful
|
||||
func (s *FilterService) ping(w http.ResponseWriter, req *http.Request) {
|
||||
requestID := chi.URLParam(req, "requestId")
|
||||
if requestID == "" {
|
||||
writeResponse(w, &filterSubscriptionResponse{
|
||||
RequestID: requestID,
|
||||
StatusDesc: "bad request id",
|
||||
}, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// selecting random peer that supports filter protocol
|
||||
peerId := s.getRandomFilterPeer(req.Context(), requestID, w)
|
||||
if peerId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.node.FilterLightnode().Ping(req.Context(), peerId, filter.WithPingRequestId([]byte(requestID))); err != nil {
|
||||
s.log.Error("ping request failed", zap.Error(err))
|
||||
|
||||
code, statusDesc := convertFilterErrorToHttpStatus(err)
|
||||
|
||||
writeResponse(w, &filterSubscriptionResponse{
|
||||
RequestID: requestID,
|
||||
StatusDesc: statusDesc,
|
||||
}, code)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// success
|
||||
writeResponse(w, &filterSubscriptionResponse{
|
||||
RequestID: requestID,
|
||||
StatusDesc: http.StatusText(http.StatusOK),
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
// same for FilterUnsubscribeRequest
|
||||
type filterSubscriptionRequest struct {
|
||||
RequestID string `json:"requestId"`
|
||||
ContentFilters []string `json:"contentFilters"`
|
||||
PubsubTopic string `json:"pubsubTopic"`
|
||||
}
|
||||
|
||||
type filterSubscriptionResponse struct {
|
||||
RequestID string `json:"requestId"`
|
||||
StatusDesc string `json:"statusDesc"`
|
||||
}
|
||||
|
||||
// 400 on invalid request
|
||||
// 404 on failed subscription
|
||||
// 200 on single returned successful subscription
|
||||
// NOTE: subscribe on filter client randomly selects a peer if missing for given pubSubTopic
|
||||
func (s *FilterService) subscribe(w http.ResponseWriter, req *http.Request) {
|
||||
message := filterSubscriptionRequest{}
|
||||
if !s.readBody(w, req, &message) {
|
||||
return
|
||||
}
|
||||
|
||||
contentFilter := protocol.NewContentFilter(message.PubsubTopic, message.ContentFilters...)
|
||||
//
|
||||
subscriptions, err := s.node.FilterLightnode().Subscribe(req.Context(),
|
||||
contentFilter,
|
||||
filter.WithRequestID([]byte(message.RequestID)))
|
||||
|
||||
// on partial subscribe failure
|
||||
if len(subscriptions) > 0 && err != nil {
|
||||
s.log.Error("partial subscribe failed", zap.Error(err))
|
||||
// on partial failure
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: err.Error(),
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
s.log.Error("subscription failed", zap.Error(err))
|
||||
code := filter.ExtractCodeFromFilterError(err.Error())
|
||||
if code == -1 {
|
||||
code = http.StatusBadRequest
|
||||
}
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: "subscription failed",
|
||||
}, code)
|
||||
return
|
||||
}
|
||||
|
||||
// on success
|
||||
s.cache.subscribe(contentFilter)
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: http.StatusText(http.StatusOK),
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
// 400 on invalid request
|
||||
// 500 on failed subscription
|
||||
// 200 on successful unsubscribe
|
||||
// NOTE: unsubscribe on filter client will remove subscription from all peers with matching pubSubTopic, if peerId is not provided
|
||||
// to match functionality in nwaku, we will randomly select a peer that supports filter protocol.
|
||||
func (s *FilterService) unsubscribe(w http.ResponseWriter, req *http.Request) {
|
||||
message := filterSubscriptionRequest{} // as pubSubTopics can also be present
|
||||
if !s.readBody(w, req, &message) {
|
||||
return
|
||||
}
|
||||
|
||||
peerId := s.getRandomFilterPeer(req.Context(), message.RequestID, w)
|
||||
if peerId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
contentFilter := protocol.NewContentFilter(message.PubsubTopic, message.ContentFilters...)
|
||||
// unsubscribe on filter
|
||||
result, err := s.node.FilterLightnode().Unsubscribe(
|
||||
req.Context(),
|
||||
contentFilter,
|
||||
filter.WithRequestID([]byte(message.RequestID)),
|
||||
filter.WithPeer(peerId),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
s.log.Error("unsubscribe failed", zap.Error(err))
|
||||
if result == nil {
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: err.Error(),
|
||||
}, http.StatusBadRequest)
|
||||
}
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: err.Error(),
|
||||
}, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
// on success
|
||||
for cTopic := range contentFilter.ContentTopics {
|
||||
if !s.node.FilterLightnode().IsListening(contentFilter.PubsubTopic, cTopic) {
|
||||
s.cache.unsubscribe(contentFilter.PubsubTopic, cTopic)
|
||||
}
|
||||
}
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: s.unsubscribeGetMessage(result),
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
func (s *FilterService) unsubscribeGetMessage(result *filter.WakuFilterPushResult) string {
|
||||
if result == nil {
|
||||
return http.StatusText(http.StatusOK)
|
||||
}
|
||||
var peerIds string
|
||||
ind := 0
|
||||
for _, entry := range result.Errors() {
|
||||
if entry.Err != nil {
|
||||
s.log.Error("can't unsubscribe", logging.HostID("peer", entry.PeerID), zap.Error(entry.Err))
|
||||
if ind != 0 {
|
||||
peerIds += ", "
|
||||
}
|
||||
peerIds += entry.PeerID.String()
|
||||
}
|
||||
ind++
|
||||
}
|
||||
if peerIds != "" {
|
||||
return "can't unsubscribe from " + peerIds
|
||||
}
|
||||
return http.StatusText(http.StatusOK)
|
||||
}
|
||||
|
||||
type filterUnsubscribeAllRequest struct {
|
||||
RequestID string `json:"requestId"`
|
||||
}
|
||||
|
||||
func (s *FilterService) readBody(w http.ResponseWriter, req *http.Request, message interface{}) bool {
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(message); err != nil {
|
||||
s.log.Error("bad request", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return false
|
||||
}
|
||||
defer req.Body.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
// 400 on invalid request
|
||||
// 500 on failed subscription
|
||||
// 200 on all successful unsubscribe
|
||||
// unsubscribe all subscriptions for a given peer
|
||||
func (s *FilterService) unsubscribeAll(w http.ResponseWriter, req *http.Request) {
|
||||
message := filterUnsubscribeAllRequest{}
|
||||
if !s.readBody(w, req, &message) {
|
||||
return
|
||||
}
|
||||
|
||||
peerId := s.getRandomFilterPeer(req.Context(), message.RequestID, w)
|
||||
if peerId == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// unsubscribe all subscriptions for a given peer
|
||||
errCh, err := s.node.FilterLightnode().UnsubscribeAll(
|
||||
req.Context(),
|
||||
filter.WithRequestID([]byte(message.RequestID)),
|
||||
filter.WithPeer(peerId),
|
||||
)
|
||||
if err != nil {
|
||||
s.log.Error("unsubscribeAll failed", zap.Error(err))
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: err.Error(),
|
||||
}, http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
// on success
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: message.RequestID,
|
||||
StatusDesc: s.unsubscribeGetMessage(errCh),
|
||||
}, http.StatusOK)
|
||||
}
|
||||
|
||||
func (s FilterService) getRandomFilterPeer(ctx context.Context, requestId string, w http.ResponseWriter) peer.ID {
|
||||
// selecting random peer that supports filter protocol
|
||||
peerIds, err := s.node.PeerManager().SelectPeers(peermanager.PeerSelectionCriteria{
|
||||
SelectionType: peermanager.Automatic,
|
||||
Proto: filter.FilterSubscribeID_v20beta1,
|
||||
Ctx: ctx,
|
||||
})
|
||||
if err != nil {
|
||||
s.log.Error("selecting peer", zap.Error(err))
|
||||
writeResponse(w, filterSubscriptionResponse{
|
||||
RequestID: requestId,
|
||||
StatusDesc: "No suitable peers",
|
||||
}, http.StatusServiceUnavailable)
|
||||
return ""
|
||||
}
|
||||
return peerIds[0]
|
||||
}
|
||||
|
||||
func (s *FilterService) getMessagesByContentTopic(w http.ResponseWriter, req *http.Request) {
|
||||
contentTopic := topicFromPath(w, req, "contentTopic", s.log)
|
||||
if contentTopic == "" {
|
||||
return
|
||||
}
|
||||
pubsubTopic, err := protocol.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
if err != nil {
|
||||
writeGetMessageErr(w, fmt.Errorf("bad content topic"), http.StatusBadRequest, s.log)
|
||||
return
|
||||
}
|
||||
s.getMessages(w, req, pubsubTopic, contentTopic)
|
||||
}
|
||||
|
||||
func (s *FilterService) getMessagesByPubsubTopic(w http.ResponseWriter, req *http.Request) {
|
||||
contentTopic := topicFromPath(w, req, "contentTopic", s.log)
|
||||
if contentTopic == "" {
|
||||
return
|
||||
}
|
||||
pubsubTopic := topicFromPath(w, req, "pubsubTopic", s.log)
|
||||
if pubsubTopic == "" {
|
||||
return
|
||||
}
|
||||
s.getMessages(w, req, pubsubTopic, contentTopic)
|
||||
}
|
||||
|
||||
// 400 on invalid request
|
||||
// 500 on failed subscription
|
||||
// 200 on all successful unsubscribe
|
||||
// unsubscribe all subscriptions for a given peer
|
||||
func (s *FilterService) getMessages(w http.ResponseWriter, req *http.Request, pubsubTopic, contentTopic string) {
|
||||
msgs, err := s.cache.getMessages(pubsubTopic, contentTopic)
|
||||
if err != nil {
|
||||
writeGetMessageErr(w, err, http.StatusNotFound, s.log)
|
||||
return
|
||||
}
|
||||
writeResponse(w, msgs, http.StatusOK)
|
||||
}
|
||||
337
cmd/waku/server/rest/filter_api.yaml
Normal file
337
cmd/waku/server/rest/filter_api.yaml
Normal file
@ -0,0 +1,337 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
url: https://forum.vac.dev/
|
||||
tags:
|
||||
- name: filter
|
||||
description: Filter REST API for WakuV2 node
|
||||
|
||||
paths:
|
||||
/filter/v2/subscriptions/{requestId}:
|
||||
get: # get_waku_v2_filter_v2_subscription - ping
|
||||
summary: Subscriber-ping - a peer can query if there is a registered subscription for it
|
||||
description: |
|
||||
Subscriber peer can query its subscription existence on service node.
|
||||
Returns HTTP200 if exists and HTTP404 if not.
|
||||
Client must not fill anything but requestId in the request body.
|
||||
operationId: subscriberPing
|
||||
tags:
|
||||
- filter
|
||||
parameters:
|
||||
- in: path
|
||||
name: requestId
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Id of ping request
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
|
||||
|
||||
/filter/v2/subscriptions:
|
||||
post: # post_waku_v2_filter_v2_subscription
|
||||
summary: Subscribe a peer to an array of content topics under a pubsubTopic
|
||||
description: |
|
||||
Subscribe a peer to an array of content topics under a pubsubTopic.
|
||||
|
||||
It is allowed to refresh or add new content topic to an existing subscription.
|
||||
|
||||
Fields pubsubTopic and contentFilters must be filled.
|
||||
operationId: postSubscriptions
|
||||
tags:
|
||||
- filter
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscribeRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
# TODO: Review the possible errors of this endpoint
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
delete: # delete_waku_v2_filter_v2_subscription
|
||||
summary: Unsubscribe a peer from content topics
|
||||
description: |
|
||||
Unsubscribe a peer from content topics
|
||||
Only that subscription will be removed which matches existing.
|
||||
operationId: deleteSubscriptions
|
||||
tags:
|
||||
- filter
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterUnsubscribeRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
|
||||
/filter/v2/subscriptions/all:
|
||||
delete: # delete_waku_v2_filter_v2_subscription
|
||||
summary: Unsubscribe a peer from all content topics
|
||||
description: |
|
||||
Unsubscribe a peer from all content topics
|
||||
operationId: deleteAllSubscriptions
|
||||
tags:
|
||||
- filter
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterUnsubscribeAllRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterSubscriptionResponse'
|
||||
/filter/v2/messages/{contentTopic}:
|
||||
get: # get_waku_v2_filter_v2_messages
|
||||
summary: Get the latest messages on the polled content topic
|
||||
description: Get a list of messages that were received on a subscribed content topic after the last time this method was called.
|
||||
operationId: getMessagesByTopic
|
||||
tags:
|
||||
- filter
|
||||
parameters:
|
||||
- in: path
|
||||
name: contentTopic # Note the name is the same as in the path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Content topic of message
|
||||
responses:
|
||||
'200':
|
||||
description: The latest messages on the polled topic.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterGetMessagesResponse'
|
||||
# TODO: Review the possible errors of this endpoint
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
/filter/v2/messages/{pubsubTopic}/{contentTopic}:
|
||||
get: # get_waku_v2_filter_v2_messages
|
||||
summary: Get the latest messages on the polled pubsub/content topic pair
|
||||
description: Get a list of messages that were received on a subscribed content topic after the last time this method was called.
|
||||
operationId: getMessagesByPubsubTopic
|
||||
tags:
|
||||
- filter
|
||||
parameters:
|
||||
- in: path
|
||||
name: contentTopic # Note the name is the same as in the path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Content topic of message
|
||||
- in: path
|
||||
name: pubsubTopic # Note the name is the same as in the path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: pubsub topic of message
|
||||
responses:
|
||||
'200':
|
||||
description: The latest messages on the polled topic.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/FilterGetMessagesResponse'
|
||||
# TODO: Review the possible errors of this endpoint
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'404':
|
||||
description: Not found.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
|
||||
components:
  schemas:
    PubSubTopic:
|
||||
type: string
|
||||
ContentTopic:
|
||||
type: string
|
||||
|
||||
FilterSubscriptionResponse:
|
||||
type: object
|
||||
properties:
|
||||
requestId:
|
||||
type: string
|
||||
statusDesc:
|
||||
type: string
|
||||
required:
|
||||
- requestId
|
||||
|
||||
FilterSubscribeRequest:
|
||||
type: object
|
||||
properties:
|
||||
requestId:
|
||||
type: string
|
||||
contentFilters:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
pubsubTopic:
|
||||
$ref: "#/components/schemas/PubSubTopic"
|
||||
required:
|
||||
- requestId
|
||||
- contentFilters
|
||||
- pubsubTopic
|
||||
|
||||
FilterUnsubscribeRequest:
|
||||
type: object
|
||||
properties:
|
||||
requestId:
|
||||
type: string
|
||||
contentFilters:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
pubsubTopic:
|
||||
$ref: "#/components/schemas/PubSubTopic"
|
||||
required:
|
||||
- requestId
|
||||
- contentFilters
|
||||
|
||||
FilterUnsubscribeAllRequest:
|
||||
type: object
|
||||
properties:
|
||||
requestId:
|
||||
type: string
|
||||
required:
|
||||
- requestId
|
||||
|
||||
FilterGetMessagesResponse:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/FilterWakuMessage'
|
||||
|
||||
FilterWakuMessage:
|
||||
type: object
|
||||
properties:
|
||||
payload:
|
||||
type: string
|
||||
format: byte
|
||||
contentTopic:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
version:
|
||||
type: number
|
||||
timestamp:
|
||||
type: number
|
||||
required:
|
||||
- payload
|
||||
84
cmd/waku/server/rest/filter_cache.go
Normal file
84
cmd/waku/server/rest/filter_cache.go
Normal file
@ -0,0 +1,84 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// filterCache buffers messages received through filter subscriptions,
// keyed by pubsubTopic and then contentTopic, so they can be polled over
// the REST API.
type filterCache struct {
	capacity int // max buffered messages per content topic (see addMessage)
	mu sync.RWMutex // guards data
	log *zap.Logger
	data map[string]map[string][]*RestWakuMessage // pubsubTopic -> contentTopic -> messages
}
|
||||
|
||||
func newFilterCache(capacity int, log *zap.Logger) *filterCache {
|
||||
return &filterCache{
|
||||
capacity: capacity,
|
||||
data: make(map[string]map[string][]*RestWakuMessage),
|
||||
log: log.Named("cache"),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *filterCache) subscribe(contentFilter protocol.ContentFilter) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
pubSubTopicMap, _ := protocol.ContentFilterToPubSubTopicMap(contentFilter)
|
||||
for pubsubTopic, contentTopics := range pubSubTopicMap {
|
||||
if c.data[pubsubTopic] == nil {
|
||||
c.data[pubsubTopic] = make(map[string][]*RestWakuMessage)
|
||||
}
|
||||
for _, topic := range contentTopics {
|
||||
if c.data[pubsubTopic][topic] == nil {
|
||||
c.data[pubsubTopic][topic] = []*RestWakuMessage{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *filterCache) unsubscribe(pubsubTopic string, contentTopic string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
delete(c.data[pubsubTopic], contentTopic)
|
||||
}
|
||||
|
||||
func (c *filterCache) addMessage(envelope *protocol.Envelope) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
pubsubTopic := envelope.PubsubTopic()
|
||||
contentTopic := envelope.Message().ContentTopic
|
||||
if c.data[pubsubTopic] == nil || c.data[pubsubTopic][contentTopic] == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Keep a specific max number of message per topic
|
||||
if len(c.data[pubsubTopic][contentTopic]) >= c.capacity {
|
||||
c.data[pubsubTopic][contentTopic] = c.data[pubsubTopic][contentTopic][1:]
|
||||
}
|
||||
|
||||
message := &RestWakuMessage{}
|
||||
if err := message.FromProto(envelope.Message()); err != nil {
|
||||
c.log.Error("converting protobuffer msg into rest msg", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
c.data[pubsubTopic][contentTopic] = append(c.data[pubsubTopic][contentTopic], message)
|
||||
}
|
||||
|
||||
func (c *filterCache) getMessages(pubsubTopic string, contentTopic string) ([]*RestWakuMessage, error) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
|
||||
if c.data[pubsubTopic] == nil || c.data[pubsubTopic][contentTopic] == nil {
|
||||
return nil, fmt.Errorf("not subscribed to pubsubTopic:%s contentTopic: %s", pubsubTopic, contentTopic)
|
||||
}
|
||||
msgs := c.data[pubsubTopic][contentTopic]
|
||||
c.data[pubsubTopic][contentTopic] = []*RestWakuMessage{}
|
||||
return msgs, nil
|
||||
}
|
||||
393
cmd/waku/server/rest/filter_test.go
Normal file
393
cmd/waku/server/rest/filter_test.go
Normal file
@ -0,0 +1,393 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/waku-org/go-waku/tests"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
wakupeerstore "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
func createNode(t *testing.T, opts ...node.WakuNodeOption) *node.WakuNode {
|
||||
node, err := node.New(opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = node.Start(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
// node2 connects to node1
|
||||
func twoFilterConnectedNodes(t *testing.T, pubSubTopics ...string) (*node.WakuNode, *node.WakuNode) {
|
||||
node1 := createNode(t, node.WithWakuFilterFullNode(filter.WithFullNodeRateLimiter(rate.Inf, 0))) // full node filter
|
||||
node2 := createNode(t, node.WithWakuFilterLightNode()) // light node filter
|
||||
|
||||
node2.Host().Peerstore().AddAddr(node1.Host().ID(), tests.GetHostAddress(node1.Host()), peerstore.PermanentAddrTTL)
|
||||
err := node2.Host().Peerstore().AddProtocols(node1.Host().ID(), filter.FilterSubscribeID_v20beta1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = node2.Host().Peerstore().(*wakupeerstore.WakuPeerstoreImpl).SetPubSubTopics(node1.Host().ID(), pubSubTopics)
|
||||
require.NoError(t, err)
|
||||
|
||||
return node1, node2
|
||||
}
|
||||
|
||||
// TestFilterPingFailure exercises the failure paths of the subscriber-ping
// endpoint: 400 for an empty request id and 404 when the peer has no
// subscription. Both requests are expected to fail.
func TestFilterPingFailure(t *testing.T) {
	node1, node2 := twoFilterConnectedNodes(t)
	defer func() {
		node1.Stop()
		node2.Stop()
	}()

	router := chi.NewRouter()
	_ = NewFilterService(node2, router, 0, utils.Logger())

	// Ping with an empty requestID -> 400 "bad request id".
	rr := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("/filter/v2/subscriptions/%s", ""), nil)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  "",
		StatusDesc: "bad request id",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusBadRequest, rr.Code)

	// Valid requestID but no subscription with the peer -> 404.
	requestID := hex.EncodeToString(protocol.GenerateRequestID())
	rr = httptest.NewRecorder()
	req, _ = http.NewRequest(http.MethodGet, fmt.Sprintf("/filter/v2/subscriptions/%s", requestID), nil)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "peer has no subscription",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusNotFound, rr.Code)
}
|
||||
|
||||
// TestFilterSubscribeAndPing creates a filter subscription to the peer and
// then pings that peer; both steps should succeed with HTTP 200.
func TestFilterSubscribeAndPing(t *testing.T) {
	pubsubTopic := "/waku/2/test/proto"
	contentTopics := []string{"test"}
	requestID := hex.EncodeToString(protocol.GenerateRequestID())

	node1, node2 := twoFilterConnectedNodes(t, pubsubTopic)
	defer func() {
		node1.Stop()
		node2.Stop()
	}()

	router := chi.NewRouter()
	_ = NewFilterService(node2, router, 0, utils.Logger())

	// Create a subscription to the peer (POST /filter/v2/subscriptions).
	rr := httptest.NewRecorder()
	reqReader := strings.NewReader(toString(t, filterSubscriptionRequest{
		RequestID:      requestID,
		PubsubTopic:    pubsubTopic,
		ContentFilters: contentTopics,
	}))
	req, _ := http.NewRequest(http.MethodPost, filterV2Subscriptions, reqReader)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "OK",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusOK, rr.Code)

	// Try pinging the peer now that a subscription exists -> 200 OK.
	rr = httptest.NewRecorder()
	req, _ = http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", filterV2Subscriptions, requestID), nil)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "OK",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusOK, rr.Code)
}
|
||||
|
||||
// TestFilterSubscribeAndUnsubscribe creates a subscription to the peer and
// then deletes it with a matching pubsubTopic/contentTopic pair; both
// requests should return HTTP 200.
func TestFilterSubscribeAndUnsubscribe(t *testing.T) {
	pubsubTopic := "/waku/2/test/proto"
	contentTopics := []string{"test"}
	requestID := hex.EncodeToString(protocol.GenerateRequestID())

	node1, node2 := twoFilterConnectedNodes(t, pubsubTopic)
	defer func() {
		node1.Stop()
		node2.Stop()
	}()

	router := chi.NewRouter()
	_ = NewFilterService(node2, router, 0, utils.Logger())

	// Create a subscription to the peer.
	rr := httptest.NewRecorder()
	reqReader := strings.NewReader(toString(t, filterSubscriptionRequest{
		RequestID:      requestID,
		PubsubTopic:    pubsubTopic,
		ContentFilters: contentTopics,
	}))
	req, _ := http.NewRequest(http.MethodPost, filterV2Subscriptions, reqReader)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "OK",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusOK, rr.Code)

	// Delete the subscription with the matching pubsubTopic/contentTopic.
	requestID = hex.EncodeToString(protocol.GenerateRequestID())
	rr = httptest.NewRecorder()
	reqReader = strings.NewReader(toString(t, filterSubscriptionRequest{
		RequestID:      requestID,
		PubsubTopic:    pubsubTopic,
		ContentFilters: contentTopics,
	}))
	req, _ = http.NewRequest(http.MethodDelete, filterV2Subscriptions, reqReader)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "OK",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusOK, rr.Code)
}
|
||||
|
||||
// TestFilterAllUnsubscribe creates two subscriptions from the filter client
// to the server, issues an unsubscribeAll request, and then pings the peer:
// a 404 from the ping proves that unsubscribeAll removed everything.
func TestFilterAllUnsubscribe(t *testing.T) {
	pubsubTopic := "/waku/2/test/proto"
	contentTopics1 := "ct_1"
	contentTopics2 := "ct_2"

	node1, node2 := twoFilterConnectedNodes(t, pubsubTopic)
	defer func() {
		node1.Stop()
		node2.Stop()
	}()

	router := chi.NewRouter()
	_ = NewFilterService(node2, router, 0, utils.Logger())

	// Create two different subscriptions to the peer, one per content topic.
	for _, ct := range []string{contentTopics1, contentTopics2} {
		requestID := hex.EncodeToString(protocol.GenerateRequestID())
		rr := httptest.NewRecorder()
		reqReader := strings.NewReader(toString(t, filterSubscriptionRequest{
			RequestID:      requestID,
			PubsubTopic:    pubsubTopic,
			ContentFilters: []string{ct},
		}))
		req, _ := http.NewRequest(http.MethodPost, filterV2Subscriptions, reqReader)
		router.ServeHTTP(rr, req)
		checkJSON(t, filterSubscriptionResponse{
			RequestID:  requestID,
			StatusDesc: "OK",
		}, getFilterResponse(t, rr.Body))
		require.Equal(t, http.StatusOK, rr.Code)
	}

	// Delete all subscriptions to the peer.
	requestID := hex.EncodeToString(protocol.GenerateRequestID())
	rr := httptest.NewRecorder()
	reqReader := strings.NewReader(toString(t, filterUnsubscribeAllRequest{
		RequestID: requestID,
	}))
	req, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/all", filterV2Subscriptions), reqReader)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "OK",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusOK, rr.Code)

	// Verify all subscriptions to the peer were deleted: ping must 404.
	requestID = hex.EncodeToString(protocol.GenerateRequestID())
	rr = httptest.NewRecorder()
	req, _ = http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", filterV2Subscriptions, requestID), nil)
	router.ServeHTTP(rr, req)
	checkJSON(t, filterSubscriptionResponse{
		RequestID:  requestID,
		StatusDesc: "peer has no subscription",
	}, getFilterResponse(t, rr.Body))
	require.Equal(t, http.StatusNotFound, rr.Code)
}
|
||||
|
||||
func checkJSON(t *testing.T, expected, actual interface{}) {
|
||||
require.JSONEq(t, toString(t, expected), toString(t, actual))
|
||||
}
|
||||
func getFilterResponse(t *testing.T, body *bytes.Buffer) filterSubscriptionResponse {
|
||||
resp := filterSubscriptionResponse{}
|
||||
err := json.Unmarshal(body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
return resp
|
||||
}
|
||||
func getMessageResponse(t *testing.T, body *bytes.Buffer) []*pb.WakuMessage {
|
||||
resp := []*pb.WakuMessage{}
|
||||
err := json.Unmarshal(body.Bytes(), &resp)
|
||||
require.NoError(t, err)
|
||||
return resp
|
||||
}
|
||||
func toString(t *testing.T, data interface{}) string {
|
||||
bytes, err := json.Marshal(data)
|
||||
require.NoError(t, err)
|
||||
return string(bytes)
|
||||
}
|
||||
|
||||
// TestFilterGetMessages subscribes via REST (once by content topic only,
// once with an explicit pubsub topic), submits messages through the
// broadcaster, then polls both message endpoints and checks: malformed
// content topic -> 400, cache eviction with capacity 2, missing
// pubsubTopic path segment -> 400, pubsub/content pair retrieval, and
// 404 for an unsubscribed pair.
func TestFilterGetMessages(t *testing.T) {
	pubsubTopic := "/waku/2/test/proto"
	contentTopic := "/waku/2/app/1"

	// get nodes and connect them; the autosharded topic must also be
	// registered in the peerstore for the content-topic-only subscription
	generatedPubsubTopic, err := protocol.GetPubSubTopicFromContentTopic(contentTopic)
	require.NoError(t, err)
	node1, node2 := twoFilterConnectedNodes(t, pubsubTopic, generatedPubsubTopic)
	defer func() {
		node1.Stop()
		node2.Stop()
	}()

	// set router and start filter service with a per-topic cache capacity of 2
	router := chi.NewRouter()
	service := NewFilterService(node2, router, 2, utils.Logger())
	go service.Start(context.Background())
	defer service.Stop()

	{ // create subscriptions so that messages are cached
		for _, pubsubTopic := range []string{"", pubsubTopic} {
			requestID := hex.EncodeToString(protocol.GenerateRequestID())
			rr := httptest.NewRecorder()
			reqReader := strings.NewReader(toString(t, filterSubscriptionRequest{
				RequestID:      requestID,
				PubsubTopic:    pubsubTopic,
				ContentFilters: []string{contentTopic},
			}))
			req, _ := http.NewRequest(http.MethodPost, filterV2Subscriptions, reqReader)
			router.ServeHTTP(rr, req)
			checkJSON(t, filterSubscriptionResponse{
				RequestID:  requestID,
				StatusDesc: "OK",
			}, getFilterResponse(t, rr.Body))
			require.Equal(t, http.StatusOK, rr.Code)
		}
	}

	// submit messages: three on the autosharded topic, one on the explicit one
	messageByContentTopic := []*protocol.Envelope{
		genMessage("", contentTopic),
		genMessage("", contentTopic),
		genMessage("", contentTopic),
	}
	messageByPubsubTopic := []*protocol.Envelope{
		genMessage(pubsubTopic, contentTopic),
	}
	for _, envelope := range append(messageByContentTopic, messageByPubsubTopic...) {
		node2.Broadcaster().Submit(envelope)
	}
	// give the service's message loop time to cache the submissions
	time.Sleep(1 * time.Second)

	{ // with malformed contentTopic -> 400
		rr := httptest.NewRecorder()
		req, _ := http.NewRequest(http.MethodGet,
			fmt.Sprintf("%s/%s", filterv2Messages, url.QueryEscape("/waku/2/wrongtopic")),
			nil,
		)
		router.ServeHTTP(rr, req)
		require.Equal(t, http.StatusBadRequest, rr.Code)
		require.Equal(t, "bad content topic", rr.Body.String())
	}

	{ // check the cache works and evicts: capacity 2 keeps the last 2 of 3
		rr := httptest.NewRecorder()
		req, _ := http.NewRequest(http.MethodGet,
			fmt.Sprintf("%s/%s", filterv2Messages, url.QueryEscape(contentTopic)),
			nil,
		)
		router.ServeHTTP(rr, req)
		require.Equal(t, http.StatusOK, rr.Code)
		checkJSON(t, toMessage(messageByContentTopic[1:]), getMessageResponse(t, rr.Body))
	}

	{ // an empty pubsubTopic path segment -> 400
		rr := httptest.NewRecorder()
		req, _ := http.NewRequest(http.MethodGet,
			fmt.Sprintf("%s//%s", filterv2Messages, url.QueryEscape(contentTopic)),
			nil,
		)
		router.ServeHTTP(rr, req)
		require.Equal(t, http.StatusBadRequest, rr.Code)
		require.Equal(t, "missing pubsubTopic", rr.Body.String())
	}

	{ // check messages by pubsub/contentTopic pair
		rr := httptest.NewRecorder()
		req, _ := http.NewRequest(http.MethodGet,
			fmt.Sprintf("%s/%s/%s", filterv2Messages, url.QueryEscape(pubsubTopic), url.QueryEscape(contentTopic)),
			nil,
		)
		router.ServeHTTP(rr, req)
		require.Equal(t, http.StatusOK, rr.Code)
		checkJSON(t, toMessage(messageByPubsubTopic), getMessageResponse(t, rr.Body))
	}

	{ // an unsubscribed pubsubTopic/contentTopic pair -> 404
		rr := httptest.NewRecorder()
		notSubscibredPubsubTopic := "/waku/2/test2/proto"
		req, _ := http.NewRequest(http.MethodGet,
			fmt.Sprintf("%s/%s/%s", filterv2Messages, url.QueryEscape(notSubscibredPubsubTopic), url.QueryEscape(contentTopic)),
			nil,
		)
		router.ServeHTTP(rr, req)
		require.Equal(t, http.StatusNotFound, rr.Code)
		require.Equal(t,
			fmt.Sprintf("not subscribed to pubsubTopic:%s contentTopic: %s", notSubscibredPubsubTopic, contentTopic),
			rr.Body.String(),
		)
	}
}
|
||||
|
||||
func toMessage(envs []*protocol.Envelope) []*pb.WakuMessage {
|
||||
msgs := make([]*pb.WakuMessage, len(envs))
|
||||
for i, env := range envs {
|
||||
msgs[i] = env.Message()
|
||||
}
|
||||
return msgs
|
||||
}
|
||||
|
||||
func genMessage(pubsubTopic, contentTopic string) *protocol.Envelope {
|
||||
if pubsubTopic == "" {
|
||||
pubsubTopic, _ = protocol.GetPubSubTopicFromContentTopic(contentTopic)
|
||||
}
|
||||
return protocol.NewEnvelope(
|
||||
&pb.WakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: contentTopic,
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
},
|
||||
0,
|
||||
pubsubTopic,
|
||||
)
|
||||
}
|
||||
52
cmd/waku/server/rest/health.go
Normal file
52
cmd/waku/server/rest/health.go
Normal file
@ -0,0 +1,52 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
)
|
||||
|
||||
// HealthService exposes a readiness probe for the node over REST.
type HealthService struct {
	node *node.WakuNode
	mux *chi.Mux
}

// routeHealth is the path the readiness probe is served on.
const routeHealth = "/health"
|
||||
func NewHealthService(node *node.WakuNode, m *chi.Mux) *HealthService {
|
||||
h := &HealthService{
|
||||
node: node,
|
||||
mux: m,
|
||||
}
|
||||
|
||||
m.Get(routeHealth, h.getHealth)
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// HealthResponse is the plain-text body returned by the health endpoint.
type HealthResponse string

// getHealth reports node readiness. With RLN relay enabled it returns 200
// when RLN reports ready, and 500 on a check error, timeout, or not-ready;
// without RLN relay it returns 501.
// NOTE(review): health_api.yaml documents a 503 "not initialized" response
// that this handler never produces — confirm which side is authoritative.
func (d *HealthService) getHealth(w http.ResponseWriter, r *http.Request) {
	if d.node.RLNRelay() != nil {
		isReady, err := d.node.RLNRelay().IsReady(r.Context())
		if err != nil {
			// Distinguish a timed-out readiness check from other failures.
			if errors.Is(err, context.DeadlineExceeded) {
				writeResponse(w, HealthResponse("Health check timed out"), http.StatusInternalServerError)
			} else {
				writeResponse(w, HealthResponse(err.Error()), http.StatusInternalServerError)
			}
			return
		}

		if isReady {
			writeResponse(w, HealthResponse("Node is healthy"), http.StatusOK)
		} else {
			writeResponse(w, HealthResponse("Node is not ready"), http.StatusInternalServerError)
		}
	} else {
		// Only the RLN readiness check is implemented today.
		writeResponse(w, HealthResponse("Non RLN healthcheck is not implemented"), http.StatusNotImplemented)
	}
}
|
||||
41
cmd/waku/server/rest/health_api.yaml
Normal file
41
cmd/waku/server/rest/health_api.yaml
Normal file
@ -0,0 +1,41 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node Health REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
url: https://forum.vac.dev/
|
||||
|
||||
tags:
|
||||
- name: health
|
||||
description: Health check REST API for WakuV2 node
|
||||
|
||||
paths:
|
||||
/health:
|
||||
get:
|
||||
summary: Get node health status
|
||||
description: Retrieve readiness of a Waku v2 node.
|
||||
operationId: healthcheck
|
||||
tags:
|
||||
- health
|
||||
responses:
|
||||
'200':
|
||||
description: Waku v2 node is up and running.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
example: Node is healthy
|
||||
'500':
|
||||
description: Internal server error
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'503':
|
||||
description: Node not initialized or having issues
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
example: Node is not initialized
|
||||
212
cmd/waku/server/rest/legacy_store.go
Normal file
212
cmd/waku/server/rest/legacy_store.go
Normal file
@ -0,0 +1,212 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store/pb"
|
||||
)
|
||||
|
||||
// LegacyStoreService exposes the legacy store protocol over REST.
type LegacyStoreService struct {
	node *node.WakuNode
	mux *chi.Mux
}

// LegacyStoreResponse is the reply to a store query: a page of messages,
// an optional cursor for requesting the next page, and an optional error
// description.
type LegacyStoreResponse struct {
	Messages []LegacyStoreWakuMessage `json:"messages"`
	Cursor *LegacyHistoryCursor `json:"cursor,omitempty"`
	ErrorMessage string `json:"error_message,omitempty"`
}

// LegacyHistoryCursor identifies a pagination position in store results.
type LegacyHistoryCursor struct {
	PubsubTopic string `json:"pubsubTopic"`
	SenderTime string `json:"senderTime"`
	StoreTime string `json:"storeTime"`
	Digest []byte `json:"digest"`
}

// LegacyStoreWakuMessage is the JSON shape of a stored Waku message.
type LegacyStoreWakuMessage struct {
	Payload []byte `json:"payload"`
	ContentTopic string `json:"contentTopic"`
	Version *uint32 `json:"version,omitempty"`
	Timestamp *int64 `json:"timestamp,omitempty"`
	Meta []byte `json:"meta,omitempty"`
}

// routeLegacyStoreMessagesV1 is the path store queries are served on.
const routeLegacyStoreMessagesV1 = "/store/v1/messages"
|
||||
|
||||
func NewLegacyStoreService(node *node.WakuNode, m *chi.Mux) *LegacyStoreService {
|
||||
s := &LegacyStoreService{
|
||||
node: node,
|
||||
mux: m,
|
||||
}
|
||||
|
||||
m.Get(routeLegacyStoreMessagesV1, s.getV1Messages)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func getLegacyStoreParams(r *http.Request) (*legacy_store.Query, []legacy_store.HistoryRequestOption, error) {
|
||||
query := &legacy_store.Query{}
|
||||
var options []legacy_store.HistoryRequestOption
|
||||
var err error
|
||||
peerAddrStr := r.URL.Query().Get("peerAddr")
|
||||
var m multiaddr.Multiaddr
|
||||
if peerAddrStr != "" {
|
||||
m, err = multiaddr.NewMultiaddr(peerAddrStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
options = append(options, legacy_store.WithPeerAddr(m))
|
||||
} else {
|
||||
// The user didn't specify a peer address and self-node is configured as a store node.
|
||||
// In this case we assume that the user is willing to retrieve the messages stored by
|
||||
// the local/self store node.
|
||||
options = append(options, legacy_store.WithLocalQuery())
|
||||
}
|
||||
|
||||
query.PubsubTopic = r.URL.Query().Get("pubsubTopic")
|
||||
|
||||
contentTopics := r.URL.Query().Get("contentTopics")
|
||||
if contentTopics != "" {
|
||||
query.ContentTopics = strings.Split(contentTopics, ",")
|
||||
}
|
||||
|
||||
startTimeStr := r.URL.Query().Get("startTime")
|
||||
if startTimeStr != "" {
|
||||
startTime, err := strconv.ParseInt(startTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
query.StartTime = &startTime
|
||||
}
|
||||
|
||||
endTimeStr := r.URL.Query().Get("endTime")
|
||||
if endTimeStr != "" {
|
||||
endTime, err := strconv.ParseInt(endTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
query.EndTime = &endTime
|
||||
}
|
||||
|
||||
var cursor *pb.Index
|
||||
|
||||
senderTimeStr := r.URL.Query().Get("senderTime")
|
||||
storeTimeStr := r.URL.Query().Get("storeTime")
|
||||
digestStr := r.URL.Query().Get("digest")
|
||||
|
||||
if senderTimeStr != "" || storeTimeStr != "" || digestStr != "" {
|
||||
cursor = &pb.Index{}
|
||||
|
||||
if senderTimeStr != "" {
|
||||
cursor.SenderTime, err = strconv.ParseInt(senderTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if storeTimeStr != "" {
|
||||
cursor.ReceiverTime, err = strconv.ParseInt(storeTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if digestStr != "" {
|
||||
cursor.Digest, err = base64.URLEncoding.DecodeString(digestStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
cursor.PubsubTopic = query.PubsubTopic
|
||||
|
||||
options = append(options, legacy_store.WithCursor(cursor))
|
||||
}
|
||||
|
||||
pageSizeStr := r.URL.Query().Get("pageSize")
|
||||
ascendingStr := r.URL.Query().Get("ascending")
|
||||
if ascendingStr != "" || pageSizeStr != "" {
|
||||
ascending := true
|
||||
pageSize := uint64(legacy_store.DefaultPageSize)
|
||||
if ascendingStr != "" {
|
||||
ascending, err = strconv.ParseBool(ascendingStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if pageSizeStr != "" {
|
||||
pageSize, err = strconv.ParseUint(pageSizeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if pageSize > legacy_store.MaxPageSize {
|
||||
pageSize = legacy_store.MaxPageSize
|
||||
}
|
||||
}
|
||||
|
||||
options = append(options, legacy_store.WithPaging(ascending, pageSize))
|
||||
}
|
||||
|
||||
return query, options, nil
|
||||
}
|
||||
|
||||
func writeLegacyStoreError(w http.ResponseWriter, code int, err error) {
|
||||
writeResponse(w, LegacyStoreResponse{ErrorMessage: err.Error()}, code)
|
||||
}
|
||||
|
||||
func toLegacyStoreResponse(result *legacy_store.Result) LegacyStoreResponse {
|
||||
response := LegacyStoreResponse{}
|
||||
|
||||
cursor := result.Cursor()
|
||||
if cursor != nil {
|
||||
response.Cursor = &LegacyHistoryCursor{
|
||||
PubsubTopic: cursor.PubsubTopic,
|
||||
SenderTime: fmt.Sprintf("%d", cursor.SenderTime),
|
||||
StoreTime: fmt.Sprintf("%d", cursor.ReceiverTime),
|
||||
Digest: cursor.Digest,
|
||||
}
|
||||
}
|
||||
|
||||
for _, m := range result.Messages {
|
||||
response.Messages = append(response.Messages, LegacyStoreWakuMessage{
|
||||
Payload: m.Payload,
|
||||
ContentTopic: m.ContentTopic,
|
||||
Version: m.Version,
|
||||
Timestamp: m.Timestamp,
|
||||
Meta: m.Meta,
|
||||
})
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
func (d *LegacyStoreService) getV1Messages(w http.ResponseWriter, r *http.Request) {
|
||||
query, options, err := getLegacyStoreParams(r)
|
||||
if err != nil {
|
||||
writeLegacyStoreError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := d.node.LegacyStore().Query(ctx, *query, options...)
|
||||
if err != nil {
|
||||
writeLegacyStoreError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, nil, toLegacyStoreResponse(result))
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
title: Waku V2 node Store REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
@ -17,6 +17,7 @@ import (
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestGetMessages(t *testing.T) {
|
||||
@ -32,16 +33,16 @@ func TestGetMessages(t *testing.T) {
|
||||
topic1 := "1"
|
||||
pubsubTopic1 := "topic1"
|
||||
|
||||
now := utils.GetUnixEpoch()
|
||||
msg1 := tests.CreateWakuMessage(topic1, now+1)
|
||||
msg2 := tests.CreateWakuMessage(topic1, now+2)
|
||||
msg3 := tests.CreateWakuMessage(topic1, now+3)
|
||||
now := *utils.GetUnixEpoch()
|
||||
msg1 := tests.CreateWakuMessage(topic1, proto.Int64(now+1))
|
||||
msg2 := tests.CreateWakuMessage(topic1, proto.Int64(now+2))
|
||||
msg3 := tests.CreateWakuMessage(topic1, proto.Int64(now+3))
|
||||
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg1, utils.GetUnixEpoch(), pubsubTopic1))
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg2, utils.GetUnixEpoch(), pubsubTopic1))
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg3, utils.GetUnixEpoch(), pubsubTopic1))
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg1, *utils.GetUnixEpoch(), pubsubTopic1))
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg2, *utils.GetUnixEpoch(), pubsubTopic1))
|
||||
node1.Broadcaster().Submit(protocol.NewEnvelope(msg3, *utils.GetUnixEpoch(), pubsubTopic1))
|
||||
|
||||
n1HostInfo, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/p2p/%s", node1.Host().ID().Pretty()))
|
||||
n1HostInfo, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/p2p/%s", node1.Host().ID().String()))
|
||||
n1Addr := node1.ListenAddresses()[0].Encapsulate(n1HostInfo)
|
||||
|
||||
node2, err := node.New()
|
||||
@ -51,7 +52,7 @@ func TestGetMessages(t *testing.T) {
|
||||
defer node2.Stop()
|
||||
router := chi.NewRouter()
|
||||
|
||||
_ = NewStoreService(node2, router)
|
||||
_ = NewLegacyStoreService(node2, router)
|
||||
|
||||
// TEST: get cursor
|
||||
// TEST: get no messages
|
||||
@ -63,12 +64,12 @@ func TestGetMessages(t *testing.T) {
|
||||
"pubsubTopic": {pubsubTopic1},
|
||||
"pageSize": {"2"},
|
||||
}
|
||||
path := ROUTE_STORE_MESSAGESV1 + "?" + queryParams.Encode()
|
||||
path := routeLegacyStoreMessagesV1 + "?" + queryParams.Encode()
|
||||
req, _ := http.NewRequest(http.MethodGet, path, nil)
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
response := StoreResponse{}
|
||||
response := LegacyStoreResponse{}
|
||||
err = json.Unmarshal(rr.Body.Bytes(), &response)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, response.Messages, 2)
|
||||
@ -83,12 +84,12 @@ func TestGetMessages(t *testing.T) {
|
||||
"digest": {base64.URLEncoding.EncodeToString(response.Cursor.Digest)},
|
||||
"pageSize": {"2"},
|
||||
}
|
||||
path = ROUTE_STORE_MESSAGESV1 + "?" + queryParams.Encode()
|
||||
path = routeLegacyStoreMessagesV1 + "?" + queryParams.Encode()
|
||||
req, _ = http.NewRequest(http.MethodGet, path, nil)
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
response = StoreResponse{}
|
||||
response = LegacyStoreResponse{}
|
||||
err = json.Unmarshal(rr.Body.Bytes(), &response)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, response.Messages, 1)
|
||||
84
cmd/waku/server/rest/lightpush_api.yaml
Normal file
84
cmd/waku/server/rest/lightpush_api.yaml
Normal file
@ -0,0 +1,84 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
url: https://forum.vac.dev/
|
||||
|
||||
tags:
|
||||
- name: lightpush
|
||||
description: Lightpush REST API for WakuV2 node
|
||||
|
||||
paths:
|
||||
/lightpush/v1/message:
|
||||
post:
|
||||
summary: Request a message relay from a LightPush service provider
|
||||
description: Push a message to be relayed on a PubSub topic.
|
||||
operationId: postMessagesToPubsubTopic
|
||||
tags:
|
||||
- lightpush
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/PushRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'400':
|
||||
description: Bad request.
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'500':
|
||||
description: Internal server error
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'503':
|
||||
description: Service not available
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
|
||||
components:
|
||||
schemas:
|
||||
PubsubTopic:
|
||||
type: string
|
||||
|
||||
ContentTopic:
|
||||
type: string
|
||||
|
||||
WakuMessage:
|
||||
type: object
|
||||
properties:
|
||||
payload:
|
||||
type: string
|
||||
format: byte
|
||||
contentTopic:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
version:
|
||||
type: number
|
||||
timestamp:
|
||||
type: number
|
||||
required:
|
||||
- payload
|
||||
- contentTopic
|
||||
|
||||
PushRequest:
|
||||
type: object
|
||||
properties:
|
||||
pubsubTopic:
|
||||
$ref: '#/components/schemas/PubsubTopic'
|
||||
message:
|
||||
$ref: '#/components/schemas/WakuMessage'
|
||||
required:
|
||||
- message
|
||||
96
cmd/waku/server/rest/lightpush_rest.go
Normal file
96
cmd/waku/server/rest/lightpush_rest.go
Normal file
@ -0,0 +1,96 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const routeLightPushV1Messages = "/lightpush/v1/message"
|
||||
|
||||
type LightpushService struct {
|
||||
node *node.WakuNode
|
||||
log *zap.Logger
|
||||
}
|
||||
|
||||
func NewLightpushService(node *node.WakuNode, m *chi.Mux, log *zap.Logger) *LightpushService {
|
||||
serv := &LightpushService{
|
||||
node: node,
|
||||
log: log.Named("lightpush"),
|
||||
}
|
||||
|
||||
m.Post(routeLightPushV1Messages, serv.postMessagev1)
|
||||
|
||||
return serv
|
||||
}
|
||||
|
||||
func (msg lightpushRequest) Check() error {
|
||||
if msg.Message == nil {
|
||||
return errors.New("waku message is required")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type lightpushRequest struct {
|
||||
PubSubTopic string `json:"pubsubTopic"`
|
||||
Message *RestWakuMessage `json:"message"`
|
||||
}
|
||||
|
||||
// handled error codes are 200, 400, 500, 503
|
||||
func (serv *LightpushService) postMessagev1(w http.ResponseWriter, req *http.Request) {
|
||||
request := &lightpushRequest{}
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(request); err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
if err := request.Check(); err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
serv.log.Error("writing response", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if serv.node.Lightpush() == nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
|
||||
message, err := request.Message.ToProto()
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
if err != nil {
|
||||
serv.log.Error("writing response", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err = message.Validate(); err != nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
if err != nil {
|
||||
serv.log.Error("writing response", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
_, err = serv.node.Lightpush().Publish(req.Context(), message, lightpush.WithPubSubTopic(request.PubSubTopic))
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusServiceUnavailable)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
if err != nil {
|
||||
serv.log.Error("writing response", zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
}
|
||||
68
cmd/waku/server/rest/lightpush_rest_test.go
Normal file
68
cmd/waku/server/rest/lightpush_rest_test.go
Normal file
@ -0,0 +1,68 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/waku-org/go-waku/tests"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
wakupeerstore "github.com/waku-org/go-waku/waku/v2/peerstore"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
)
|
||||
|
||||
// node2 connects to node1
|
||||
func twoLightPushConnectedNodes(t *testing.T, pubSubTopic string) (*node.WakuNode, *node.WakuNode) {
|
||||
node1 := createNode(t, node.WithLightPush(), node.WithWakuRelay())
|
||||
node2 := createNode(t, node.WithLightPush(), node.WithWakuRelay())
|
||||
|
||||
_, err := node1.Relay().Subscribe(context.Background(), protocol.NewContentFilter(pubSubTopic))
|
||||
require.NoError(t, err)
|
||||
_, err = node2.Relay().Subscribe(context.Background(), protocol.NewContentFilter(pubSubTopic))
|
||||
require.NoError(t, err)
|
||||
|
||||
node2.Host().Peerstore().AddAddr(node1.Host().ID(), tests.GetHostAddress(node1.Host()), peerstore.PermanentAddrTTL)
|
||||
err = node2.Host().Peerstore().AddProtocols(node1.Host().ID(), lightpush.LightPushID_v20beta1)
|
||||
require.NoError(t, err)
|
||||
err = node2.Host().Peerstore().(*wakupeerstore.WakuPeerstoreImpl).SetPubSubTopics(node1.Host().ID(), []string{pubSubTopic})
|
||||
require.NoError(t, err)
|
||||
return node1, node2
|
||||
}
|
||||
|
||||
func TestLightpushMessagev1(t *testing.T) {
|
||||
pubSubTopic := "/waku/2/default-waku/proto"
|
||||
node1, node2 := twoLightPushConnectedNodes(t, pubSubTopic)
|
||||
defer func() {
|
||||
node1.Stop()
|
||||
node2.Stop()
|
||||
}()
|
||||
|
||||
router := chi.NewRouter()
|
||||
serv := NewLightpushService(node2, router, utils.Logger())
|
||||
_ = serv
|
||||
|
||||
msg := lightpushRequest{
|
||||
PubSubTopic: pubSubTopic,
|
||||
Message: &RestWakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: "abc",
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
},
|
||||
}
|
||||
msgJSONBytes, err := json.Marshal(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeLightPushV1Messages, bytes.NewReader(msgJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
}
|
||||
49
cmd/waku/server/rest/message.go
Normal file
49
cmd/waku/server/rest/message.go
Normal file
@ -0,0 +1,49 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/waku-org/go-waku/cmd/waku/server"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
)
|
||||
|
||||
type RestWakuMessage struct {
|
||||
Payload server.Base64URLByte `json:"payload"`
|
||||
ContentTopic string `json:"contentTopic"`
|
||||
Version *uint32 `json:"version,omitempty"`
|
||||
Timestamp *int64 `json:"timestamp,omitempty"`
|
||||
Meta []byte `json:"meta,omitempty"`
|
||||
Ephemeral *bool `json:"ephemeral"`
|
||||
}
|
||||
|
||||
func (r *RestWakuMessage) FromProto(input *pb.WakuMessage) error {
|
||||
if err := input.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Payload = input.Payload
|
||||
r.ContentTopic = input.ContentTopic
|
||||
r.Timestamp = input.Timestamp
|
||||
r.Version = input.Version
|
||||
r.Meta = input.Meta
|
||||
r.Ephemeral = input.Ephemeral
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RestWakuMessage) ToProto() (*pb.WakuMessage, error) {
|
||||
if r == nil {
|
||||
return nil, errors.New("wakumessage is missing")
|
||||
}
|
||||
|
||||
msg := &pb.WakuMessage{
|
||||
Payload: r.Payload,
|
||||
ContentTopic: r.ContentTopic,
|
||||
Version: r.Version,
|
||||
Timestamp: r.Timestamp,
|
||||
Meta: r.Meta,
|
||||
Ephemeral: r.Ephemeral,
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
}
|
||||
316
cmd/waku/server/rest/relay.go
Normal file
316
cmd/waku/server/rest/relay.go
Normal file
@ -0,0 +1,316 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/waku-org/go-waku/cmd/waku/server"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const routeRelayV1Subscriptions = "/relay/v1/subscriptions"
|
||||
const routeRelayV1Messages = "/relay/v1/messages/{topic}"
|
||||
|
||||
const routeRelayV1AutoSubscriptions = "/relay/v1/auto/subscriptions"
|
||||
const routeRelayV1AutoMessages = "/relay/v1/auto/messages"
|
||||
|
||||
// RelayService represents the REST service for WakuRelay
|
||||
type RelayService struct {
|
||||
node *node.WakuNode
|
||||
|
||||
log *zap.Logger
|
||||
|
||||
cacheCapacity uint
|
||||
}
|
||||
|
||||
// NewRelayService returns an instance of RelayService
|
||||
func NewRelayService(node *node.WakuNode, m *chi.Mux, cacheCapacity uint, log *zap.Logger) *RelayService {
|
||||
s := &RelayService{
|
||||
node: node,
|
||||
log: log.Named("relay"),
|
||||
cacheCapacity: cacheCapacity,
|
||||
}
|
||||
|
||||
m.Post(routeRelayV1Subscriptions, s.postV1Subscriptions)
|
||||
m.Delete(routeRelayV1Subscriptions, s.deleteV1Subscriptions)
|
||||
m.Get(routeRelayV1Messages, s.getV1Messages)
|
||||
m.Post(routeRelayV1Messages, s.postV1Message)
|
||||
|
||||
m.Post(routeRelayV1AutoSubscriptions, s.postV1AutoSubscriptions)
|
||||
m.Delete(routeRelayV1AutoSubscriptions, s.deleteV1AutoSubscriptions)
|
||||
|
||||
m.Route(routeRelayV1AutoMessages, func(r chi.Router) {
|
||||
r.Get("/{contentTopic}", s.getV1AutoMessages)
|
||||
r.Post("/", s.postV1AutoMessage)
|
||||
})
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *RelayService) deleteV1Subscriptions(w http.ResponseWriter, req *http.Request) {
|
||||
var topics []string
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&topics); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
var err error
|
||||
for _, topic := range topics {
|
||||
err = r.node.Relay().Unsubscribe(req.Context(), protocol.NewContentFilter(topic))
|
||||
if err != nil {
|
||||
r.log.Error("unsubscribing from topic", zap.String("topic", strings.Replace(strings.Replace(topic, "\n", "", -1), "\r", "", -1)), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
|
||||
func (r *RelayService) postV1Subscriptions(w http.ResponseWriter, req *http.Request) {
|
||||
var topics []string
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&topics); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
var err error
|
||||
var successCnt int
|
||||
var topicToSubscribe string
|
||||
for _, topic := range topics {
|
||||
if topic == "" {
|
||||
topicToSubscribe = relay.DefaultWakuTopic
|
||||
} else {
|
||||
topicToSubscribe = topic
|
||||
}
|
||||
_, err = r.node.Relay().Subscribe(r.node.Relay().Context(), protocol.NewContentFilter(topicToSubscribe), relay.WithCacheSize(r.cacheCapacity))
|
||||
|
||||
if err != nil {
|
||||
r.log.Error("subscribing to topic", zap.String("topic", strings.Replace(topicToSubscribe, "\n", "", -1)), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
successCnt++
|
||||
}
|
||||
|
||||
// on partial subscribe failure
|
||||
if successCnt > 0 && err != nil {
|
||||
r.log.Error("partial subscribe failed", zap.Error(err))
|
||||
// on partial failure
|
||||
writeResponse(w, err, http.StatusOK)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
|
||||
func (r *RelayService) getV1Messages(w http.ResponseWriter, req *http.Request) {
|
||||
topic := topicFromPath(w, req, "topic", r.log)
|
||||
if topic == "" {
|
||||
r.log.Debug("topic is not specified, using default waku topic")
|
||||
topic = relay.DefaultWakuTopic
|
||||
}
|
||||
//TODO: Update the API to also take a contentTopic since relay now supports filtering based on contentTopic as well.
|
||||
sub, err := r.node.Relay().GetSubscriptionWithPubsubTopic(topic, "")
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
r.log.Error("writing response", zap.Error(err))
|
||||
return
|
||||
}
|
||||
var response []*RestWakuMessage
|
||||
done := false
|
||||
for {
|
||||
if done || len(response) > int(r.cacheCapacity) {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case envelope, open := <-sub.Ch:
|
||||
if !open {
|
||||
r.log.Error("consume channel is closed for subscription", zap.String("pubsubTopic", topic))
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
_, err = w.Write([]byte("consume channel is closed for subscription"))
|
||||
if err != nil {
|
||||
r.log.Error("writing response", zap.Error(err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
message := &RestWakuMessage{}
|
||||
if err := message.FromProto(envelope.Message()); err != nil {
|
||||
r.log.Error("converting protobuffer msg into rest msg", zap.Error(err))
|
||||
} else {
|
||||
response = append(response, message)
|
||||
}
|
||||
case <-req.Context().Done():
|
||||
done = true
|
||||
default:
|
||||
done = true
|
||||
}
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, nil, response)
|
||||
}
|
||||
|
||||
func (r *RelayService) postV1Message(w http.ResponseWriter, req *http.Request) {
|
||||
topic := topicFromPath(w, req, "topic", r.log)
|
||||
if topic == "" {
|
||||
r.log.Debug("topic is not specified, using default waku topic")
|
||||
topic = relay.DefaultWakuTopic
|
||||
}
|
||||
|
||||
var restMessage *RestWakuMessage
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&restMessage); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
message, err := restMessage.ToProto()
|
||||
if err != nil {
|
||||
r.log.Error("failed to convert message to proto", zap.Error(err))
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if err := server.AppendRLNProof(r.node, message); err != nil {
|
||||
r.log.Error("failed to append RLN proof for the message", zap.Error(err))
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = r.node.Relay().Publish(req.Context(), message, relay.WithPubSubTopic(strings.Replace(topic, "\n", "", -1)))
|
||||
if err != nil {
|
||||
r.log.Error("publishing message", zap.Error(err))
|
||||
if err == pb.ErrMissingPayload || err == pb.ErrMissingContentTopic || err == pb.ErrInvalidMetaLength {
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
|
||||
func (r *RelayService) deleteV1AutoSubscriptions(w http.ResponseWriter, req *http.Request) {
|
||||
var cTopics []string
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&cTopics); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
err := r.node.Relay().Unsubscribe(req.Context(), protocol.NewContentFilter("", cTopics...))
|
||||
if err != nil {
|
||||
r.log.Error("unsubscribing from topics", zap.Strings("contentTopics", cTopics), zap.Error(err))
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
|
||||
func (r *RelayService) postV1AutoSubscriptions(w http.ResponseWriter, req *http.Request) {
|
||||
var cTopics []string
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&cTopics); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
var err error
|
||||
_, err = r.node.Relay().Subscribe(r.node.Relay().Context(), protocol.NewContentFilter("", cTopics...), relay.WithCacheSize(r.cacheCapacity))
|
||||
if err != nil {
|
||||
r.log.Error("subscribing to topics", zap.Strings("contentTopics", cTopics), zap.Error(err))
|
||||
}
|
||||
r.log.Debug("subscribed to topics", zap.Strings("contentTopics", cTopics))
|
||||
|
||||
if err != nil {
|
||||
r.log.Error("writing response", zap.Error(err))
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
} else {
|
||||
writeErrOrResponse(w, err, true)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *RelayService) getV1AutoMessages(w http.ResponseWriter, req *http.Request) {
|
||||
|
||||
cTopic := topicFromPath(w, req, "contentTopic", r.log)
|
||||
sub, err := r.node.Relay().GetSubscription(cTopic)
|
||||
if err != nil {
|
||||
r.log.Error("writing response", zap.Error(err))
|
||||
writeErrResponse(w, r.log, err, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
var response []*RestWakuMessage
|
||||
done := false
|
||||
for {
|
||||
if done || len(response) > int(r.cacheCapacity) {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case envelope := <-sub.Ch:
|
||||
message := &RestWakuMessage{}
|
||||
if err := message.FromProto(envelope.Message()); err != nil {
|
||||
r.log.Error("converting protobuffer msg into rest msg", zap.Error(err))
|
||||
} else {
|
||||
response = append(response, message)
|
||||
}
|
||||
case <-req.Context().Done():
|
||||
done = true
|
||||
default:
|
||||
done = true
|
||||
}
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, nil, response)
|
||||
}
|
||||
|
||||
func (r *RelayService) postV1AutoMessage(w http.ResponseWriter, req *http.Request) {
|
||||
|
||||
var restMessage *RestWakuMessage
|
||||
decoder := json.NewDecoder(req.Body)
|
||||
if err := decoder.Decode(&restMessage); err != nil {
|
||||
r.log.Error("decoding request failure", zap.Error(err))
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
message, err := restMessage.ToProto()
|
||||
if err != nil {
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
if err = server.AppendRLNProof(r.node, message); err != nil {
|
||||
writeErrOrResponse(w, err, nil)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = r.node.Relay().Publish(req.Context(), message)
|
||||
if err != nil {
|
||||
r.log.Error("publishing message", zap.Error(err))
|
||||
if err == pb.ErrMissingPayload || err == pb.ErrMissingContentTopic || err == pb.ErrInvalidMetaLength {
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
writeErrResponse(w, r.log, err, http.StatusBadRequest)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
openapi: 3.0.3
|
||||
info:
|
||||
title: Waku V2 node REST API
|
||||
title: Waku V2 node Relay REST API
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: VAC Team
|
||||
@ -106,6 +106,103 @@ paths:
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
/relay/v1/auto/messages/{contentTopic}: # Note the plural in messages
|
||||
get: # get_waku_v2_relay_v1_auto_messages
|
||||
summary: Get the latest messages on the polled topic
|
||||
description: Get a list of messages that were received on a subscribed Content topic after the last time this method was called.
|
||||
operationId: getMessagesByTopic
|
||||
tags:
|
||||
- relay
|
||||
parameters:
|
||||
- in: path
|
||||
name: contentTopic # Note the name is the same as in the path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: The content topic to poll for messages
|
||||
responses:
|
||||
'200':
|
||||
description: The latest messages on the polled topic.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RelayGetMessagesResponse'
|
||||
'4XX':
|
||||
description: Bad request.
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
/relay/v1/auto/messages: # Note the plural in messages
|
||||
post: # post_waku_v2_relay_v1_auto_message
|
||||
summary: Publish a message to be relayed
|
||||
description: Publishes a message to be relayed on a Content topic.
|
||||
operationId: postMessagesToTopic
|
||||
tags:
|
||||
- relay
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RelayPostMessagesRequest'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
'4XX':
|
||||
description: Bad request.
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
/relay/v1/auto/subscriptions:
|
||||
post: # post_waku_v2_relay_v1_auto_subscriptions
|
||||
summary: Subscribe a node to an array of topics
|
||||
description: Subscribe a node to an array of Content topics.
|
||||
operationId: postSubscriptions
|
||||
tags:
|
||||
- relay
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'4XX':
|
||||
description: Bad request.
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
delete: # delete_waku_v2_relay_v1_auto_subscriptions
|
||||
summary: Unsubscribe a node from an array of topics
|
||||
description: Unsubscribe a node from an array of Content topics.
|
||||
operationId: deleteSubscriptions
|
||||
tags:
|
||||
- relay
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/ContentTopic'
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
content:
|
||||
text/plain:
|
||||
schema:
|
||||
type: string
|
||||
'4XX':
|
||||
description: Bad request.
|
||||
'5XX':
|
||||
description: Unexpected error.
|
||||
|
||||
components:
|
||||
schemas:
|
||||
PubSubTopic:
|
||||
@ -145,4 +242,4 @@ components:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/PubSubTopic'
|
||||
|
||||
|
||||
303
cmd/waku/server/rest/relay_test.go
Normal file
303
cmd/waku/server/rest/relay_test.go
Normal file
@ -0,0 +1,303 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/waku-org/go-waku/tests"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/utils"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func makeRelayService(t *testing.T, mux *chi.Mux) *RelayService {
|
||||
options := node.WithWakuRelayAndMinPeers(0)
|
||||
n, err := node.New(options)
|
||||
require.NoError(t, err)
|
||||
err = n.Start(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
return NewRelayService(n, mux, 3, utils.Logger())
|
||||
}
|
||||
|
||||
func TestPostV1Message(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
testTopic := "test"
|
||||
|
||||
r := makeRelayService(t, router)
|
||||
msg := &RestWakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: "abc",
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
}
|
||||
msgJSONBytes, err := json.Marshal(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = r.node.Relay().Subscribe(context.Background(), protocol.NewContentFilter(testTopic))
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, "/relay/v1/messages/"+testTopic, bytes.NewReader(msgJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
}
|
||||
|
||||
func TestRelaySubscription(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
|
||||
r := makeRelayService(t, router)
|
||||
|
||||
// Wait for node to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
topics := []string{"test"}
|
||||
topicsJSONBytes, err := json.Marshal(topics)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeRelayV1Subscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
|
||||
// Test max messages in subscription
|
||||
now := *utils.GetUnixEpoch()
|
||||
_, err = r.node.Relay().Publish(context.Background(),
|
||||
tests.CreateWakuMessage("test", proto.Int64(now+1)), relay.WithPubSubTopic("test"))
|
||||
require.NoError(t, err)
|
||||
_, err = r.node.Relay().Publish(context.Background(),
|
||||
tests.CreateWakuMessage("test", proto.Int64(now+2)), relay.WithPubSubTopic("test"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = r.node.Relay().Publish(context.Background(),
|
||||
tests.CreateWakuMessage("test", proto.Int64(now+3)), relay.WithPubSubTopic("test"))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the messages to be processed
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Test deletion
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodDelete, routeRelayV1Subscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
}
|
||||
|
||||
func TestRelayGetV1Messages(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
router1 := chi.NewRouter()
|
||||
|
||||
serviceA := makeRelayService(t, router)
|
||||
|
||||
serviceB := makeRelayService(t, router1)
|
||||
|
||||
hostInfo, err := multiaddr.NewMultiaddr(fmt.Sprintf("/p2p/%s", serviceB.node.Host().ID().String()))
|
||||
require.NoError(t, err)
|
||||
|
||||
var addr multiaddr.Multiaddr
|
||||
for _, a := range serviceB.node.Host().Addrs() {
|
||||
addr = a.Encapsulate(hostInfo)
|
||||
break
|
||||
}
|
||||
err = serviceA.node.DialPeerWithMultiAddress(context.Background(), addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the dial to complete
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
topics := []string{"test"}
|
||||
topicsJSONBytes, err := json.Marshal(topics)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeRelayV1Subscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
// Wait for the subscription to be started
|
||||
time.Sleep(1 * time.Second)
|
||||
ephemeral := true
|
||||
msg := &RestWakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: "test",
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
Ephemeral: &ephemeral,
|
||||
}
|
||||
msgJsonBytes, err := json.Marshal(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodPost, "/relay/v1/messages/test", bytes.NewReader(msgJsonBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
// Wait for the message to be received
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodGet, "/relay/v1/messages/test", bytes.NewReader([]byte{}))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
var messages []*pb.WakuMessage
|
||||
err = json.Unmarshal(rr.Body.Bytes(), &messages)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, messages, 1)
|
||||
require.Equal(t, *messages[0].Ephemeral, true)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodGet, "/relay/v1/messages/test", bytes.NewReader([]byte{}))
|
||||
router1.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
|
||||
}
|
||||
|
||||
func TestPostAutoV1Message(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
|
||||
_ = makeRelayService(t, router)
|
||||
msg := &RestWakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: "/toychat/1/huilong/proto",
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
}
|
||||
msgJSONBytes, err := json.Marshal(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeRelayV1AutoMessages, bytes.NewReader(msgJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestRelayAutoSubUnsub(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
|
||||
r := makeRelayService(t, router)
|
||||
|
||||
// Wait for node to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
cTopic1 := "/toychat/1/huilong/proto"
|
||||
|
||||
cTopics := []string{cTopic1}
|
||||
topicsJSONBytes, err := json.Marshal(cTopics)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeRelayV1AutoSubscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
|
||||
// Test publishing messages after subscription
|
||||
now := *utils.GetUnixEpoch()
|
||||
_, err = r.node.Relay().Publish(context.Background(),
|
||||
tests.CreateWakuMessage(cTopic1, proto.Int64(now+1)))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the messages to be processed
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
|
||||
// Test deletion
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodDelete, routeRelayV1AutoSubscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
|
||||
cTopics = append(cTopics, "test")
|
||||
topicsJSONBytes, err = json.Marshal(cTopics)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodPost, routeRelayV1AutoSubscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusBadRequest, rr.Code)
|
||||
|
||||
}
|
||||
|
||||
func TestRelayGetV1AutoMessages(t *testing.T) {
|
||||
router := chi.NewRouter()
|
||||
router1 := chi.NewRouter()
|
||||
|
||||
serviceA := makeRelayService(t, router)
|
||||
|
||||
serviceB := makeRelayService(t, router1)
|
||||
|
||||
hostInfo, err := multiaddr.NewMultiaddr(fmt.Sprintf("/p2p/%s", serviceB.node.Host().ID().String()))
|
||||
require.NoError(t, err)
|
||||
|
||||
var addr multiaddr.Multiaddr
|
||||
for _, a := range serviceB.node.Host().Addrs() {
|
||||
addr = a.Encapsulate(hostInfo)
|
||||
break
|
||||
}
|
||||
err = serviceA.node.DialPeerWithMultiAddress(context.Background(), addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the dial to complete
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
cTopic1 := "/toychat/1/huilong/proto"
|
||||
|
||||
cTopics := []string{cTopic1}
|
||||
topicsJSONBytes, err := json.Marshal(cTopics)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest(http.MethodPost, routeRelayV1AutoSubscriptions, bytes.NewReader(topicsJSONBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
require.Equal(t, "true", rr.Body.String())
|
||||
|
||||
// Wait for the subscription to be started
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
msg := &RestWakuMessage{
|
||||
Payload: []byte{1, 2, 3},
|
||||
ContentTopic: cTopic1,
|
||||
Timestamp: utils.GetUnixEpoch(),
|
||||
}
|
||||
msgJsonBytes, err := json.Marshal(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodPost, routeRelayV1AutoMessages, bytes.NewReader(msgJsonBytes))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
// Wait for the message to be received
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", routeRelayV1AutoMessages, url.QueryEscape(cTopic1)), bytes.NewReader([]byte{}))
|
||||
router.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusOK, rr.Code)
|
||||
|
||||
var messages []*pb.WakuMessage
|
||||
err = json.Unmarshal(rr.Body.Bytes(), &messages)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, messages, 1)
|
||||
|
||||
rr = httptest.NewRecorder()
|
||||
req, _ = http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", routeRelayV1AutoMessages, url.QueryEscape(cTopic1)), bytes.NewReader([]byte{}))
|
||||
router1.ServeHTTP(rr, req)
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
|
||||
}
|
||||
@ -11,7 +11,7 @@ type Adder func(msg *protocol.Envelope)
|
||||
|
||||
type runnerService struct {
|
||||
broadcaster relay.Broadcaster
|
||||
sub relay.Subscription
|
||||
sub *relay.Subscription
|
||||
cancel context.CancelFunc
|
||||
adder Adder
|
||||
}
|
||||
@ -26,7 +26,7 @@ func newRunnerService(broadcaster relay.Broadcaster, adder Adder) *runnerService
|
||||
func (r *runnerService) Start(ctx context.Context) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
r.cancel = cancel
|
||||
r.sub = r.broadcaster.RegisterForAll(1024)
|
||||
r.sub = r.broadcaster.RegisterForAll(relay.WithBufferSize(relay.DefaultRelaySubscriptionBufferSize))
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
189
cmd/waku/server/rest/store.go
Normal file
189
cmd/waku/server/rest/store.go
Normal file
@ -0,0 +1,189 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
storepb "github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type StoreQueryService struct {
|
||||
node *node.WakuNode
|
||||
mux *chi.Mux
|
||||
}
|
||||
|
||||
const routeStoreMessagesV1 = "/store/v3/messages"
|
||||
|
||||
func NewStoreQueryService(node *node.WakuNode, m *chi.Mux) *StoreQueryService {
|
||||
s := &StoreQueryService{
|
||||
node: node,
|
||||
mux: m,
|
||||
}
|
||||
|
||||
m.Get(routeStoreMessagesV1, s.getV3Messages)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func getStoreParams(r *http.Request) (store.Criteria, []store.RequestOption, error) {
|
||||
var options []store.RequestOption
|
||||
var err error
|
||||
peerAddrStr := r.URL.Query().Get("peerAddr")
|
||||
var m multiaddr.Multiaddr
|
||||
if peerAddrStr != "" {
|
||||
m, err = multiaddr.NewMultiaddr(peerAddrStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
options = append(options, store.WithPeerAddr(m))
|
||||
}
|
||||
|
||||
includeData := false
|
||||
includeDataStr := r.URL.Query().Get("includeData")
|
||||
if includeDataStr != "" {
|
||||
includeData, err = strconv.ParseBool(includeDataStr)
|
||||
if err != nil {
|
||||
return nil, nil, errors.New("invalid value for includeData. Use true|false")
|
||||
}
|
||||
}
|
||||
options = append(options, store.IncludeData(includeData))
|
||||
|
||||
pubsubTopic := r.URL.Query().Get("pubsubTopic")
|
||||
|
||||
contentTopics := r.URL.Query().Get("contentTopics")
|
||||
var contentTopicsArr []string
|
||||
if contentTopics != "" {
|
||||
contentTopicsArr = strings.Split(contentTopics, ",")
|
||||
}
|
||||
|
||||
hashesStr := r.URL.Query().Get("hashes")
|
||||
var hashes []pb.MessageHash
|
||||
if hashesStr != "" {
|
||||
hashesStrArr := strings.Split(hashesStr, ",")
|
||||
for _, hashStr := range hashesStrArr {
|
||||
hash, err := base64.URLEncoding.DecodeString(hashStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
hashes = append(hashes, pb.ToMessageHash(hash))
|
||||
}
|
||||
}
|
||||
|
||||
isMsgHashCriteria := false
|
||||
if len(hashes) != 0 {
|
||||
isMsgHashCriteria = true
|
||||
if pubsubTopic != "" || len(contentTopics) != 0 {
|
||||
return nil, nil, errors.New("cant use content filters while specifying message hashes")
|
||||
}
|
||||
} else {
|
||||
if pubsubTopic == "" || len(contentTopicsArr) == 0 {
|
||||
return nil, nil, errors.New("pubsubTopic and contentTopics are required")
|
||||
}
|
||||
}
|
||||
|
||||
startTimeStr := r.URL.Query().Get("startTime")
|
||||
var startTime *int64
|
||||
if startTimeStr != "" {
|
||||
startTimeValue, err := strconv.ParseInt(startTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
startTime = &startTimeValue
|
||||
}
|
||||
|
||||
endTimeStr := r.URL.Query().Get("endTime")
|
||||
var endTime *int64
|
||||
if endTimeStr != "" {
|
||||
endTimeValue, err := strconv.ParseInt(endTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
endTime = &endTimeValue
|
||||
}
|
||||
|
||||
var cursor []byte
|
||||
cursorStr := r.URL.Query().Get("cursor")
|
||||
if cursorStr != "" {
|
||||
cursor, err = base64.URLEncoding.DecodeString(cursorStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
options = append(options, store.WithCursor(cursor))
|
||||
}
|
||||
|
||||
pageSizeStr := r.URL.Query().Get("pageSize")
|
||||
ascendingStr := r.URL.Query().Get("ascending")
|
||||
if ascendingStr != "" || pageSizeStr != "" {
|
||||
ascending := true
|
||||
pageSize := uint64(legacy_store.DefaultPageSize)
|
||||
if ascendingStr != "" {
|
||||
ascending, err = strconv.ParseBool(ascendingStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if pageSizeStr != "" {
|
||||
pageSize, err = strconv.ParseUint(pageSizeStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if pageSize > legacy_store.MaxPageSize {
|
||||
pageSize = legacy_store.MaxPageSize
|
||||
}
|
||||
}
|
||||
|
||||
options = append(options, store.WithPaging(ascending, pageSize))
|
||||
}
|
||||
|
||||
var query store.Criteria
|
||||
if isMsgHashCriteria {
|
||||
query = store.MessageHashCriteria{
|
||||
MessageHashes: hashes,
|
||||
}
|
||||
} else {
|
||||
query = store.FilterCriteria{
|
||||
ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopicsArr...),
|
||||
TimeStart: startTime,
|
||||
TimeEnd: endTime,
|
||||
}
|
||||
}
|
||||
|
||||
return query, options, nil
|
||||
}
|
||||
|
||||
func writeStoreError(w http.ResponseWriter, code int, err error) {
|
||||
writeResponse(w, &storepb.StoreQueryResponse{StatusCode: proto.Uint32(uint32(code)), StatusDesc: proto.String(err.Error())}, code)
|
||||
}
|
||||
|
||||
func (d *StoreQueryService) getV3Messages(w http.ResponseWriter, r *http.Request) {
|
||||
query, options, err := getStoreParams(r)
|
||||
if err != nil {
|
||||
writeStoreError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := d.node.Store().Request(ctx, query, options...)
|
||||
if err != nil {
|
||||
writeLegacyStoreError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
|
||||
writeErrOrResponse(w, nil, result.Response())
|
||||
}
|
||||
135
cmd/waku/server/rest/store_api.yaml
Normal file
135
cmd/waku/server/rest/store_api.yaml
Normal file
@ -0,0 +1,135 @@
|
||||
# /store/v3/messages:
|
||||
get:
|
||||
summary: Gets message history
|
||||
description: >
|
||||
Retrieves Waku message history. The returned history
|
||||
can be potentially filtered by optional request parameters.
|
||||
operationId: getMessageHistory
|
||||
tags:
|
||||
- store
|
||||
parameters:
|
||||
- name: peerAddr
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
description: >
|
||||
P2P fully qualified peer multiaddress
|
||||
in the format `(ip4|ip6)/tcp/p2p/$peerId` and URL-encoded.
|
||||
example: '%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN'
|
||||
|
||||
- name: includeData
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
Boolean indicating if the query should return messages (data) or hashes only.
|
||||
A value of 'false' returns hashes only.
|
||||
A value of 'true' returns hashes AND messages.
|
||||
Default value is 'false'
|
||||
example: 'true'
|
||||
|
||||
- name: pubsubTopic
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
The pubsub topic on which a WakuMessage is published.
|
||||
If left empty, no filtering is applied.
|
||||
It is also intended for pagination purposes.
|
||||
It should be a URL-encoded string.
|
||||
example: 'my%20pubsub%20topic'
|
||||
|
||||
- name: contentTopics
|
||||
in: query
|
||||
schema: string
|
||||
description: >
|
||||
Comma-separated list of content topics. When specified,
|
||||
only WakuMessages that are linked to any of the given
|
||||
content topics will be delivered in the get response.
|
||||
It should be a URL-encoded-comma-separated string.
|
||||
example: 'my%20first%20content%20topic%2Cmy%20second%20content%20topic%2Cmy%20third%20content%20topic'
|
||||
|
||||
- name: startTime
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
The inclusive lower bound on the timestamp of
|
||||
queried WakuMessages. This field holds the
|
||||
Unix epoch time in nanoseconds as a 64-bits
|
||||
integer value.
|
||||
example: '1680590945000000000'
|
||||
|
||||
- name: endTime
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
The inclusive upper bound on the timestamp of
|
||||
queried WakuMessages. This field holds the
|
||||
Unix epoch time in nanoseconds as a 64-bits
|
||||
integer value.
|
||||
example: '1680590945000000000'
|
||||
|
||||
- name: hashes
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
Comma-separated list of message hashes.
|
||||
URL-base64-encoded string computed as a hash of messages.
|
||||
Used to find messages by hash.
|
||||
example: 'Gc4ACThW5t2QQO82huq3WnDv%2FapPPJpD%2FwJfxDxAnR0%3D'
|
||||
|
||||
- name: cursor
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
Cursor field intended for pagination purposes.
|
||||
URL-base64-encoded string computed as a hash of a message.
|
||||
It could be empty for retrieving the first page,
|
||||
and will be returned from the GET response so that
|
||||
it can be part of the next page request.
|
||||
example: 'Gc4ACThW5t2QQO82huq3WnDv%2FapPPJpD%2FwJfxDxAnR0%3D'
|
||||
|
||||
- name: pageSize
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
Number of messages to retrieve per page
|
||||
example: '5'
|
||||
|
||||
- name: ascending
|
||||
in: query
|
||||
schema:
|
||||
type: string
|
||||
description: >
|
||||
"true" for paging forward, "false" for paging backward.
|
||||
If not specified or if specified with an invalid value, the default is "true".
|
||||
example: "true"
|
||||
|
||||
responses:
|
||||
'200':
|
||||
description: Waku message history.
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/StoreQueryResponse'
|
||||
'400':
|
||||
description: Bad request error.
|
||||
content:
|
||||
text/plain:
|
||||
type: string
|
||||
'412':
|
||||
description: Precondition failed.
|
||||
content:
|
||||
text/plain:
|
||||
type: string
|
||||
'500':
|
||||
description: Internal server error.
|
||||
content:
|
||||
text/plain:
|
||||
type: string
|
||||
83
cmd/waku/server/rest/utils.go
Normal file
83
cmd/waku/server/rest/utils.go
Normal file
@ -0,0 +1,83 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// The functions writes error response in plain text format with specified statusCode
|
||||
func writeErrResponse(w http.ResponseWriter, log *zap.Logger, err error, statusCode int) {
|
||||
w.WriteHeader(statusCode)
|
||||
_, err = w.Write([]byte(err.Error()))
|
||||
if err != nil {
|
||||
log.Error("error while writing response", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// This function writes error or response in json format with statusCode as 500 in case of error
|
||||
func writeErrOrResponse(w http.ResponseWriter, err error, value interface{}) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
jsonResponse, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = w.Write(jsonResponse)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// This function writes a response in json format
|
||||
func writeResponse(w http.ResponseWriter, value interface{}, code int) {
|
||||
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
|
||||
jsonResponse, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// w.Write implicitly writes a 200 status code
|
||||
// and only once we can write 2xx-5xx status code
|
||||
// so any statusCode apart from 1xx being written to the header, will be ignored.
|
||||
w.WriteHeader(code)
|
||||
_, _ = w.Write(jsonResponse)
|
||||
}
|
||||
|
||||
func topicFromPath(w http.ResponseWriter, req *http.Request, field string, logger *zap.Logger) string {
|
||||
topic := chi.URLParam(req, field)
|
||||
if topic == "" {
|
||||
errMissing := fmt.Errorf("missing %s", field)
|
||||
writeGetMessageErr(w, errMissing, http.StatusBadRequest, logger)
|
||||
return ""
|
||||
}
|
||||
topic, err := url.QueryUnescape(topic)
|
||||
if err != nil {
|
||||
errInvalid := fmt.Errorf("invalid %s format", field)
|
||||
writeGetMessageErr(w, errInvalid, http.StatusBadRequest, logger)
|
||||
return ""
|
||||
}
|
||||
return topic
|
||||
}
|
||||
|
||||
func writeGetMessageErr(w http.ResponseWriter, err error, code int, logger *zap.Logger) {
|
||||
// write status before the body
|
||||
w.WriteHeader(code)
|
||||
logger.Error("get message", zap.Error(err))
|
||||
if _, err := w.Write([]byte(err.Error())); err != nil {
|
||||
logger.Error("writing response", zap.Error(err))
|
||||
}
|
||||
}
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/waku-org/go-waku/waku/persistence"
|
||||
"github.com/waku-org/go-waku/waku/persistence/sqlite"
|
||||
@ -12,10 +13,10 @@ import (
|
||||
|
||||
func MemoryDB(t *testing.T) *persistence.DBStore {
|
||||
var db *sql.DB
|
||||
db, migration, err := sqlite.NewDB(":memory:")
|
||||
db, err := sqlite.NewDB(":memory:", utils.Logger())
|
||||
require.NoError(t, err)
|
||||
|
||||
dbStore, err := persistence.NewDBStore(utils.Logger(), persistence.WithDB(db), persistence.WithMigrations(migration))
|
||||
dbStore, err := persistence.NewDBStore(prometheus.DefaultRegisterer, utils.Logger(), persistence.WithDB(db), persistence.WithMigrations(sqlite.Migrations))
|
||||
require.NoError(t, err)
|
||||
|
||||
return dbStore
|
||||
104
cmd/waku/server/rest/waku_rest.go
Normal file
104
cmd/waku/server/rest/waku_rest.go
Normal file
@ -0,0 +1,104 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type WakuRest struct {
|
||||
node *node.WakuNode
|
||||
server *http.Server
|
||||
|
||||
log *zap.Logger
|
||||
|
||||
relayService *RelayService
|
||||
filterService *FilterService
|
||||
}
|
||||
|
||||
type RestConfig struct {
|
||||
Address string
|
||||
Port uint
|
||||
EnablePProf bool
|
||||
EnableAdmin bool
|
||||
RelayCacheCapacity uint
|
||||
FilterCacheCapacity uint
|
||||
}
|
||||
|
||||
func NewWakuRest(node *node.WakuNode, config RestConfig, log *zap.Logger) *WakuRest {
|
||||
wrpc := new(WakuRest)
|
||||
wrpc.log = log.Named("rest")
|
||||
|
||||
mux := chi.NewRouter()
|
||||
mux.Use(middleware.Logger)
|
||||
mux.Use(middleware.NoCache)
|
||||
mux.Use(func(h http.Handler) http.Handler {
|
||||
fn := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
return http.HandlerFunc(fn)
|
||||
})
|
||||
if config.EnablePProf {
|
||||
mux.Mount("/debug", middleware.Profiler())
|
||||
}
|
||||
|
||||
_ = NewDebugService(node, mux)
|
||||
_ = NewHealthService(node, mux)
|
||||
_ = NewStoreQueryService(node, mux)
|
||||
_ = NewLegacyStoreService(node, mux)
|
||||
_ = NewLightpushService(node, mux, log)
|
||||
|
||||
listenAddr := fmt.Sprintf("%s:%d", config.Address, config.Port)
|
||||
|
||||
server := &http.Server{
|
||||
Addr: listenAddr,
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
wrpc.node = node
|
||||
wrpc.server = server
|
||||
|
||||
if node.Relay() != nil {
|
||||
relayService := NewRelayService(node, mux, config.RelayCacheCapacity, log)
|
||||
wrpc.relayService = relayService
|
||||
}
|
||||
|
||||
if config.EnableAdmin {
|
||||
_ = NewAdminService(node, mux, wrpc.log)
|
||||
}
|
||||
|
||||
if node.FilterLightnode() != nil {
|
||||
filterService := NewFilterService(node, mux, int(config.FilterCacheCapacity), log)
|
||||
server.RegisterOnShutdown(func() {
|
||||
filterService.Stop()
|
||||
})
|
||||
wrpc.filterService = filterService
|
||||
}
|
||||
|
||||
return wrpc
|
||||
}
|
||||
|
||||
func (r *WakuRest) Start(ctx context.Context, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
if r.node.FilterLightnode() != nil {
|
||||
go r.filterService.Start(ctx)
|
||||
}
|
||||
|
||||
go func() {
|
||||
_ = r.server.ListenAndServe()
|
||||
}()
|
||||
r.log.Info("server started", zap.String("addr", r.server.Addr))
|
||||
}
|
||||
|
||||
func (r *WakuRest) Stop(ctx context.Context) error {
|
||||
r.log.Info("shutting down server")
|
||||
return r.server.Shutdown(ctx)
|
||||
}
|
||||
@ -13,7 +13,7 @@ func TestWakuRest(t *testing.T) {
|
||||
n, err := node.New(options)
|
||||
require.NoError(t, err)
|
||||
|
||||
rpc := NewWakuRest(n, "127.0.0.1", 8080, true, true, false, 10, utils.Logger())
|
||||
rpc := NewWakuRest(n, RestConfig{Address: "127.0.0.1", Port: 8080, EnablePProf: false, EnableAdmin: false, RelayCacheCapacity: 10}, utils.Logger())
|
||||
require.NotNil(t, rpc.server)
|
||||
require.Equal(t, rpc.server.Addr, "127.0.0.1:8080")
|
||||
}
|
||||
23
cmd/waku/server/rln.go
Normal file
23
cmd/waku/server/rln.go
Normal file
@ -0,0 +1,23 @@
|
||||
//go:build !gowaku_no_rln
|
||||
// +build !gowaku_no_rln
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/waku-org/go-waku/waku/v2/node"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/rln"
|
||||
)
|
||||
|
||||
func AppendRLNProof(node *node.WakuNode, msg *pb.WakuMessage) error {
|
||||
_, rlnEnabled := node.RLNRelay().(*rln.WakuRLNRelay)
|
||||
if rlnEnabled {
|
||||
err := node.RLNRelay().AppendRLNProof(msg, node.Timesource().Now())
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not append rln proof: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
50
cmd/waku/server/utils.go
Normal file
50
cmd/waku/server/utils.go
Normal file
@ -0,0 +1,50 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/filter"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
|
||||
"github.com/waku-org/go-waku/waku/v2/protocol/store"
|
||||
)
|
||||
|
||||
func IsWakuProtocol(protocol protocol.ID) bool {
|
||||
return protocol == filter.FilterPushID_v20beta1 ||
|
||||
protocol == filter.FilterSubscribeID_v20beta1 ||
|
||||
protocol == relay.WakuRelayID_v200 ||
|
||||
protocol == lightpush.LightPushID_v20beta1 ||
|
||||
protocol == legacy_store.StoreID_v20beta4 ||
|
||||
protocol == store.StoreQueryID_v300
|
||||
}
|
||||
|
||||
type Base64URLByte []byte
|
||||
|
||||
// UnmarshalText is used by json.Unmarshal to decode both url-safe and standard
|
||||
// base64 encoded strings with and without padding
|
||||
func (h *Base64URLByte) UnmarshalText(b []byte) error {
|
||||
inputValue := ""
|
||||
if b != nil {
|
||||
inputValue = string(b)
|
||||
}
|
||||
|
||||
enc := base64.StdEncoding
|
||||
if strings.ContainsAny(inputValue, "-_") {
|
||||
enc = base64.URLEncoding
|
||||
}
|
||||
if len(inputValue)%4 != 0 {
|
||||
enc = enc.WithPadding(base64.NoPadding)
|
||||
}
|
||||
|
||||
decodedBytes, err := enc.DecodeString(inputValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*h = decodedBytes
|
||||
|
||||
return nil
|
||||
}
|
||||
11
cmd/waku/sys_not_unix.go
Normal file
11
cmd/waku/sys_not_unix.go
Normal file
@ -0,0 +1,11 @@
|
||||
//go:build !linux && !darwin && !windows
|
||||
|
||||
package main
|
||||
|
||||
import "runtime"
|
||||
|
||||
// TODO: figure out how to get the number of file descriptors on Windows and other systems
//
// getNumFDs reports the limit on open file descriptors. On platforms other
// than linux/darwin/windows there is no supported way to query it, so a
// message is printed and 0 is returned to mean "unknown".
// NOTE: the original file used fmt.Println without importing "fmt", which
// does not compile; the import block must include "fmt".
func getNumFDs() int {
	fmt.Println("cannot determine number of file descriptors on ", runtime.GOOS)
	return 0
}
|
||||
18
cmd/waku/sys_unix.go
Normal file
18
cmd/waku/sys_unix.go
Normal file
@ -0,0 +1,18 @@
|
||||
//go:build linux || darwin
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func getNumFDs() int {
|
||||
var l unix.Rlimit
|
||||
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &l); err != nil {
|
||||
fmt.Println("failed to get fd limit:" + err.Error())
|
||||
return 0
|
||||
}
|
||||
return int(l.Cur)
|
||||
}
|
||||
11
cmd/waku/sys_windows.go
Normal file
11
cmd/waku/sys_windows.go
Normal file
@ -0,0 +1,11 @@
|
||||
//go:build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
func getNumFDs() int {
|
||||
return math.MaxInt
|
||||
}
|
||||
33
default.nix
Normal file
33
default.nix
Normal file
@ -0,0 +1,33 @@
|
||||
# Nix build expression for the go-waku node binary.
# All arguments are overridable; commit/version are embedded into the binary
# via -X ldflags consumed by the waku/v2/node package.
{
  pkgs ? import <nixpkgs> { },
  self ? ./.,
  subPkgs ? "cmd/waku",
  ldflags ? [],
  # Optional name for the installed binary (see postInstall below).
  output ? null,
  # Short (7-char) commit hash; falls back to "dirty" for local working trees.
  commit ? builtins.substring 0 7 (self.rev or "dirty"),
  version ? builtins.readFile ./VERSION,
}:

pkgs.buildGo123Module {
  name = "go-waku";
  src = self;

  subPackages = subPkgs;
  # Build tag presumably disables RLN support and its native deps — TODO confirm.
  tags = ["gowaku_no_rln"];
  ldflags = [
    "-X github.com/waku-org/go-waku/waku/v2/node.GitCommit=${commit}"
    "-X github.com/waku-org/go-waku/waku/v2/node.Version=${version}"
  ] ++ ldflags;
  doCheck = false;

  # Otherwise library would be just called bin/c.
  postInstall = if builtins.isString output then ''
    mv $out/bin/* $out/bin/${output}
  '' else "";

  # FIXME: This needs to be manually changed when updating modules.
  vendorHash = "sha256-uz9IVTEd+3UypZQc2CVWCFeLE4xEagn9YT9W2hr0K/o=";

  # Fix for 'nix run' trying to execute 'go-waku'.
  meta = { mainProgram = "waku"; };
}
|
||||
30
docker/Dockerfile.test.amd64
Normal file
30
docker/Dockerfile.test.amd64
Normal file
@ -0,0 +1,30 @@
|
||||
# BUILD IMAGE --------------------------------------------------------
FROM golang:1.20 as builder

WORKDIR /app
COPY . .

# Build the final node binary
RUN make -j$(nproc) build

# ACTUAL IMAGE -------------------------------------------------------

FROM debian:12.1-slim

LABEL maintainer="romanzac@status.im"
LABEL source="https://github.com/waku-org/go-waku"
LABEL description="go-waku: Waku V2 node - test"

# color, nocolor, json
ENV GOLOG_LOG_FMT=nocolor

# ca-certificates is required for outbound TLS connections from the node.
RUN apt update && apt install -y ca-certificates

# go-waku default ports
EXPOSE 9000 30303 60000 60001 8008 8009

# Copy only the statically built binary from the builder stage.
COPY --from=builder /app/build/waku /usr/bin/waku

ENTRYPOINT ["/usr/bin/waku"]
# By default just show help if called without arguments
CMD ["--help"]
|
||||
@ -30,7 +30,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
discoveryURL := "enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@test.waku.nodes.status.im"
|
||||
discoveryURL := "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im"
|
||||
nodes, err := dnsdisc.RetrieveNodes(context.Background(), discoveryURL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -40,4 +40,4 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
`dnsdisc.RetrieveNodes` can also accept a `WithNameserver(nameserver string)` option which can be used to specify the nameserver to use to retrieve the TXT record from the domain name
|
||||
`dnsdisc.RetrieveNodes` can also accept a `WithNameserver(nameserver string)` option which can be used to specify the nameserver to use to retrieve the TXT record from the domain name
|
||||
|
||||
@ -15,15 +15,13 @@ import (
|
||||
)
|
||||
|
||||
...
|
||||
wakuNode, err := node.New(context.Background(),
|
||||
node.WithWakuFilter(false),
|
||||
)
|
||||
wakuNode, err := node.New(node.WithWakuFilter(false))
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := wakuNode.Start(); err != nil {
|
||||
if err := wakuNode.Start(context.Background()); err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
@ -40,7 +38,7 @@ One of these options must be specified when instantiating a node supporting the
|
||||
```go
|
||||
...
|
||||
|
||||
peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.wakuv2.test.statusim.net/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ")
|
||||
peerAddr, err := multiaddr.NewMultiaddr("/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@ -25,15 +25,13 @@ import (
|
||||
)
|
||||
|
||||
...
|
||||
wakuNode, err := node.New(context.Background(),
|
||||
node.WithLightPush(),
|
||||
)
|
||||
wakuNode, err := node.New(node.WithLightPush())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := wakuNode.Start(); err != nil {
|
||||
if err := wakuNode.Start(context.Background()); err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
@ -67,22 +65,17 @@ if err != nil {
|
||||
```
|
||||
|
||||
|
||||
To send a message, it needs to be wrapped into a [`WakuMessage`](https://rfc.vac.dev/spec/14/) protobuffer. The payload of the message is not limited to strings. Any kind of data that can be serialized
|
||||
To send a message, it needs to be wrapped into a [`WakuMessage`](https://rfc.vac.dev/spec/14/) protobuffer.
|
||||
The payload of the message is not limited to strings. Any kind of data that can be serialized
|
||||
into a `[]byte` can be sent as long as it does not exceed the maximum length a message can have (~1MB)
|
||||
|
||||
The following functions can be used to publish a message:
|
||||
- `wakuNode.Lightpush().Publish(ctx, msg, opts...)` - to send a message to the default waku pubsub topic
|
||||
- `wakuNode.Lightpush().PublishToTopic(ctx, msg, topic, opts...)` - to send a message to a custom pubsub topic
|
||||
`wakuNode.Lightpush().Publish(ctx, msg, opts...)` is used to publish a message. This function will return a message id on success, or an error if the message could not be published.
|
||||
|
||||
Both of these functions will return a message id on success, or an error if the message could not be published.
|
||||
|
||||
If no options are specified, go-waku will automatically choose the peer used to broadcast the message via Lightpush. This behaviour can be controlled via options:
|
||||
If no options are specified, go-waku will automatically choose the peer used to broadcast the message via Lightpush and publish the message to a pubsub topic derived from the content topic of the message. This behaviour can be controlled via options:
|
||||
|
||||
### Options
|
||||
|
||||
- `lightpush.WithPubSubTopic(topic)` - broadcast the message using a custom pubsub topic
|
||||
- `lightpush.WithDefaultPubsubTopic()` - broadcast the message to the default pubsub topic
|
||||
- `lightpush.WithPeer(peerID)` - use an specific peer ID (which should be part of the node peerstore) to broadcast the message with
|
||||
- `lightpush.WithAutomaticPeerSelection(host)` - automatically select a peer that supports lightpush protocol from the peerstore to broadcast the message with
|
||||
- `lightpush.WithFastestPeerSelection(ctx)` - automatically select a peer based on its ping reply time
|
||||
|
||||
|
||||
|
||||
|
||||
@ -16,15 +16,13 @@ import (
|
||||
)
|
||||
|
||||
...
|
||||
wakuNode, err := node.New(context.Background(),
|
||||
node.WithWakuRelay(),
|
||||
)
|
||||
wakuNode, err := node.New(node.WithWakuRelay())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := wakuNode.Start(); err != nil {
|
||||
if err := wakuNode.Start(context.Background()); err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
@ -43,34 +41,19 @@ One of these options must be specified when instantiating a node supporting the
|
||||
## Receiving messages
|
||||
```go
|
||||
...
|
||||
sub, err := wakuNode.Relay().Subscribe(context.Background())
|
||||
contentFilter := protocol.NewContentFilter(relay.DefaultWakuTopic)
|
||||
sub, err := wakuNode.Relay().Subscribe(context.Background(), contentFilter)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
for value := range sub.C {
|
||||
for value := range sub[0].C {
|
||||
fmt.Println("Received msg:", string(value.Message().Payload))
|
||||
}
|
||||
...
|
||||
```
|
||||
To receive messages sent via the relay protocol, you need to subscribe to a pubsub topic. This can be done via any of these functions:
|
||||
- `wakuNode.Relay().Subscribe(ctx)` - subscribes to the default waku pubsub topic `/waku/2/default-waku/proto`
|
||||
- `wakuNode.Relay().SubscribeToTopic(ctx, topic)` - subscribes to a custom pubsub topic
|
||||
|
||||
These functions return a `Subscription` struct containing a channel on which messages will be received. To stop receiving messages in this channel `sub.Unsubscribe()` can be executed which will close the channel (without unsubscribing from the pubsub topic)
|
||||
|
||||
> Pubsub topics should follow the [recommended usage](https://rfc.vac.dev/spec/23/) structure. For this purpose, the `NewPubsubTopic` helper function was created:
|
||||
```go
|
||||
import "github.com/waku-org/go-waku/waku/v2/protocol"
|
||||
|
||||
topic := protocol.NewPubsubTopic("the_topic_name", "the_encoding")
|
||||
/*
|
||||
fmt.Println(topic.String()) // => `/waku/2/the_topic_name/the_encoding`
|
||||
*/
|
||||
```
|
||||
|
||||
|
||||
To receive messages sent via the relay protocol, you need to subscribe by specifying a content filter with the function `Subscribe(ctx context.Context, contentFilter waku_proto.ContentFilter, opts ...RelaySubscribeOption) ([]*Subscription, error)`. This function returns a list of `Subscription` structs, each containing a channel on which messages will be received. To stop receiving messages, `WakuRelay`'s `Unsubscribe(ctx context.Context, contentFilter waku_proto.ContentFilter) error` can be executed, which will close the channel and stop the subscription; if no other subscriptions exist for the underlying pubsub topic, the pubsub topic is also unsubscribed.
|
||||
|
||||
## Sending messages
|
||||
|
||||
@ -97,11 +80,13 @@ if err != nil {
|
||||
To send a message, it needs to be wrapped into a [`WakuMessage`](https://rfc.vac.dev/spec/14/) protobuffer. The payload of the message is not limited to strings. Any kind of data that can be serialized
|
||||
into a `[]byte` can be sent as long as it does not exceed the maximum length a message can have (~1MB)
|
||||
|
||||
The following functions can be used to publish a message:
|
||||
- `wakuNode.Relay().Publish(ctx, msg)` - to send a message to the default waku pubsub topic
|
||||
- `wakuNode.Relay().PublishToTopic(ctx, msg, topic)` - to send a message to a custom pubsub topic
|
||||
`wakuNode.Relay().Publish(ctx, msg, opts...)` is used to publish a message. This function will return a message id on success, or an error if the message could not be published.
|
||||
|
||||
Both of these functions will return a message id on success, or an error if the message could not be published.
|
||||
If no options are specified, go-waku will automatically choose the peer used to broadcast the message via Relay and publish the message to a pubsub topic derived from the content topic of the message. This behaviour can be controlled via options:
|
||||
|
||||
### Options
|
||||
- `relay.WithPubSubTopic(topic)` - broadcast the message using a custom pubsub topic
|
||||
- `relay.WithDefaultPubsubTopic()` - broadcast the message to the default pubsub topic
|
||||
|
||||
> If `WithWakuRelayAndMinPeers` was used during the instantiation of the wakuNode, it should be possible to verify if there's enough peers for publishing to a topic with `wakuNode.Relay().EnoughPeersToPublish()` and `wakuNode.Relay().EnoughPeersToPublishToTopic(topic)`
|
||||
|
||||
|
||||
@ -28,20 +28,15 @@ if err != nil {
|
||||
// Handle error ...
|
||||
}
|
||||
|
||||
for {
|
||||
hasNext, err := result.Next(ctx)
|
||||
if err != nil {
|
||||
// Handle error ...
|
||||
break
|
||||
}
|
||||
|
||||
if !hasNext { // No more messages available
|
||||
break
|
||||
}
|
||||
|
||||
for !result.IsComplete() {
|
||||
for _, msg := range result.GetMessages() {
|
||||
// Do something with the messages
|
||||
}
|
||||
|
||||
err := result.Next(ctx)
|
||||
if err != nil {
|
||||
// Handle error ...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@ -4,7 +4,7 @@ Go-waku can be built on Linux, macOS and Windows
|
||||
|
||||
## Installing dependencies
|
||||
|
||||
Cloning and building go-waku requires Go +1.17, a C compiler, Make, Bash and Git.
|
||||
Cloning and building go-waku requires Go +1.19, a C compiler, Make, Bash and Git.
|
||||
|
||||
Go can be installed by following [these instructions](https://go.dev/doc/install)
|
||||
|
||||
@ -31,13 +31,13 @@ Assuming you use [Homebrew](https://brew.sh/) to manage packages
|
||||
brew install cmake
|
||||
```
|
||||
|
||||
## Building nwaku
|
||||
## Building go-waku
|
||||
|
||||
### 1. Clone the nwaku repository
|
||||
### 1. Clone the go-waku repository
|
||||
|
||||
```sh
|
||||
git clone https://github.com/waku-org/go-waku
|
||||
cd nwaku
|
||||
cd go-waku
|
||||
```
|
||||
|
||||
### 2. Build waku
|
||||
|
||||
@ -24,7 +24,7 @@ A node will attempt connection to all discovered nodes.
|
||||
|
||||
This can be used, for example, to connect to one of the existing fleets.
|
||||
Current URLs for the published fleet lists:
|
||||
- production fleet: `enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@prod.waku.nodes.status.im`
|
||||
- test fleet: `enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@test.waku.nodes.status.im`
|
||||
- production fleet: `enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im`
|
||||
- test fleet: `enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im`
|
||||
|
||||
See the [separate tutorial](../../tutorial/dns-disc.md) for a complete guide to DNS discovery.
|
||||
See the [separate tutorial](../../tutorial/dns-disc.md) for a complete guide to DNS discovery.
|
||||
|
||||
@ -17,12 +17,12 @@ or store and serve historical messages itself.
|
||||
|
||||
Ensure that `store` is enabled (this is `true` by default) and provide at least one store service node address with the `--storenode` CLI option.
|
||||
|
||||
See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/30303/p2p/16Uiu2HAkvWiyFsgRhuJEb9JfjYxEkoHLgnUQmr1N5mKWnYjxYRVm` as store service node.
|
||||
See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node.
|
||||
|
||||
```sh
|
||||
./build/waku \
|
||||
--store=true \
|
||||
--storenode=/dns4/node-01.ac-cn-hongkong-c.wakuv2.test.statusim.net/tcp/30303/p2p/16Uiu2HAkvWiyFsgRhuJEb9JfjYxEkoHLgnUQmr1N5mKWnYjxYRVm
|
||||
--storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp
|
||||
```
|
||||
|
||||
Your node can now send queries to retrieve historical messages
|
||||
|
||||
74
docs/operators/how-to/run-with-rln.md
Normal file
74
docs/operators/how-to/run-with-rln.md
Normal file
@ -0,0 +1,74 @@
|
||||
# How to run spam prevention on your go-waku node (RLN)
|
||||
|
||||
This guide explains how to run a go-waku node with RLN (Rate Limiting Nullifier) enabled.
|
||||
|
||||
[RLN](https://rfc.vac.dev/spec/32/) is a protocol integrated into waku v2,
|
||||
which prevents spam-based attacks on the network.
|
||||
|
||||
For further background on the research for RLN tailored to waku, refer
|
||||
to [this](https://rfc.vac.dev/spec/17/) RFC.
|
||||
|
||||
Registering to the membership group has been left out for brevity.
|
||||
If you would like to register to the membership group and send messages with RLN,
|
||||
refer to the [on-chain chat2 tutorial](../../tutorial/onchain-rln-relay-chat2.md).
|
||||
|
||||
This guide specifically allows a node to participate in RLN testnet
|
||||
You may alter the rln-specific arguments as required.
|
||||
|
||||
|
||||
## 1. Update the runtime arguments
|
||||
|
||||
Follow the steps from the [build](./build.md) and [run](./run.md) guides while replacing the run command with -
|
||||
|
||||
```bash
|
||||
export WAKU_FLEET=<enrtree of the fleet>
|
||||
export SEPOLIA_WS_NODE_ADDRESS=<WS RPC URL to a Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
|
||||
$WAKUNODE_DIR/build/waku \
|
||||
--dns-discovery \
|
||||
--dns-discovery-url="$WAKU_FLEET" \
|
||||
--discv5-discovery=true \
|
||||
--rln-relay=true \
|
||||
--rln-relay-dynamic=true \
|
||||
--rln-relay-eth-contract-address="$RLN_RELAY_CONTRACT_ADDRESS" \
|
||||
--rln-relay-eth-client-address="$SEPOLIA_WS_NODE_ADDRESS"
|
||||
```
|
||||
|
||||
OR
|
||||
|
||||
If you installed go-waku using a `.dpkg` or `.rpm` package, you can use the `waku` command instead of building go-waku yourself
|
||||
|
||||
OR
|
||||
|
||||
If you have the go-waku node within docker, you can replace the run command with -
|
||||
|
||||
```bash
|
||||
export WAKU_FLEET=<enrtree of the fleet>
|
||||
export SEPOLIA_WS_NODE_ADDRESS=<WS RPC URL to a Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
|
||||
docker run -i -t -p 60000:60000 -p 9000:9000/udp \
|
||||
-v /absolute/path/to/your/rlnKeystore.json:/rlnKeystore.json:ro \
|
||||
wakuorg/go-waku:latest \
|
||||
--dns-discovery=true \
|
||||
--dns-discovery-url="$WAKU_FLEET" \
|
||||
--discv5-discovery \
|
||||
--rln-relay=true \
|
||||
--rln-relay-dynamic=true \
|
||||
--rln-relay-eth-contract-address="$RLN_RELAY_CONTRACT_ADDRESS" \
|
||||
--rln-relay-eth-client-address="$SEPOLIA_WS_NODE_ADDRESS"
|
||||
```
|
||||
|
||||
Following is the list of additional fields that have been added to the
|
||||
runtime arguments -
|
||||
|
||||
1. `--rln-relay`: Allows waku-rln-relay to be mounted into the setup of the go-waku node. All messages sent and received in this node will require to contain a valid proof that will be verified, and nodes that relay messages with invalid proofs will have their peer scoring affected negatively and will be eventually disconnected.
|
||||
2. `--rln-relay-dynamic`: Enables waku-rln-relay to connect to an ethereum node to fetch the membership group
|
||||
3. `--rln-relay-eth-contract-address`: The contract address of an RLN membership group
|
||||
4. `--rln-relay-eth-client-address`: The websocket url to a Sepolia ethereum node
|
||||
|
||||
The `--dns-discovery-url` flag should contain a valid URL with nodes encoded according to EIP-1459. You can read more about DNS Discovery [here](https://github.com/waku-org/nwaku/blob/master/docs/tutorial/dns-disc.md)
|
||||
|
||||
You should now have go-waku running, with RLN enabled!
|
||||
|
||||
|
||||
> Note: This guide will be updated in the future to include features like slashing.
|
||||
@ -17,13 +17,13 @@ See [this tutorial](./configure-key.md) if you want to generate and configure a
|
||||
- enable `relay` protocol
|
||||
- subscribe to the default pubsub topic, namely `/waku/2/default-waku/proto`
|
||||
- enable `store` protocol, but only as a client.
|
||||
This implies that the nwaku node will not persist any historical messages itself,
|
||||
This implies that the go-waku node will not persist any historical messages itself,
|
||||
but can query `store` service peers who do so.
|
||||
To configure `store` as a service node,
|
||||
see [this tutorial](./configure-store.md).
|
||||
|
||||
> **Note:** The `filter` and `lightpush` protocols are _not_ enabled by default.
|
||||
Consult the [configuration guide](./configure.md) on how to configure your nwaku node to run these protocols.
|
||||
Consult the [configuration guide](./configure.md) on how to configure your go-waku node to run these protocols.
|
||||
|
||||
Some typical non-default configurations are explained below.
|
||||
For more advanced configuration, see the [configuration guide](./configure.md).
|
||||
@ -33,7 +33,7 @@ Different ways to connect to other nodes are expanded upon in our [connection gu
|
||||
|
||||
Find the log entry beginning with `Listening on`.
|
||||
It should be printed at INFO level when you start your node
|
||||
and contains a list of all publically announced listening addresses for the nwaku node.
|
||||
and contains a list of all publicly announced listening addresses for the go-waku node.
|
||||
|
||||
For example
|
||||
|
||||
@ -80,7 +80,7 @@ returns a response similar to
|
||||
|
||||
## Finding your discoverable ENR address(es)
|
||||
|
||||
A nwaku node can encode its addressing information in an [Ethereum Node Record (ENR)](https://eips.ethereum.org/EIPS/eip-778) according to [`31/WAKU2-ENR`](https://rfc.vac.dev/spec/31/).
|
||||
A go-waku node can encode its addressing information in an [Ethereum Node Record (ENR)](https://eips.ethereum.org/EIPS/eip-778) according to [`31/WAKU2-ENR`](https://rfc.vac.dev/spec/31/).
|
||||
These ENR are most often used for discovery purposes.
|
||||
|
||||
### ENR for DNS discovery and DiscV5
|
||||
@ -111,10 +111,10 @@ to continually discover and connect to random peers for a more robust mesh.
|
||||
|
||||
A typical run configuration for a go-waku node is to connect to existing peers with known listening addresses using the `--staticnode` option.
|
||||
The `--staticnode` option can be repeated for each peer you want to connect to on startup.
|
||||
This is also useful if you want to run several nwaku instances locally
|
||||
This is also useful if you want to run several go-waku instances locally
|
||||
and therefore know the listening addresses of all peers.
|
||||
|
||||
As an example, consider a nwaku node that connects to two known peers
|
||||
As an example, consider a go-waku node that connects to two known peers
|
||||
on the same local host (with IP `0.0.0.0`)
|
||||
with TCP ports `60002` and `60003`,
|
||||
and peer IDs `16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H` and `16Uiu2HAmFBA7LGtwY5WVVikdmXVo3cKLqkmvVtuDu63fe8safeQJ` respectively.
|
||||
@ -130,7 +130,7 @@ We include an example below.
|
||||
```
|
||||
|
||||
|
||||
### Connecting to the `wakuv2.prod` network
|
||||
### Connecting to the `waku.sandbox` network
|
||||
|
||||
You can use DNS discovery to bootstrap connection to the existing production network.
|
||||
Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstrap entries to the routing table.
|
||||
@ -138,11 +138,11 @@ Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstr
|
||||
```sh
|
||||
./build/waku \
|
||||
--dns-discovery=true \
|
||||
--dns-discovery-url=enrtree://ANTL4SLG2COUILKAPE7EF2BYNL2SHSHVCHLRD5J7ZJLN5R3PRJD2Y@prod.waku.nodes.status.im \
|
||||
--dns-discovery-url=enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
|
||||
--discv5-discovery=true
|
||||
```
|
||||
|
||||
### Connecting to the `wakuv2.test` network
|
||||
### Connecting to the `waku.test` network
|
||||
|
||||
You can use DNS discovery to bootstrap connection to the existing test network.
|
||||
Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstrap entries to the routing table.
|
||||
@ -150,7 +150,7 @@ Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstr
|
||||
```sh
|
||||
./build/waku \
|
||||
--dns-discovery=true \
|
||||
--dns-discovery-url=enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@test.waku.nodes.status.im \
|
||||
--dns-discovery-url=enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im \
|
||||
--discv5-discovery=true
|
||||
```
|
||||
|
||||
@ -159,7 +159,7 @@ Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstr
|
||||
Often go-waku nodes choose to also store historical messages
|
||||
from where it can be queried by other peers who may have been temporarily offline.
|
||||
For example, a typical configuration for such a store service node,
|
||||
[connecting to the `wakuv2.test`](#connecting-to-the-wakuv2test-fleet) fleet on startup,
|
||||
[connecting to the `waku.test`](#connecting-to-the-wakutest-network) fleet on startup,
|
||||
appears below.
|
||||
|
||||
```sh
|
||||
@ -169,7 +169,7 @@ appears below.
|
||||
--db-path=/mnt/go-waku/data/db1/ \
|
||||
--store-capacity=150000 \
|
||||
--dns-discovery=true \
|
||||
--dns-discovery-url=enrtree://AOGECG2SPND25EEFMAJ5WF3KSGJNSGV356DSTL2YVLLZWIV6SAYBM@test.waku.nodes.status.im \
|
||||
--dns-discovery-url=enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im \
|
||||
--discv5-discovery=true
|
||||
```
|
||||
|
||||
@ -180,5 +180,5 @@ See our [store configuration tutorial](./configure-store.md) for more.
|
||||
A running go-waku node can be interacted with using the [Waku v2 JSON RPC API](https://rfc.vac.dev/spec/16/).
|
||||
|
||||
> **Note:** Private and Admin API functionality are disabled by default.
|
||||
To configure a nwaku node with these enabled,
|
||||
use the `--rpc-admin:true` and `--rpc-private:true` CLI options.
|
||||
To configure a go-waku node with these enabled,
|
||||
use the `--rpc-admin:true` and `--rpc-private:true` CLI options.
|
||||
|
||||
@ -8,7 +8,7 @@ For a more advanced configuration see our [configuration guides](./how-to/config
|
||||
|
||||
[Build the go-waku node](./how-to/build.md)
|
||||
or download a precompiled binary from our [releases page](https://github.com/waku-org/go-waku/releases).
|
||||
<!-- Docker images are published to [statusteam/go-waku](https://hub.docker.com/r/statusteam/go-waku/tags) on DockerHub. -->
|
||||
<!-- Docker images are published to [wakuorg/go-waku](https://hub.docker.com/r/wakuorg/go-waku/tags) on DockerHub. -->
|
||||
<!-- TODO: more advanced explanation on finding and using docker images -->
|
||||
|
||||
## 2. Run
|
||||
@ -16,7 +16,7 @@ or download a precompiled binary from our [releases page](https://github.com/wak
|
||||
[Run the go-waku node](./how-to/run.md) using a default or common configuration
|
||||
or [configure](./how-to/configure.md) the node for more advanced use cases.
|
||||
|
||||
[Connect](./how-to/connect.md) the nwaku node to other peers to start communicating.
|
||||
[Connect](./how-to/connect.md) the go-waku node to other peers to start communicating.
|
||||
|
||||
## 3. Interact
|
||||
|
||||
|
||||
@ -1,33 +1,28 @@
|
||||
# Spam-protected chat2 application with on-chain group management
|
||||
|
||||
This document is a tutorial on how to run the chat2 application in the spam-protected mode using the Waku-RLN-Relay protocol and with dynamic/on-chain group management.
|
||||
In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Goerli network which is one of the Ethereum testnets.
|
||||
In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Sepolia network which is one of the Ethereum testnets.
|
||||
Members can be dynamically added to the group and the group size can grow up to 2^20 members.
|
||||
This differs from the prior test scenarios in which the RLN group was static and the set of members' keys was hardcoded and fixed.
|
||||
|
||||
|
||||
## Prerequisites
|
||||
To complete this tutorial, you will need 1) an account with at least `0.001` ethers on the Goerli testnet and 2) a hosted node on the Goerli testnet.
|
||||
In case you are not familiar with either of these two steps, you may follow the following tutorial to fulfill the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md).
|
||||
Note that the required `0.001` ethers correspond to the registration fee,
|
||||
however, you still need to have more funds in your account to cover the cost of the transaction gas fee.
|
||||
|
||||
To complete this tutorial, you will need
|
||||
1. An rln keystore file with credentials to the rln membership smart contract you wish to use. You may obtain this by registering to the smart contract and generating a keystore. It is possible to use go-waku to register into the smart contract:
|
||||
```
|
||||
make
|
||||
./build/waku generate-rln-credentials --eth-account-private-key=<private-key> --eth-contract-address=<0x000...> --eth-client-address=<eth-client-rpc-or-wss-endpoint> --cred-path=./rlnKeystore.json
|
||||
```
|
||||
Once this command is executed, a keystore file will be generated at the path defined in the `--cred-path` flag. You may now use this keystore with wakunode2 or chat2.
|
||||
|
||||
|
||||
## Overview
|
||||
Figure 1 provides an overview of the interaction of the chat2 clients with the test fleets and the membership contract.
|
||||
At a high level, when a chat2 client is run with Waku-RLN-Relay mounted in on-chain mode, it creates an RLN credential (i.e., an identity key and an identity commitment key) and
|
||||
sends a transaction to the membership contract to register the corresponding membership identity commitment key.
|
||||
This transaction will also transfer `0.001` Ethers to the contract as a membership fee.
|
||||
This amount plus the transaction fee will be deducted from the supplied Goerli account.
|
||||
Once the transaction is mined and the registration is successful, the registered credential will get displayed on the console of your chat2 client.
|
||||
You may copy the displayed RLN credential and reuse them for the future execution of the chat2 application.
|
||||
Proper instructions in this regard is provided in the following [section](#how-to-persist-and-reuse-rln-credential).
|
||||
If you choose not to reuse the same credential, then for each execution, a new registration will take place and more funds will get deducted from your Goerli account.
|
||||
At a high level, when a chat2 client is run with Waku-RLN-Relay mounted in on-chain mode, the passed in credential will get displayed on the console of your chat2 client.
|
||||
Under the hood, the chat2 client constantly listens to the membership contract and keeps itself updated with the latest state of the group.
|
||||
|
||||
In the following test setting, the chat2 clients are to be connected to the Waku test fleets as their first hop.
|
||||
The test fleets will act as routers and are also set to run Waku-RLN-Relay over the same pubsub topic and content topic as chat2 clients i.e., the default pubsub topic of `/waku/2/default-waku/proto` and the content topic of `/toy-chat/2/luzhou/proto`.
|
||||
The test fleets will act as routers and are also set to run Waku-RLN-Relay over the same pubsub topic and content topic as chat2 clients i.e., the default pubsub topic of `/waku/2/default-waku/proto` and the content topic of `/toy-chat/3/mingde/proto`.
|
||||
Spam messages published on the said combination of topics will be caught by the test fleet nodes and will not be routed.
|
||||
Note that spam protection does not rely on the presence of the test fleets.
|
||||
In fact, all the chat2 clients are also capable of catching and dropping spam messages if they receive any.
|
||||
@ -48,7 +43,7 @@ git clone https://github.com/waku-org/go-waku
|
||||
cd go-waku
|
||||
```
|
||||
## Build chat2
|
||||
```
|
||||
```bash
|
||||
make chat2
|
||||
```
|
||||
|
||||
@ -56,34 +51,40 @@ make chat2
|
||||
|
||||
Run the following command to set up your chat2 client.
|
||||
|
||||
```
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/2/luzhou/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0x4252105670fe33d2947e8ead304969849e64f2a6 --rln-relay-eth-account-private-key=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx --rln-relay-eth-client-address=xxxx
|
||||
```bash
|
||||
./build/chat2 --fleet=test \
|
||||
--content-topic=/toy-chat/3/mingde/proto \
|
||||
--rln-relay=true \
|
||||
--rln-relay-dynamic=true \
|
||||
--rln-relay-eth-contract-address=0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 \
|
||||
--rln-relay-cred-path=xxx/xx/rlnKeystore.json \
|
||||
--rln-relay-cred-password=xxxx \
|
||||
--rln-relay-eth-client-address=xxxx
|
||||
```
|
||||
|
||||
In this command
|
||||
- the `--fleet=test` indicates that the chat2 app gets connected to the test fleets.
|
||||
- the `toy-chat/2/luzhou/proto` passed to the `--content-topic` option indicates the content topic on which the chat2 application is going to run.
|
||||
- the `toy-chat/3/mingde/proto` passed to the `--content-topic` option indicates the content topic on which the chat2 application is going to run.
|
||||
- the `--rln-relay` flag is set to `true` to enable the Waku-RLN-Relay protocol for spam protection.
|
||||
- the `--rln-relay-dynamic` flag is set to `true` to enable the on-chain mode of Waku-RLN-Relay protocol with dynamic group management.
|
||||
- the `--rln-relay-eth-contract-address` option gets the address of the membership contract.
|
||||
The current address of the contract is `0x4252105670fe33d2947e8ead304969849e64f2a6`.
|
||||
You may check the state of the contract on the [Goerli testnet](https://goerli.etherscan.io/address/0x4252105670fe33d2947e8ead304969849e64f2a6).
|
||||
- the `--rln-relay-eth-account-private-key` option is for your account private key on the Goerli testnet.
|
||||
It is made up of 64 hex characters (not sensitive to the `0x` prefix).
|
||||
- the `--rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Goerli testnet.
|
||||
The current address of the contract is `0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4`.
|
||||
You may check the state of the contract on the [Sepolia testnet](https://sepolia.etherscan.io/address/0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4).
|
||||
- the `--rln-relay-cred-path` option denotes the path to the keystore file described above
|
||||
- the `--rln-relay-cred-password` option denotes the password to the keystore
|
||||
- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Sepolia testnet.
|
||||
You need to replace the `xxxx` with the actual node's address.
|
||||
|
||||
For the last three config options i.e., `rln-relay-eth-account-address`, `rln-relay-eth-account-private-key`, and `rln-relay-eth-client-address`, if you do not know how to obtain those, you may use the following tutorial on the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md).
|
||||
For `--rln-relay-eth-client-address`, if you do not know how to obtain it, you may use the following tutorial on the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md).
|
||||
|
||||
You may set up more than one chat client,
|
||||
using the `--rln-relay-cred-path` flag, specifying in each client a different path to store the credentials, and using a different `--tcp-port`.
|
||||
using the `--rln-relay-cred-path` flag, specifying in each client a different path to store the credentials.
|
||||
|
||||
Once you run the command, you will see the following message:
|
||||
```
|
||||
Setting up dynamic rln...
|
||||
```
|
||||
At this phase, your RLN credential are getting created and a transaction is being sent to the membership smart contract.
|
||||
It will take some time for the transaction to be finalized. Afterwards, messages related to setting up the connections of your chat app will be shown,
|
||||
At this phase, RLN is being set up by obtaining the membership information from the smart contract. Afterwards, messages related to setting up the connections of your chat app will be shown,
|
||||
the content may differ on your screen though:
|
||||
```
|
||||
INFO: Welcome, Anonymous!
|
||||
@ -101,10 +102,10 @@ INFO: RLN config:
|
||||
- Your RLN identity commitment key is: 6c6598126ba10d1b70100893b76d7f8d7343eeb8f5ecfd48371b421c5aa6f012
|
||||
|
||||
INFO: attempting DNS discovery with enrtree://ANTL4SLG2COUILKAPE7EF2BYNL2SHSHVCHLRD5J7ZJLN5R3PRJD2Y@prod.waku.nodes.status.im
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA /dns4/node-01.ac-cn-hongkong- c.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD /dns4/node-01.do-ams3.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e]
|
||||
INFO: Connected to 16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA
|
||||
INFO: Connected to 16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e
|
||||
INFO: Connected to 16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB /dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV /dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE]
|
||||
INFO: Connected to 16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB
|
||||
INFO: Connected to 16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE
|
||||
INFO: Connected to 16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV
|
||||
INFO: No store node configured. Choosing one at random...
|
||||
```
|
||||
You will also see some historical messages being fetched, again the content may be different on your end:
|
||||
@ -145,9 +146,13 @@ The reason is that under the hood a zero-knowledge proof is being generated and
|
||||
|
||||
Try to spam the network by violating the message rate limit i.e.,
|
||||
sending more than one message per epoch.
|
||||
Your messages will be routed via test fleets that are running in spam-protected mode over the same content topic i.e., `/toy-chat/2/luzhou/proto` as your chat client.
|
||||
Your messages will be routed via test fleets that are running in spam-protected mode over the same content topic i.e., `/toy-chat/3/mingde/proto` as your chat client.
|
||||
Your spam activity will be detected by them and your message will not reach the rest of the chat clients.
|
||||
You can check this by running a second chat user and verifying that spam messages are not displayed as they are filtered by the test fleets.
|
||||
Furthermore, the chat client will prompt you with the following warning message indicating that the message rate is being violated:
|
||||
```
|
||||
ERROR: message rate violation!
|
||||
```
|
||||
A sample test scenario is illustrated in the [Sample test output section](#sample-test-output).
|
||||
|
||||
Once you are done with the test, make sure you close all the chat2 clients by typing the `/exit` command.
|
||||
@ -156,28 +161,32 @@ Once you are done with the test, make sure you close all the chat2 clients by ty
|
||||
Bye!
|
||||
```
|
||||
|
||||
## How to persist and reuse RLN credential
|
||||
## How to reuse RLN credential
|
||||
|
||||
You may pass the `--rln-relay-cred-path` config option to specify a path for 1) persisting RLN credentials and 2) retrieving persisted RLN credentials.
|
||||
RLN credential is persisted in the `rlnCredentials.txt` file under the specified path.
|
||||
If this file does not already exist under the supplied path, then a new credential is generated and persisted in the `rlnCredentials.txt` file.
|
||||
Otherwise, the chat client does not generate a new credential and will use, instead, the persisted RLN credential.
|
||||
You may pass the `--rln-relay-cred-path` config option to specify a path to a file for retrieving persisted RLN credentials.
|
||||
If the keystore exists in the path provided, it is used, and will default to the 0th element in the credential array.
|
||||
If the keystore does not exist in the path provided, a new keystore will be created and added to the directory it was supposed to be in.
|
||||
|
||||
You may provide an index to the credential you wish to use by passing the `--rln-relay-cred-index` config option.
|
||||
|
||||
You may provide an index to the membership you wish to use (within the same membership set) by passing the `--rln-relay-membership-group-index` config option.
|
||||
|
||||
```bash
|
||||
./build/chat2 --fleet:test --content-topic:/toy-chat/2/luzhou/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0x4252105670fe33d2947e8ead304969849e64f2a6 --rln-relay-eth-account-private-key=your_eth_private_key --rln-relay-eth-client-address==your_goerli_node --rln-relay-cred-path:./
|
||||
```
|
||||
|
||||
Note: If you are reusing credentials, you can omit the `--rln-relay-eth-account-private-key` flags
|
||||
|
||||
Therefore, the command to start chat2 would be -
|
||||
|
||||
```bash
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/2/luzhou/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0x4252105670fe33d2947e8ead304969849e64f2a6 --rln-relay-eth-client-address=your_goerli_node --rln-relay-cred-path=./
|
||||
./build/chat2 --fleet=test \
|
||||
--content-topic=/toy-chat/3/mingde/proto \
|
||||
--rln-relay=true \
|
||||
--rln-relay-dynamic=true \
|
||||
--rln-relay-eth-contract-address=0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 \
|
||||
--rln-relay-eth-client-address=your_sepolia_node \
|
||||
--rln-relay-cred-path=./rlnKeystore.json \
|
||||
--rln-relay-cred-password=your_password \
|
||||
--rln-relay-membership-index=0 \
|
||||
--rln-relay-membership-group-index=0
|
||||
```
|
||||
|
||||
# Sample test output
|
||||
In this section, a sample test of running two chat clients is provided.
|
||||
Note that the values used for `--rln-relay-eth-account-private-key`, and `--rln-relay-eth-client-address` in the following code snippets are junk and not valid.
|
||||
Note that the value used for `--rln-relay-eth-client-address` in the following code snippets is junk and not valid.
|
||||
|
||||
The two chat clients namely `Alice` and `Bob` are connected to the test fleets.
|
||||
`Alice` sends 4 messages i.e., `message1`, `message2`, `message3`, and `message4`.
|
||||
@ -187,9 +196,11 @@ The test fleets do not relay `message3` further, hence `Bob` never receives it.
|
||||
You can check this fact by looking at `Bob`'s console, where `message3` is missing.
|
||||
|
||||
**Alice**
|
||||
```
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/2/luzhou/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0x4252105670fe33d2947e8ead304969849e64f2a6 --rln-relay-eth-account-private-key=your_eth_private_key --rln-relay-eth-client-address=your_goerli_node --rln-relay-cred-path=./path/to/alice/folder --nickname=Alice
|
||||
```bash
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/3/mingde/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path=rlnKeystore.json --rln-relay-cred-password=password --rln-relay-eth-client-address=wss://sepolia.infura.io/ws/v3/12345678901234567890123456789012 --nickname=Alice
|
||||
```
|
||||
|
||||
```
|
||||
Seting up dynamic rln
|
||||
INFO: Welcome, Alice!
|
||||
INFO: type /help to see available commands
|
||||
@ -203,10 +214,10 @@ INFO: RLN config:
|
||||
- Your rln identity commitment key is: bd093cbf14fb933d53f596c33f98b3df83b7e9f7a1906cf4355fac712077cb28
|
||||
|
||||
INFO: attempting DNS discovery with enrtree://ANTL4SLG2COUILKAPE7EF2BYNL2SHSHVCHLRD5J7ZJLN5R3PRJD2Y@prod.waku.nodes.status.im
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA /dns4/node-01.ac-cn-hongkong- c.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD /dns4/node-01.do-ams3.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e]
|
||||
INFO: Connected to 16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA
|
||||
INFO: Connected to 16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e
|
||||
INFO: Connected to 16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB /dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV /dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE]
|
||||
INFO: Connected to 16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB
|
||||
INFO: Connected to 16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE
|
||||
INFO: Connected to 16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV
|
||||
INFO: No store node configured. Choosing one at random...
|
||||
|
||||
>> message1
|
||||
@ -235,9 +246,11 @@ INFO RLN Epoch: 165886593
|
||||
```
|
||||
|
||||
**Bob**
|
||||
```
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/2/luzhou/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0x4252105670fe33d2947e8ead304969849e64f2a6 --rln-relay-eth-account-private-key=your_eth_private_key --rln-relay-eth-client-address=your_goerli_node --rln-relay-cred-path=./path/to/bob/folder --nickname=Bob
|
||||
```bash
|
||||
./build/chat2 --fleet=test --content-topic=/toy-chat/3/mingde/proto --rln-relay=true --rln-relay-dynamic=true --rln-relay-eth-contract-address=0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path=rlnKeystore.json --rln-relay-cred-index=1 --rln-relay-cred-password=password --rln-relay-eth-client-address=wss://sepolia.infura.io/ws/v3/12345678901234567890123456789012 --nickname=Bob
|
||||
```
|
||||
|
||||
```
|
||||
Seting up dynamic rln
|
||||
INFO: Welcome, Bob!
|
||||
INFO: type /help to see available commands
|
||||
@ -251,13 +264,13 @@ INFO: RLN config:
|
||||
- Your rln identity commitment key is: bd093cbf14fb933d53f596c33f98b3df83b7e9f7a1906cf4355fac712077cb28
|
||||
|
||||
INFO: attempting DNS discovery with enrtree://ANTL4SLG2COUILKAPE7EF2BYNL2SHSHVCHLRD5J7ZJLN5R3PRJD2Y@prod.waku.nodes.status.im
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA /dns4/node-01.ac-cn-hongkong- c.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD /dns4/node-01.do-ams3.wakuv2.prod.statusim.net/tcp/8000/wss/p2p/16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e]
|
||||
INFO: Connected to 16Uiu2HAmVkKntsECaYfefR1V2yCR79CegLATuTPE6B9TxgxBiiiA
|
||||
INFO: Connected to 16Uiu2HAmL5okWopX7NqZWBUKVqW8iUxCEmd5GMHLVPwCgzYzQv3e
|
||||
INFO: Connected to 16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr24iDQpSN5Qa992BCjjwgrD
|
||||
INFO: Discovered and connecting to [/dns4/node-01.gc-us-central1-a.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB /dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV /dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/8000/wss/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE]
|
||||
INFO: Connected to 16Uiu2HAm6fyqE1jB5MonzvoMdU8v76bWV8ZeNpncDamY1MQXfjdB
|
||||
INFO: Connected to 16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE
|
||||
INFO: Connected to 16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV
|
||||
INFO: No store node configured. Choosing one at random...
|
||||
|
||||
[Jul 26 13:05 Alice] message1
|
||||
[Jul 26 13:05 Alice] message2
|
||||
[Jul 26 13:05 Alice] message4
|
||||
```
|
||||
```
|
||||
@ -0,0 +1,111 @@
|
||||
In this tutorial you will learn how to:
|
||||
1. Create Sepolia Ethereum Account and obtain its private key.
|
||||
2. Obtain Sepolia Ethers from faucet.
|
||||
3. Set up a hosted node on Sepolia Testnet using Infura.
|
||||
|
||||
If you already have an Ethereum account with sufficient ethers on the Sepolia testnet then you can skip the first two sections.
|
||||
## Creating Sepolia Ethereum Account and obtaining its private key
|
||||
|
||||
|
||||
1. Download and install Metamask. [https://metamask.io/download/](https://metamask.io/download/)
|
||||
2. Create a new wallet and save your secret recovery key.
|
||||
|
||||

|
||||
|
||||
3. Login to Metamask.
|
||||
|
||||

|
||||
|
||||
4. You should already see an account created. As you can see on top right, it should be pointing to Ethereum mainnet.
|
||||
|
||||

|
||||
|
||||
5. You can use the same account for different networks. For Waku we need to connect to the Sepolia test network.
|
||||
6. You can switch to a test network by clicking on the drop down menu. Select Sepolia test network for Waku:
|
||||
|
||||

|
||||
|
||||
7. Click on Show/hide test networks.
|
||||
8. Enable "Show Test Networks".
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
9. Close the settings and now you should see the test networks in the drop down on the top right.
|
||||
|
||||

|
||||
|
||||
10. Switch to Sepolia Test Network.
|
||||
11. You can see that the same account can be used with different networks. But note that the ETH balances on different networks are different and represent different tokens.
|
||||
|
||||

|
||||
|
||||
12. Now to export your private key for the account, if needed, please click on the three dots beside the account.
|
||||
|
||||

|
||||
|
||||
13. Click on Account Details.
|
||||
14. Click on Export Private Key.
|
||||
|
||||

|
||||
|
||||
15. Enter your Metamask password when prompted.
|
||||
|
||||

|
||||
|
||||
16. You will be shown the private key. Copy it as needed.
|
||||
|
||||
Obtain Sepolia Ethers from faucet
|
||||
---
|
||||
|
||||
1. Ethers on Sepolia test networks can be obtained from different faucets.
|
||||
2. One of the faucets is as follows:
|
||||
    1. [https://sepoliafaucet.com/](https://sepoliafaucet.com/)
|
||||
(NOTE: We have not tested the security of these faucets so please feel free to do your own research or obtain Sepolia ethers from other faucets if needed.)
|
||||
3. Please follow the instructions on the webpages of these faucets.
|
||||
4. A lot of faucets limit the Sepolia ETH to 0.05 ETH/day.
|
||||
5. To obtain more ETH, you can do some PoW mining. One such PoW faucet is:
|
||||
[https://sepolia-faucet.pk910.de/](https://sepolia-faucet.pk910.de/)
|
||||
6. Enter your Eth account address, accept Captcha and start mining.
|
||||
|
||||

|
||||
|
||||
7. You can see the estimated Sepolia ETH mined per hour. Each session is restricted to a few hours.
|
||||
|
||||

|
||||
|
||||
8. When you exceed the hour limit of the session, then the mining will be stopped.
|
||||
9. Alternatively, stop the mining when you have mined enough Sepolia ether.
|
||||
10. Do not forget to claim your Sepolia ether.
|
||||
|
||||

|
||||
|
||||
|
||||
Setting up a hosted node on Sepolia Testnet using Infura
|
||||
---
|
||||
|
||||
(Note: Infura provides a simple, straightforward way of setting up endpoints for interaction with the Ethereum chain and the Waku RLN smart contract without having to run a dedicated Ethereum node. Setting up Infura is not mandatory. Operators concerned with the centralized aspect introduced by Infura can set up their own node.)
|
||||
|
||||
1. Sign up to infura if you do not have an account already. [https://infura.io/register](https://infura.io/register)
|
||||
|
||||

|
||||
|
||||
2. After registering and verifying the account, create a new project using Ethereum and give it a name.
|
||||
|
||||

|
||||
|
||||
3. After creating the project, you will be presented with a dashboard like follows. Note that your Project Id and secret will be different.
|
||||
|
||||

|
||||
|
||||
4. Select Sepolia network in Endpoints.
|
||||
|
||||

|
||||
|
||||
5. You can find the endpoints for the hosted node using https and wss. The wss endpoint is the relevant one for connecting the waku node to the RLN contract on Sepolia network. Like follows:
|
||||
|
||||

|
||||
|
||||
6. You can change security settings or view usage options as required in the dashboard.
|
||||
7. Congratulations, you are now ready to use the Infura node.
|
||||
@ -59,7 +59,7 @@ class MainActivity : AppCompatActivity() {
|
||||
lbl.text = (lbl.text.toString() + ">>> Default pubsub topic: " + defaultPubsubTopic() + "\n");
|
||||
|
||||
try {
|
||||
node.connect("/dns4/node-01.gc-us-central1-a.wakuv2.test.statusim.net/tcp/30303/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS")
|
||||
node.connect("/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG")
|
||||
lbl.text = (lbl.text.toString() + ">>> Connected to Peer" + "\n")
|
||||
|
||||
node.peers().forEach {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user