Mirror of https://github.com/status-im/libp2p-test-plans.git (synced 2025-01-11 15:24:11 +00:00)
Update Go & JS tests to conform to the multidim interop test spec. (#121)
* Update Go test implementations to match new spec
* Update JS test implementation
* Update Rust test Implementation
* Update root Makefile
* Update runner to new spec
* Use composite action and S3 caching
* Not using GHA cache anymore
* Try removing access key from env
* Test workflow without cache keys (#131)
* Test if it works without s3 cache keys
* Fix if statement
* Fix if statement
* Always use buildkit
* Undo debug change
* Add no cache workflow
* Skip test in no-cache workflow
* Update .github/workflows/no-cache-multidim-interop.yml
* Same workflow; use CACHING_OPTIONS
* Add Browser WebRTC test (#130)
* Add webrtc to JS test
* Add onlyDial to all queries
* Update versions.ts
* Remove unneeded timeout overrides
parent ebdf52d20c
commit 3b6d87b532
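
The changes below rename the runner-provided environment variables (REDIS_ADDR becomes redis_addr, test_timeout becomes test_timeout_seconds) and skip muxer/security selection for transports that bring their own. A rough sketch of the environment an implementation container is driven by under the new spec; the variable names come from the diffs below, the concrete values are illustrative only:

    # Illustrative values; variable names as used by the updated Go/JS/Rust tests below.
    export transport=tcp              # e.g. tcp, ws, quic, quic-v1, webtransport, webrtc
    export muxer=yamux                # ignored when the transport has native muxing
    export security=noise             # ignored when the transport has native encryption
    export is_dialer=true
    export ip=0.0.0.0
    export redis_addr=redis:6379      # previously REDIS_ADDR
    export test_timeout_seconds=180   # previously test_timeout
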
.github/actions/run-interop-ping-test/action.yml | 32 (vendored)
@@ -9,9 +9,38 @@ inputs:
    description: "Space-separated paths to JSON files describing additional images"
    required: false
    default: ""
  s3-cache-bucket:
    description: "Which S3 bucket to use for container layer caching"
    required: false
    default: ""
  s3-access-key-id:
    description: "S3 Access key id for the cache"
    required: false
    default: ""
  s3-secret-access-key:
    description: "S3 secret key id for the cache"
    required: false
    default: ""
  aws-region:
    description: "Which AWS region to use"
    required: false
    default: "us-east-1"
runs:
  using: "composite"
  steps:
    - run: |
        echo "AWS_BUCKET=${{ inputs.s3-cache-bucket }}" >> $GITHUB_ENV
        echo "AWS_REGION=${{ inputs.aws-region }}" >> $GITHUB_ENV
      shell: bash

    - name: Configure AWS credentials for S3 build cache
      if: inputs.s3-access-key-id != '' && inputs.s3-secret-access-key != ''
      uses: aws-actions/configure-aws-credentials@v1
      with:
        aws-access-key-id: ${{ inputs.s3-access-key-id }}
        aws-secret-access-key: ${{ inputs.s3-secret-access-key }}
        aws-region: ${{ env.AWS_REGION }}

    # This depends on where this file is within this repository. This walks up
    # from here to the multidim-interop folder
    - run: |
@@ -24,9 +53,6 @@ runs:
      with:
        node-version: 18

    - name: Expose GitHub Runtime # Needed for docker buildx to cache properly (See https://docs.docker.com/build/cache/backends/gha/#authentication)
      uses: crazy-max/ghaction-github-runtime@v2

    - name: Set up Docker Buildx
      id: buildx
      uses: docker/setup-buildx-action@v2

.github/workflows/multidim-interop.yml | 17 (vendored)
@@ -9,11 +9,20 @@ name: libp2p multidimensional interop test

jobs:
  run-multidim-interop:
    uses: "./.github/workflows/run-testplans.yml"
    with:
      dir: "multidim-interop"
  run-multidim-interop-ng:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: ./.github/actions/run-interop-ping-test
        with:
          s3-cache-bucket: libp2p-by-tf-aws-bootstrap
          s3-access-key-id: ${{ vars.S3_AWS_ACCESS_KEY_ID }}
          s3-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
  build-without-cache:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      # Purposely not using cache to replicate how forks will behave.
      - uses: ./.github/actions/run-interop-ping-test
        with:
          # It's okay to not run the tests, we only care to check if the tests build without cache.
          test-filter: '"no test matches this, skip all"'

.github/workflows/run-testplans.yml | 101 (vendored)
@@ -1,101 +0,0 @@
name: Run composition file with a custom git reference

on:
  workflow_call:
    inputs:
      dir:
        description: the directory with the testplans test
        required: true
        type: string
      extra-versions:
        description: artifact name for the extra-versions.json file
        required: false
        type: string
      image-tar:
        description: artifact name for the image.tar(s) to import
        required: false
        type: string
      test-filter:
        description: "Filter which test runs"
        type: string
        required: false
        default: ""
      test-plans_ref:
        description: "branch of test-plans to checkout"
        type: string
        required: false
        default: ""
jobs:
  run_test:
    name: Run testplans test
    runs-on: ubuntu-latest
    env:
      TEST_PLAN_DIR: ${{ inputs.dir }}
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout sources
        uses: actions/checkout@v3
        with:
          path: test-plans
          repository: "libp2p/test-plans"
          ref: ${{ inputs.test-plans_ref }}
      # Download input data
      - uses: actions/download-artifact@v3
        if: ${{ inputs.extra-versions != '' }}
        with:
          name: ${{ inputs.extra-versions }}
          path: /tmp/extra-versions
      - uses: actions/download-artifact@v3
        if: ${{ inputs.image-tar != '' }}
        with:
          name: ${{ inputs.image-tar }}
          path: /tmp/images/
      - name: Load docker images
        if: ${{ inputs.image-tar != '' }}
        run: for FILE in /tmp/images/*; do docker image load -i $FILE; done
      # Continue with the test as before
      - uses: actions/setup-node@v3
        with:
          node-version: 17
          cache: "npm"
          cache-dependency-path: ./test-plans/${{ env.TEST_PLAN_DIR }}/package-lock.json
      - name: Expose GitHub Runtime # Needed for docker buildx to cache properly (See https://docs.docker.com/build/cache/backends/gha/#authentication)
        uses: crazy-max/ghaction-github-runtime@v2
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v2
      - name: Install deps
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: npm ci
      - name: Build images
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: make
      - name: Run the test
        timeout-minutes: 30
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: WORKER_COUNT=2 npm run test -- --extra-versions-dir=/tmp/extra-versions --name-filter=${{ inputs.test-filter }}
      - name: Print the results
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: cat results.csv
      - name: Render results
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: npm run renderResults > ./dashboard.md
      - name: Show Dashboard Output
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: cat ./dashboard.md >> $GITHUB_STEP_SUMMARY
      - name: Exit with Error
        working-directory: ./test-plans/${{ env.TEST_PLAN_DIR }}/
        run: |
          if grep -q ":red_circle:" ./dashboard.md; then
            exit 1
          else
            exit 0
          fi
      - uses: actions/upload-artifact@v3
        with:
          name: test-plans-output
          path: |
            ./test-plans/${{ env.TEST_PLAN_DIR }}/results.csv
            ./test-plans/${{ env.TEST_PLAN_DIR }}/dashboard.md

@@ -1,13 +1,14 @@
GO_SUBDIRS := $(wildcard go/*/.)
JS_SUBDIRS := $(wildcard js/*/.)
RUST_SUBDIRS := $(wildcard rust/*/.)

all: $(GO_SUBDIRS) $(JS_SUBDIRS) rust/.
all: $(GO_SUBDIRS) $(JS_SUBDIRS) $(RUST_SUBDIRS)
$(JS_SUBDIRS):
	$(MAKE) -C $@
$(GO_SUBDIRS):
	$(MAKE) -C $@
rust/.:
$(RUST_SUBDIRS):
	$(MAKE) -C $@


.PHONY: $(GO_SUBDIRS) $(JS_SUBDIRS) rust/. all
.PHONY: $(GO_SUBDIRS) $(JS_SUBDIRS) $(RUST_SUBDIRS) all

@@ -1,13 +1,13 @@
#!/usr/bin/env /bin/bash

# If in CI use caching
if [[ -n "${CI}" ]]; then
  docker buildx build \
    --load \
    -t $IMAGE_NAME \
    --cache-to type=gha,mode=max,scope=$GITHUB_REF_NAME-$IMAGE_NAME \
    --cache-from type=gha,scope=$GITHUB_REF_NAME-$IMAGE_NAME "$@"
else
  docker build -t $IMAGE_NAME "$@"
CACHING_OPTIONS=""
# If in CI and we have a defined cache bucket, use caching
if [[ -n "${CI}" ]] && [[ -n "${AWS_BUCKET}" ]]; then
  CACHING_OPTIONS="\
    --cache-to type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME \
    --cache-from type=s3,mode=max,bucket=$AWS_BUCKET,region=$AWS_REGION,prefix=buildCache,name=$IMAGE_NAME"
fi

docker buildx build \
  --load \
  -t $IMAGE_NAME $CACHING_OPTIONS "$@"

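The wrapper is driven entirely by environment variables: in CI the composite action exports AWS_BUCKET and AWS_REGION from its s3-cache-bucket and aws-region inputs, and each per-language Makefile sets IMAGE_NAME before calling it. A minimal sketch of an equivalent manual invocation with the S3 layer cache enabled (the image name here is hypothetical, the bucket and region are the values used by the workflow above):

    # Hypothetical manual run; IMAGE_NAME and the remaining arguments are passed to docker buildx.
    CI=true \
    AWS_BUCKET=libp2p-by-tf-aws-bootstrap \
    AWS_REGION=us-east-1 \
    IMAGE_NAME=example-image \
      ./dockerBuildWrapper.sh -f Dockerfile .
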
@ -2,7 +2,9 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
@ -24,15 +26,15 @@ import (
|
||||
func main() {
|
||||
var (
|
||||
transport = os.Getenv("transport")
|
||||
secureChannel = os.Getenv("security")
|
||||
muxer = os.Getenv("muxer")
|
||||
secureChannel = os.Getenv("security")
|
||||
isDialerStr = os.Getenv("is_dialer")
|
||||
ip = os.Getenv("ip")
|
||||
testTimeoutStr = os.Getenv("test_timeout")
|
||||
redisAddr = os.Getenv("REDIS_ADDR")
|
||||
redisAddr = os.Getenv("redis_addr")
|
||||
testTimeoutStr = os.Getenv("test_timeout_seconds")
|
||||
)
|
||||
|
||||
var testTimeout = 10 * time.Second
|
||||
var testTimeout = 3 * time.Minute
|
||||
if testTimeoutStr != "" {
|
||||
secs, err := strconv.ParseInt(testTimeoutStr, 10, 32)
|
||||
if err == nil {
|
||||
@ -40,6 +42,10 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if ip == "" {
|
||||
ip = "0.0.0.0"
|
||||
}
|
||||
|
||||
if redisAddr == "" {
|
||||
redisAddr = "redis:6379"
|
||||
}
|
||||
@ -80,56 +86,70 @@ func main() {
|
||||
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
|
||||
listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic", ip)
|
||||
default:
|
||||
panic("Unsupported transport")
|
||||
log.Fatalf("Unsupported transport: %s", transport)
|
||||
}
|
||||
options = append(options, libp2p.ListenAddrStrings(listenAddr))
|
||||
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
// Skipped for certain transports
|
||||
var skipMuxer bool
|
||||
var skipSecureChannel bool
|
||||
switch transport {
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported secure channel")
|
||||
skipMuxer = true
|
||||
skipSecureChannel = true
|
||||
}
|
||||
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported muxer")
|
||||
if !skipSecureChannel {
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
default:
|
||||
log.Fatalf("Unsupported secure channel: %s", secureChannel)
|
||||
}
|
||||
}
|
||||
|
||||
if !skipMuxer {
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
default:
|
||||
log.Fatalf("Unsupported muxer: %s", muxer)
|
||||
}
|
||||
}
|
||||
|
||||
host, err := libp2p.New(options...)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to instantiate libp2p instance: %s", err))
|
||||
log.Fatalf("failed to instantiate libp2p instance: %s", err)
|
||||
}
|
||||
defer host.Close()
|
||||
|
||||
log.Println("My multiaddr is: ", host.Addrs())
|
||||
|
||||
if is_dialer {
|
||||
val, err := rClient.BLPop(ctx, testTimeout, "listenerAddr").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for listener to be ready")
|
||||
log.Fatal("Failed to wait for listener to be ready")
|
||||
}
|
||||
otherMa := ma.StringCast(val[1])
|
||||
fmt.Println("My multiaddr is: ", host.Addrs())
|
||||
fmt.Println("Other peer multiaddr is: ", otherMa)
|
||||
log.Println("Other peer multiaddr is: ", otherMa)
|
||||
otherMa, p2pComponent := ma.SplitLast(otherMa)
|
||||
otherPeerId, err := peer.Decode(p2pComponent.Value())
|
||||
if err != nil {
|
||||
panic("Failed to get peer id from multiaddr")
|
||||
log.Fatal("Failed to get peer id from multiaddr")
|
||||
}
|
||||
|
||||
handshakeStartInstant := time.Now()
|
||||
err = host.Connect(ctx, peer.AddrInfo{
|
||||
ID: otherPeerId,
|
||||
Addrs: []ma.Multiaddr{otherMa},
|
||||
})
|
||||
if err != nil {
|
||||
panic("Failed to connect to other peer")
|
||||
log.Fatal("Failed to connect to other peer")
|
||||
}
|
||||
|
||||
ping := ping.NewPingService(host)
|
||||
@ -138,19 +158,27 @@ func main() {
|
||||
if res.Error != nil {
|
||||
panic(res.Error)
|
||||
}
|
||||
handshakePlusOneRTT := time.Since(handshakeStartInstant)
|
||||
|
||||
fmt.Println("Ping successful: ", res.RTT)
|
||||
testResult := struct {
|
||||
HandshakePlusOneRTTMillis float32 `json:"handshakePlusOneRTTMillis"`
|
||||
PingRTTMilllis float32 `json:"pingRTTMilllis"`
|
||||
}{
|
||||
HandshakePlusOneRTTMillis: float32(handshakePlusOneRTT.Microseconds()) / 1000,
|
||||
PingRTTMilllis: float32(res.RTT.Microseconds()) / 1000,
|
||||
}
|
||||
|
||||
rClient.RPush(ctx, "dialerDone", "").Result()
|
||||
testResultJSON, err := json.Marshal(testResult)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to marshal test result: %v", err)
|
||||
}
|
||||
fmt.Println(string(testResultJSON))
|
||||
} else {
|
||||
_, err := rClient.RPush(ctx, "listenerAddr", host.Addrs()[0].Encapsulate(ma.StringCast("/p2p/"+host.ID().String())).String()).Result()
|
||||
if err != nil {
|
||||
panic("Failed to send listener address")
|
||||
log.Fatal("Failed to send listener address")
|
||||
}
|
||||
_, err = rClient.BLPop(ctx, testTimeout, "dialerDone").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for dialer conclusion")
|
||||
}
|
||||
|
||||
time.Sleep(testTimeout)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@ -2,7 +2,9 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
@ -18,22 +20,21 @@ import (
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/websocket"
|
||||
libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
transport = os.Getenv("transport")
|
||||
secureChannel = os.Getenv("security")
|
||||
muxer = os.Getenv("muxer")
|
||||
secureChannel = os.Getenv("security")
|
||||
isDialerStr = os.Getenv("is_dialer")
|
||||
ip = os.Getenv("ip")
|
||||
testTimeoutStr = os.Getenv("test_timeout")
|
||||
redisAddr = os.Getenv("REDIS_ADDR")
|
||||
redisAddr = os.Getenv("redis_addr")
|
||||
testTimeoutStr = os.Getenv("test_timeout_seconds")
|
||||
)
|
||||
|
||||
var testTimeout = 10 * time.Second
|
||||
var testTimeout = 3 * time.Minute
|
||||
if testTimeoutStr != "" {
|
||||
secs, err := strconv.ParseInt(testTimeoutStr, 10, 32)
|
||||
if err == nil {
|
||||
@ -41,6 +42,10 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if ip == "" {
|
||||
ip = "0.0.0.0"
|
||||
}
|
||||
|
||||
if redisAddr == "" {
|
||||
redisAddr = "redis:6379"
|
||||
}
|
||||
@ -80,60 +85,71 @@ func main() {
|
||||
case "quic":
|
||||
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
|
||||
listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic", ip)
|
||||
case "webtransport":
|
||||
options = append(options, libp2p.Transport(libp2pwebtransport.New))
|
||||
listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic/webtransport", ip)
|
||||
default:
|
||||
panic("Unsupported transport")
|
||||
log.Fatalf("Unsupported transport: %s", transport)
|
||||
}
|
||||
options = append(options, libp2p.ListenAddrStrings(listenAddr))
|
||||
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
// Skipped for certain transports
|
||||
var skipMuxer bool
|
||||
var skipSecureChannel bool
|
||||
switch transport {
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported secure channel")
|
||||
skipMuxer = true
|
||||
skipSecureChannel = true
|
||||
}
|
||||
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported muxer")
|
||||
if !skipSecureChannel {
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
default:
|
||||
log.Fatalf("Unsupported secure channel: %s", secureChannel)
|
||||
}
|
||||
}
|
||||
|
||||
if !skipMuxer {
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
default:
|
||||
log.Fatalf("Unsupported muxer: %s", muxer)
|
||||
}
|
||||
}
|
||||
|
||||
host, err := libp2p.New(options...)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to instantiate libp2p instance: %s", err))
|
||||
log.Fatalf("failed to instantiate libp2p instance: %s", err)
|
||||
}
|
||||
defer host.Close()
|
||||
|
||||
log.Println("My multiaddr is: ", host.Addrs())
|
||||
|
||||
if is_dialer {
|
||||
val, err := rClient.BLPop(ctx, testTimeout, "listenerAddr").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for listener to be ready")
|
||||
log.Fatal("Failed to wait for listener to be ready")
|
||||
}
|
||||
otherMa := ma.StringCast(val[1])
|
||||
fmt.Println("My multiaddr is: ", host.Addrs())
|
||||
fmt.Println("Other peer multiaddr is: ", otherMa)
|
||||
log.Println("Other peer multiaddr is: ", otherMa)
|
||||
otherMa, p2pComponent := ma.SplitLast(otherMa)
|
||||
otherPeerId, err := peer.Decode(p2pComponent.Value())
|
||||
if err != nil {
|
||||
panic("Failed to get peer id from multiaddr")
|
||||
log.Fatal("Failed to get peer id from multiaddr")
|
||||
}
|
||||
|
||||
handshakeStartInstant := time.Now()
|
||||
err = host.Connect(ctx, peer.AddrInfo{
|
||||
ID: otherPeerId,
|
||||
Addrs: []ma.Multiaddr{otherMa},
|
||||
})
|
||||
if err != nil {
|
||||
panic("Failed to connect to other peer")
|
||||
log.Fatal("Failed to connect to other peer")
|
||||
}
|
||||
|
||||
ping := ping.NewPingService(host)
|
||||
@ -142,18 +158,27 @@ func main() {
|
||||
if res.Error != nil {
|
||||
panic(res.Error)
|
||||
}
|
||||
handshakePlusOneRTT := time.Since(handshakeStartInstant)
|
||||
|
||||
fmt.Println("Ping successful: ", res.RTT)
|
||||
testResult := struct {
|
||||
HandshakePlusOneRTTMillis float32 `json:"handshakePlusOneRTTMillis"`
|
||||
PingRTTMilllis float32 `json:"pingRTTMilllis"`
|
||||
}{
|
||||
HandshakePlusOneRTTMillis: float32(handshakePlusOneRTT.Microseconds()) / 1000,
|
||||
PingRTTMilllis: float32(res.RTT.Microseconds()) / 1000,
|
||||
}
|
||||
|
||||
rClient.RPush(ctx, "dialerDone", "").Result()
|
||||
testResultJSON, err := json.Marshal(testResult)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to marshal test result: %v", err)
|
||||
}
|
||||
fmt.Println(string(testResultJSON))
|
||||
} else {
|
||||
_, err := rClient.RPush(ctx, "listenerAddr", host.Addrs()[0].Encapsulate(ma.StringCast("/p2p/"+host.ID().String())).String()).Result()
|
||||
if err != nil {
|
||||
panic("Failed to send listener address")
|
||||
}
|
||||
_, err = rClient.BLPop(ctx, testTimeout, "dialerDone").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for dialer conclusion")
|
||||
log.Fatal("Failed to send listener address")
|
||||
}
|
||||
time.Sleep(testTimeout)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@ -2,7 +2,9 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
@ -25,15 +27,15 @@ import (
|
||||
func main() {
|
||||
var (
|
||||
transport = os.Getenv("transport")
|
||||
secureChannel = os.Getenv("security")
|
||||
muxer = os.Getenv("muxer")
|
||||
secureChannel = os.Getenv("security")
|
||||
isDialerStr = os.Getenv("is_dialer")
|
||||
ip = os.Getenv("ip")
|
||||
testTimeoutStr = os.Getenv("test_timeout")
|
||||
redisAddr = os.Getenv("REDIS_ADDR")
|
||||
redisAddr = os.Getenv("redis_addr")
|
||||
testTimeoutStr = os.Getenv("test_timeout_seconds")
|
||||
)
|
||||
|
||||
var testTimeout = 10 * time.Second
|
||||
var testTimeout = 3 * time.Minute
|
||||
if testTimeoutStr != "" {
|
||||
secs, err := strconv.ParseInt(testTimeoutStr, 10, 32)
|
||||
if err == nil {
|
||||
@ -41,6 +43,10 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if ip == "" {
|
||||
ip = "0.0.0.0"
|
||||
}
|
||||
|
||||
if redisAddr == "" {
|
||||
redisAddr = "redis:6379"
|
||||
}
|
||||
@ -87,57 +93,76 @@ func main() {
|
||||
options = append(options, libp2p.Transport(libp2pwebtransport.New))
|
||||
listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic-v1/webtransport", ip)
|
||||
default:
|
||||
panic("Unsupported transport")
|
||||
log.Fatalf("Unsupported transport: %s", transport)
|
||||
}
|
||||
options = append(options, libp2p.ListenAddrStrings(listenAddr))
|
||||
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
// Skipped for certain transports
|
||||
var skipMuxer bool
|
||||
var skipSecureChannel bool
|
||||
switch transport {
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported secure channel")
|
||||
fallthrough
|
||||
case "quic-v1":
|
||||
fallthrough
|
||||
case "webtransport":
|
||||
fallthrough
|
||||
case "webrtc":
|
||||
skipMuxer = true
|
||||
skipSecureChannel = true
|
||||
}
|
||||
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
case "quic":
|
||||
default:
|
||||
panic("Unsupported muxer")
|
||||
if !skipSecureChannel {
|
||||
switch secureChannel {
|
||||
case "tls":
|
||||
options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
|
||||
case "noise":
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
default:
|
||||
log.Fatalf("Unsupported secure channel: %s", secureChannel)
|
||||
}
|
||||
}
|
||||
|
||||
if !skipMuxer {
|
||||
switch muxer {
|
||||
case "yamux":
|
||||
options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
|
||||
case "mplex":
|
||||
options = append(options, libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport))
|
||||
default:
|
||||
log.Fatalf("Unsupported muxer: %s", muxer)
|
||||
}
|
||||
}
|
||||
|
||||
host, err := libp2p.New(options...)
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to instantiate libp2p instance: %s", err))
|
||||
log.Fatalf("failed to instantiate libp2p instance: %s", err)
|
||||
}
|
||||
defer host.Close()
|
||||
|
||||
fmt.Println("My multiaddr is: ", host.Addrs())
|
||||
log.Println("My multiaddr is: ", host.Addrs())
|
||||
|
||||
if is_dialer {
|
||||
val, err := rClient.BLPop(ctx, testTimeout, "listenerAddr").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for listener to be ready")
|
||||
log.Fatal("Failed to wait for listener to be ready")
|
||||
}
|
||||
otherMa := ma.StringCast(val[1])
|
||||
fmt.Println("Other peer multiaddr is: ", otherMa)
|
||||
log.Println("Other peer multiaddr is: ", otherMa)
|
||||
otherMa, p2pComponent := ma.SplitLast(otherMa)
|
||||
otherPeerId, err := peer.Decode(p2pComponent.Value())
|
||||
if err != nil {
|
||||
panic("Failed to get peer id from multiaddr")
|
||||
log.Fatal("Failed to get peer id from multiaddr")
|
||||
}
|
||||
|
||||
handshakeStartInstant := time.Now()
|
||||
err = host.Connect(ctx, peer.AddrInfo{
|
||||
ID: otherPeerId,
|
||||
Addrs: []ma.Multiaddr{otherMa},
|
||||
})
|
||||
if err != nil {
|
||||
panic("Failed to connect to other peer")
|
||||
log.Fatal("Failed to connect to other peer")
|
||||
}
|
||||
|
||||
ping := ping.NewPingService(host)
|
||||
@ -146,18 +171,27 @@ func main() {
|
||||
if res.Error != nil {
|
||||
panic(res.Error)
|
||||
}
|
||||
handshakePlusOneRTT := time.Since(handshakeStartInstant)
|
||||
|
||||
fmt.Println("Ping successful: ", res.RTT)
|
||||
testResult := struct {
|
||||
HandshakePlusOneRTTMillis float32 `json:"handshakePlusOneRTTMillis"`
|
||||
PingRTTMilllis float32 `json:"pingRTTMilllis"`
|
||||
}{
|
||||
HandshakePlusOneRTTMillis: float32(handshakePlusOneRTT.Microseconds()) / 1000,
|
||||
PingRTTMilllis: float32(res.RTT.Microseconds()) / 1000,
|
||||
}
|
||||
|
||||
rClient.RPush(ctx, "dialerDone", "").Result()
|
||||
testResultJSON, err := json.Marshal(testResult)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to marshal test result: %v", err)
|
||||
}
|
||||
fmt.Println(string(testResultJSON))
|
||||
} else {
|
||||
_, err := rClient.RPush(ctx, "listenerAddr", host.Addrs()[0].Encapsulate(ma.StringCast("/p2p/"+host.ID().String())).String()).Result()
|
||||
if err != nil {
|
||||
panic("Failed to send listener address")
|
||||
}
|
||||
_, err = rClient.BLPop(ctx, testTimeout, "dialerDone").Result()
|
||||
if err != nil {
|
||||
panic("Failed to wait for dialer conclusion")
|
||||
log.Fatal("Failed to send listener address")
|
||||
}
|
||||
time.Sleep(testTimeout)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@@ -4,7 +4,7 @@ import { createClient } from 'redis'
import http from "http"

const isDialer = process.env.is_dialer === "true"
const REDIS_ADDR = process.env.REDIS_ADDR || 'redis:6379'
const redis_addr = process.env.redis_addr || 'redis:6379'

// Used to preinstall the browsers in the docker image
const initialSetup = process.env.initial_setup === "true"
@@ -18,7 +18,7 @@ export default {
}

const redisClient = createClient({
  url: `redis://${REDIS_ADDR}`
  url: `redis://${redis_addr}`
})
redisClient.on('error', (err) => console.error(`Redis Client Error: ${err}`))
await redisClient.connect()

@@ -3,12 +3,12 @@ TEST_SOURCES := $(wildcard test/*.ts)

all: chromium-image.json node-image.json

chromium-image.json: ChromiumDockerfile $(TEST_SOURCES) package.json package-lock.json
chromium-image.json: ChromiumDockerfile $(TEST_SOURCES) package.json package-lock.json .aegir.js
	IMAGE_NAME=chromium-${image_name} ../../dockerBuildWrapper.sh -f ChromiumDockerfile .
	docker image inspect chromium-${image_name} -f "{{.Id}}" | \
		xargs -I {} echo "{\"imageID\": \"{}\"}" > $@

node-image.json: Dockerfile $(TEST_SOURCES) package.json package-lock.json
node-image.json: Dockerfile $(TEST_SOURCES) package.json package-lock.json .aegir.js
	IMAGE_NAME=node-${image_name} ../../dockerBuildWrapper.sh -f Dockerfile .
	docker image inspect node-${image_name} -f "{{.Id}}" | \
		xargs -I {} echo "{\"imageID\": \"{}\"}" > $@

multidim-interop/js/v0.41/package-lock.json | 268 (generated)
@ -13,6 +13,7 @@
|
||||
"@chainsafe/libp2p-yamux": "^3.0.4",
|
||||
"@libp2p/mplex": "^7.1.1",
|
||||
"@libp2p/tcp": "^6.0.8",
|
||||
"@libp2p/webrtc": "^1.0.3",
|
||||
"@libp2p/websockets": "^5.0.1",
|
||||
"@libp2p/webtransport": "^1.0.7",
|
||||
"@multiformats/multiaddr": "^11.1.4",
|
||||
@ -26,7 +27,7 @@
|
||||
"typescript": "^4.9.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16"
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"node_modules/@achingbrain/ip-address": {
|
||||
@ -2904,6 +2905,94 @@
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@libp2p/webrtc": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/webrtc/-/webrtc-1.0.3.tgz",
|
||||
"integrity": "sha512-54TqJ6nK3dZR6kr6sR0BrkBhufv7adqeH+DiEsOxKGqgEsXJBhuPbH5roNdLiQkC2Rq55Zdvm0kwEULtw9UtzA==",
|
||||
"dependencies": {
|
||||
"@chainsafe/libp2p-noise": "^11.0.0",
|
||||
"@libp2p/interface-connection": "^3.0.2",
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/interface-stream-muxer": "^3.0.0",
|
||||
"@libp2p/interface-transport": "^2.0.0",
|
||||
"@libp2p/logger": "^2.0.0",
|
||||
"@libp2p/peer-id": "^2.0.0",
|
||||
"@multiformats/multiaddr": "^11.0.3",
|
||||
"@protobuf-ts/runtime": "^2.8.0",
|
||||
"err-code": "^3.0.1",
|
||||
"it-length-prefixed": "^8.0.3",
|
||||
"it-merge": "^2.0.0",
|
||||
"it-pipe": "^2.0.4",
|
||||
"it-pushable": "^3.1.0",
|
||||
"it-stream-types": "^1.0.4",
|
||||
"multiformats": "^11.0.0",
|
||||
"multihashes": "^4.0.3",
|
||||
"p-defer": "^4.0.0",
|
||||
"uint8arraylist": "^2.3.3",
|
||||
"uint8arrays": "^4.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0",
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@libp2p/webrtc/node_modules/@chainsafe/libp2p-noise": {
|
||||
"version": "11.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@chainsafe/libp2p-noise/-/libp2p-noise-11.0.0.tgz",
|
||||
"integrity": "sha512-NEl5aIv6muz9OL+dsa3INEU89JX0NViBxOy7NwwG8eNRPUDHo5E3ZTMSHXQpVx1K/ofoNS4ANO9xwezY6ss5GA==",
|
||||
"dependencies": {
|
||||
"@libp2p/crypto": "^1.0.0",
|
||||
"@libp2p/interface-connection-encrypter": "^3.0.0",
|
||||
"@libp2p/interface-keys": "^1.0.2",
|
||||
"@libp2p/interface-metrics": "^4.0.2",
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/logger": "^2.0.0",
|
||||
"@libp2p/peer-id": "^2.0.0",
|
||||
"@stablelib/chacha20poly1305": "^1.0.1",
|
||||
"@stablelib/hkdf": "^1.0.1",
|
||||
"@stablelib/sha256": "^1.0.1",
|
||||
"@stablelib/x25519": "^1.0.1",
|
||||
"it-length-prefixed": "^8.0.2",
|
||||
"it-pair": "^2.0.2",
|
||||
"it-pb-stream": "^2.0.2",
|
||||
"it-pipe": "^2.0.3",
|
||||
"it-stream-types": "^1.0.4",
|
||||
"protons-runtime": "^4.0.1",
|
||||
"uint8arraylist": "^2.3.2",
|
||||
"uint8arrays": "^4.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0",
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@libp2p/webrtc/node_modules/@libp2p/interface-peer-id": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/interface-peer-id/-/interface-peer-id-2.0.1.tgz",
|
||||
"integrity": "sha512-k01hKHTAZWMOiBC+yyFsmBguEMvhPkXnQtqLtFqga2fVZu8Zve7zFAtQYLhQjeJ4/apeFtO6ddTS8mCE6hl4OA==",
|
||||
"dependencies": {
|
||||
"multiformats": "^11.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0",
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@libp2p/webrtc/node_modules/@libp2p/peer-id": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/peer-id/-/peer-id-2.0.1.tgz",
|
||||
"integrity": "sha512-uGIR4rS+j+IzzIu0kih4MonZEfRmjGNfXaSPMIFOeMxZItZT6TIpxoVNYxHl4YtneSFKzlLnf9yx9EhRcyfy8Q==",
|
||||
"dependencies": {
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/interfaces": "^3.2.0",
|
||||
"multiformats": "^11.0.0",
|
||||
"uint8arrays": "^4.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=16.0.0",
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@libp2p/websockets": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/websockets/-/websockets-5.0.2.tgz",
|
||||
@ -3008,6 +3097,11 @@
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@multiformats/base-x": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@multiformats/base-x/-/base-x-4.0.1.tgz",
|
||||
"integrity": "sha512-eMk0b9ReBbV23xXU693TAIrLyeO5iTgBZGSJfpqriG8UkYvr/hC9u9pyMlAakDNHWmbhMZCDs6KQO0jzKD8OTw=="
|
||||
},
|
||||
"node_modules/@multiformats/mafmt": {
|
||||
"version": "11.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@multiformats/mafmt/-/mafmt-11.0.3.tgz",
|
||||
@ -3330,6 +3424,11 @@
|
||||
"integrity": "sha512-oZLYFEAzUKyi3SKnXvj32ZCEGH6RDnao7COuCVhDydMS9NrCSVXhM79VaKyP5+Zc33m0QXEd2DN3UkU7OsHcfw==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@protobuf-ts/runtime": {
|
||||
"version": "2.8.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobuf-ts/runtime/-/runtime-2.8.2.tgz",
|
||||
"integrity": "sha512-PVxsH81y9kEbHldxxG/8Y3z2mTXWQytRl8zNS0mTPUjkEC+8GUX6gj6LsA8EFp25fAs9V0ruh+aNWmPccEI9MA=="
|
||||
},
|
||||
"node_modules/@protobufjs/aspromise": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
|
||||
@ -13643,6 +13742,19 @@
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
|
||||
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
|
||||
},
|
||||
"node_modules/multibase": {
|
||||
"version": "4.0.6",
|
||||
"resolved": "https://registry.npmjs.org/multibase/-/multibase-4.0.6.tgz",
|
||||
"integrity": "sha512-x23pDe5+svdLz/k5JPGCVdfn7Q5mZVMBETiC+ORfO+sor9Sgs0smJzAjfTbM5tckeCqnaUuMYoz+k3RXMmJClQ==",
|
||||
"deprecated": "This module has been superseded by the multiformats module",
|
||||
"dependencies": {
|
||||
"@multiformats/base-x": "^4.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12.0.0",
|
||||
"npm": ">=6.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/multiformats": {
|
||||
"version": "11.0.0",
|
||||
"resolved": "https://registry.npmjs.org/multiformats/-/multiformats-11.0.0.tgz",
|
||||
@ -13652,6 +13764,38 @@
|
||||
"npm": ">=7.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/multihashes": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/multihashes/-/multihashes-4.0.3.tgz",
|
||||
"integrity": "sha512-0AhMH7Iu95XjDLxIeuCOOE4t9+vQZsACyKZ9Fxw2pcsRmlX4iCn1mby0hS0bb+nQOVpdQYWPpnyusw4da5RPhA==",
|
||||
"dependencies": {
|
||||
"multibase": "^4.0.1",
|
||||
"uint8arrays": "^3.0.0",
|
||||
"varint": "^5.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12.0.0",
|
||||
"npm": ">=6.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/multihashes/node_modules/multiformats": {
|
||||
"version": "9.9.0",
|
||||
"resolved": "https://registry.npmjs.org/multiformats/-/multiformats-9.9.0.tgz",
|
||||
"integrity": "sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg=="
|
||||
},
|
||||
"node_modules/multihashes/node_modules/uint8arrays": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/uint8arrays/-/uint8arrays-3.1.1.tgz",
|
||||
"integrity": "sha512-+QJa8QRnbdXVpHYjLoTpJIdCTiw9Ir62nocClWuXIq2JIh4Uta0cQsTSpFL678p2CN8B+XSApwcU+pQEqVpKWg==",
|
||||
"dependencies": {
|
||||
"multiformats": "^9.4.2"
|
||||
}
|
||||
},
|
||||
"node_modules/multihashes/node_modules/varint": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/varint/-/varint-5.0.2.tgz",
|
||||
"integrity": "sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow=="
|
||||
},
|
||||
"node_modules/multimatch": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/multimatch/-/multimatch-5.0.0.tgz",
|
||||
@ -25221,6 +25365,80 @@
|
||||
"uint8arraylist": "^2.3.2"
|
||||
}
|
||||
},
|
||||
"@libp2p/webrtc": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/webrtc/-/webrtc-1.0.3.tgz",
|
||||
"integrity": "sha512-54TqJ6nK3dZR6kr6sR0BrkBhufv7adqeH+DiEsOxKGqgEsXJBhuPbH5roNdLiQkC2Rq55Zdvm0kwEULtw9UtzA==",
|
||||
"requires": {
|
||||
"@chainsafe/libp2p-noise": "^11.0.0",
|
||||
"@libp2p/interface-connection": "^3.0.2",
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/interface-stream-muxer": "^3.0.0",
|
||||
"@libp2p/interface-transport": "^2.0.0",
|
||||
"@libp2p/logger": "^2.0.0",
|
||||
"@libp2p/peer-id": "^2.0.0",
|
||||
"@multiformats/multiaddr": "^11.0.3",
|
||||
"@protobuf-ts/runtime": "^2.8.0",
|
||||
"err-code": "^3.0.1",
|
||||
"it-length-prefixed": "^8.0.3",
|
||||
"it-merge": "^2.0.0",
|
||||
"it-pipe": "^2.0.4",
|
||||
"it-pushable": "^3.1.0",
|
||||
"it-stream-types": "^1.0.4",
|
||||
"multiformats": "^11.0.0",
|
||||
"multihashes": "^4.0.3",
|
||||
"p-defer": "^4.0.0",
|
||||
"uint8arraylist": "^2.3.3",
|
||||
"uint8arrays": "^4.0.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"@chainsafe/libp2p-noise": {
|
||||
"version": "11.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@chainsafe/libp2p-noise/-/libp2p-noise-11.0.0.tgz",
|
||||
"integrity": "sha512-NEl5aIv6muz9OL+dsa3INEU89JX0NViBxOy7NwwG8eNRPUDHo5E3ZTMSHXQpVx1K/ofoNS4ANO9xwezY6ss5GA==",
|
||||
"requires": {
|
||||
"@libp2p/crypto": "^1.0.0",
|
||||
"@libp2p/interface-connection-encrypter": "^3.0.0",
|
||||
"@libp2p/interface-keys": "^1.0.2",
|
||||
"@libp2p/interface-metrics": "^4.0.2",
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/logger": "^2.0.0",
|
||||
"@libp2p/peer-id": "^2.0.0",
|
||||
"@stablelib/chacha20poly1305": "^1.0.1",
|
||||
"@stablelib/hkdf": "^1.0.1",
|
||||
"@stablelib/sha256": "^1.0.1",
|
||||
"@stablelib/x25519": "^1.0.1",
|
||||
"it-length-prefixed": "^8.0.2",
|
||||
"it-pair": "^2.0.2",
|
||||
"it-pb-stream": "^2.0.2",
|
||||
"it-pipe": "^2.0.3",
|
||||
"it-stream-types": "^1.0.4",
|
||||
"protons-runtime": "^4.0.1",
|
||||
"uint8arraylist": "^2.3.2",
|
||||
"uint8arrays": "^4.0.2"
|
||||
}
|
||||
},
|
||||
"@libp2p/interface-peer-id": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/interface-peer-id/-/interface-peer-id-2.0.1.tgz",
|
||||
"integrity": "sha512-k01hKHTAZWMOiBC+yyFsmBguEMvhPkXnQtqLtFqga2fVZu8Zve7zFAtQYLhQjeJ4/apeFtO6ddTS8mCE6hl4OA==",
|
||||
"requires": {
|
||||
"multiformats": "^11.0.0"
|
||||
}
|
||||
},
|
||||
"@libp2p/peer-id": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/peer-id/-/peer-id-2.0.1.tgz",
|
||||
"integrity": "sha512-uGIR4rS+j+IzzIu0kih4MonZEfRmjGNfXaSPMIFOeMxZItZT6TIpxoVNYxHl4YtneSFKzlLnf9yx9EhRcyfy8Q==",
|
||||
"requires": {
|
||||
"@libp2p/interface-peer-id": "^2.0.0",
|
||||
"@libp2p/interfaces": "^3.2.0",
|
||||
"multiformats": "^11.0.0",
|
||||
"uint8arrays": "^4.0.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"@libp2p/websockets": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@libp2p/websockets/-/websockets-5.0.2.tgz",
|
||||
@ -25307,6 +25525,11 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"@multiformats/base-x": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@multiformats/base-x/-/base-x-4.0.1.tgz",
|
||||
"integrity": "sha512-eMk0b9ReBbV23xXU693TAIrLyeO5iTgBZGSJfpqriG8UkYvr/hC9u9pyMlAakDNHWmbhMZCDs6KQO0jzKD8OTw=="
|
||||
},
|
||||
"@multiformats/mafmt": {
|
||||
"version": "11.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@multiformats/mafmt/-/mafmt-11.0.3.tgz",
|
||||
@ -25567,6 +25790,11 @@
|
||||
"integrity": "sha512-oZLYFEAzUKyi3SKnXvj32ZCEGH6RDnao7COuCVhDydMS9NrCSVXhM79VaKyP5+Zc33m0QXEd2DN3UkU7OsHcfw==",
|
||||
"dev": true
|
||||
},
|
||||
"@protobuf-ts/runtime": {
|
||||
"version": "2.8.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobuf-ts/runtime/-/runtime-2.8.2.tgz",
|
||||
"integrity": "sha512-PVxsH81y9kEbHldxxG/8Y3z2mTXWQytRl8zNS0mTPUjkEC+8GUX6gj6LsA8EFp25fAs9V0ruh+aNWmPccEI9MA=="
|
||||
},
|
||||
"@protobufjs/aspromise": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
|
||||
@ -33047,11 +33275,49 @@
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
|
||||
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
|
||||
},
|
||||
"multibase": {
|
||||
"version": "4.0.6",
|
||||
"resolved": "https://registry.npmjs.org/multibase/-/multibase-4.0.6.tgz",
|
||||
"integrity": "sha512-x23pDe5+svdLz/k5JPGCVdfn7Q5mZVMBETiC+ORfO+sor9Sgs0smJzAjfTbM5tckeCqnaUuMYoz+k3RXMmJClQ==",
|
||||
"requires": {
|
||||
"@multiformats/base-x": "^4.0.1"
|
||||
}
|
||||
},
|
||||
"multiformats": {
|
||||
"version": "11.0.0",
|
||||
"resolved": "https://registry.npmjs.org/multiformats/-/multiformats-11.0.0.tgz",
|
||||
"integrity": "sha512-vqF8bmMtbxw9Zn3eTpk0OZQdBVmAT/+bTGwXb3C2qCNkp45aJMmkCDds3lrtObECWPf+KFjFtTOHkvCaT/c/xQ=="
|
||||
},
|
||||
"multihashes": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/multihashes/-/multihashes-4.0.3.tgz",
|
||||
"integrity": "sha512-0AhMH7Iu95XjDLxIeuCOOE4t9+vQZsACyKZ9Fxw2pcsRmlX4iCn1mby0hS0bb+nQOVpdQYWPpnyusw4da5RPhA==",
|
||||
"requires": {
|
||||
"multibase": "^4.0.1",
|
||||
"uint8arrays": "^3.0.0",
|
||||
"varint": "^5.0.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"multiformats": {
|
||||
"version": "9.9.0",
|
||||
"resolved": "https://registry.npmjs.org/multiformats/-/multiformats-9.9.0.tgz",
|
||||
"integrity": "sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg=="
|
||||
},
|
||||
"uint8arrays": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/uint8arrays/-/uint8arrays-3.1.1.tgz",
|
||||
"integrity": "sha512-+QJa8QRnbdXVpHYjLoTpJIdCTiw9Ir62nocClWuXIq2JIh4Uta0cQsTSpFL678p2CN8B+XSApwcU+pQEqVpKWg==",
|
||||
"requires": {
|
||||
"multiformats": "^9.4.2"
|
||||
}
|
||||
},
|
||||
"varint": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/varint/-/varint-5.0.2.tgz",
|
||||
"integrity": "sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow=="
|
||||
}
|
||||
}
|
||||
},
|
||||
"multimatch": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/multimatch/-/multimatch-5.0.0.tgz",
|
||||
|
@@ -20,6 +20,7 @@
    "@chainsafe/libp2p-yamux": "^3.0.4",
    "@libp2p/mplex": "^7.1.1",
    "@libp2p/tcp": "^6.0.8",
    "@libp2p/webrtc": "^1.0.3",
    "@libp2p/websockets": "^5.0.1",
    "@libp2p/webtransport": "^1.0.7",
    "@multiformats/multiaddr": "^11.1.4",
@@ -35,4 +36,4 @@
    "standard": "^17.0.0",
    "typescript": "^4.9.4"
  }
}
}

@ -10,6 +10,7 @@ import { noise } from '@chainsafe/libp2p-noise'
|
||||
import { mplex } from '@libp2p/mplex'
|
||||
import { yamux } from '@chainsafe/libp2p-yamux'
|
||||
import { multiaddr } from '@multiformats/multiaddr'
|
||||
import { webRTC } from '@libp2p/webrtc'
|
||||
|
||||
async function redisProxy(commands: any[]): Promise<any> {
|
||||
const res = await fetch(`http://localhost:${process.env.proxyPort}/`, { body: JSON.stringify(commands), method: "POST" })
|
||||
@ -25,8 +26,8 @@ describe('ping test', () => {
|
||||
const SECURE_CHANNEL = process.env.security
|
||||
const MUXER = process.env.muxer
|
||||
const isDialer = process.env.is_dialer === "true"
|
||||
const IP = process.env.ip
|
||||
const timeoutSecs: string = process.env.test_timeout || "10"
|
||||
const IP = process.env.ip || "0.0.0.0"
|
||||
const timeoutSecs: string = process.env.test_timeout_secs || "180"
|
||||
|
||||
const options: Libp2pOptions = {
|
||||
start: true
|
||||
@ -45,6 +46,12 @@ describe('ping test', () => {
|
||||
throw new Error("WebTransport is not supported as a listener")
|
||||
}
|
||||
break
|
||||
case 'webrtc':
|
||||
options.transports = [webRTC()]
|
||||
options.addresses = {
|
||||
listen: isDialer ? [] : [`/ip4/${IP}/udp/0/webrtc`]
|
||||
}
|
||||
break
|
||||
case 'ws':
|
||||
options.transports = [webSockets()]
|
||||
options.addresses = {
|
||||
@ -55,28 +62,44 @@ describe('ping test', () => {
|
||||
throw new Error(`Unknown transport: ${TRANSPORT}`)
|
||||
}
|
||||
|
||||
switch (SECURE_CHANNEL) {
|
||||
case 'noise':
|
||||
options.connectionEncryption = [noise()]
|
||||
break
|
||||
case 'quic':
|
||||
options.connectionEncryption = [noise()]
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unknown secure channel: ${SECURE_CHANNEL}`)
|
||||
let skipSecureChannel = false
|
||||
let skipMuxer = false
|
||||
switch (TRANSPORT) {
|
||||
case 'webtransport':
|
||||
case 'webrtc':
|
||||
skipSecureChannel = true
|
||||
skipMuxer = true
|
||||
}
|
||||
|
||||
switch (MUXER) {
|
||||
case 'mplex':
|
||||
options.streamMuxers = [mplex()]
|
||||
break
|
||||
case 'yamux':
|
||||
options.streamMuxers = [yamux()]
|
||||
break
|
||||
case 'quic':
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unknown muxer: ${MUXER}`)
|
||||
if (!skipSecureChannel) {
|
||||
switch (SECURE_CHANNEL) {
|
||||
case 'noise':
|
||||
options.connectionEncryption = [noise()]
|
||||
break
|
||||
case 'quic':
|
||||
options.connectionEncryption = [noise()]
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unknown secure channel: ${SECURE_CHANNEL}`)
|
||||
}
|
||||
} else {
|
||||
// Libp2p requires at least one encryption module. Even if unused.
|
||||
options.connectionEncryption = [noise()]
|
||||
}
|
||||
|
||||
if (!skipMuxer) {
|
||||
switch (MUXER) {
|
||||
case 'mplex':
|
||||
options.streamMuxers = [mplex()]
|
||||
break
|
||||
case 'yamux':
|
||||
options.streamMuxers = [yamux()]
|
||||
break
|
||||
case 'quic':
|
||||
break
|
||||
default:
|
||||
throw new Error(`Unknown muxer: ${MUXER}`)
|
||||
}
|
||||
}
|
||||
|
||||
const node = await createLibp2p(options)
|
||||
@ -84,21 +107,26 @@ describe('ping test', () => {
|
||||
try {
|
||||
if (isDialer) {
|
||||
const otherMa = (await redisProxy(["BLPOP", "listenerAddr", timeoutSecs]).catch(err => { throw new Error("Failed to wait for listener") }))[1]
|
||||
console.log(`node ${node.peerId} pings: ${otherMa}`)
|
||||
const rtt = await node.ping(multiaddr(otherMa))
|
||||
console.log(`Ping successful: ${rtt}`)
|
||||
await redisProxy(["RPUSH", "dialerDone", ""])
|
||||
console.error(`node ${node.peerId} pings: ${otherMa}`)
|
||||
const handshakeStartInstant = Date.now()
|
||||
await node.dial(multiaddr(otherMa))
|
||||
const pingRTT = await node.ping(multiaddr(otherMa))
|
||||
const handshakePlusOneRTT = Date.now() - handshakeStartInstant
|
||||
console.log(JSON.stringify({
|
||||
handshakePlusOneRTTMillis: handshakePlusOneRTT,
|
||||
pingRTTMilllis: pingRTT
|
||||
}))
|
||||
} else {
|
||||
const multiaddrs = node.getMultiaddrs().map(ma => ma.toString()).filter(maString => !maString.includes("127.0.0.1"))
|
||||
console.log("My multiaddrs are", multiaddrs)
|
||||
console.error("My multiaddrs are", multiaddrs)
|
||||
// Send the listener addr over the proxy server so this works on both the Browser and Node
|
||||
await redisProxy(["RPUSH", "listenerAddr", multiaddrs[0]])
|
||||
try {
|
||||
await redisProxy(["BLPOP", "dialerDone", timeoutSecs])
|
||||
} catch {
|
||||
throw new Error("Failed to wait for dialer to finish")
|
||||
}
|
||||
// Wait
|
||||
await new Promise(resolve => setTimeout(resolve, 1000 * parseInt(timeoutSecs, 10)))
|
||||
}
|
||||
} catch (err) {
|
||||
console.error("unexpected exception in ping test", err)
|
||||
throw err
|
||||
} finally {
|
||||
try {
|
||||
// We don't care if this fails
|
||||
|
multidim-interop/rust/.gitignore | 1 (vendored)
@@ -1 +0,0 @@
target

multidim-interop/rust/Cargo.lock | 4186 (generated)
File diff suppressed because it is too large.
@@ -1,19 +0,0 @@
[package]
edition = "2021"
name = "testplan"
version = "0.2.0"

[dependencies]
anyhow = "1"
async-trait = "0.1.58"
env_logger = "0.9.0"
futures = "0.3.1"
if-addrs = "0.7.0"
log = "0.4"
redis = { version = "0.22.1", features = ["tokio-native-tls-comp", "tokio-comp"] }
tokio = { version = "1.24.1", features = ["full"] }

libp2pv0500 = { package = "libp2p", version = "0.50.0", features = ["websocket", "webrtc", "quic", "mplex", "yamux", "tcp", "tokio", "ping", "noise", "tls", "dns", "rsa", "macros"] }
rand = "0.8.5"
strum = { version = "0.24.1", features = ["derive"] }

@@ -1,32 +0,0 @@
FROM rust:1.62-bullseye as builder
WORKDIR /usr/src/testplan

# TODO fix this, it breaks reproducibility
RUN apt-get update && apt-get install -y cmake protobuf-compiler

RUN mkdir -p ./plan/src
COPY ./src/main.rs ./plan/src/main.rs
COPY ./Cargo.toml ./plan/Cargo.toml
COPY ./Cargo.lock ./plan/Cargo.lock

RUN cd ./plan/ && cargo build # Initial build acts as a cache.

ARG GIT_TARGET=""
RUN if [ ! -z "${GIT_TARGET}" ]; then sed -i "s,^git.*,git = \"https://${GIT_TARGET}\"," ./plan/Cargo.toml; fi

ARG GIT_REF=""
RUN if [ "master" = "${GIT_REF}" ]; then sed -i "s/^rev.*/branch= \"master\"/" ./plan/Cargo.toml; elif [ ! -z "${GIT_REF}" ]; then sed -i "s/^rev.*/rev = \"${GIT_REF}\"/" ./plan/Cargo.toml; fi

COPY ./src/lib.rs ./plan/src/lib.rs
COPY ./src/bin/ ./plan/src/bin/

# Build the requested binary: Cargo will update lockfile on changed manifest (i.e. if one of the above `sed`s patched it).
ARG BINARY_NAME
RUN cd ./plan/ \
    && cargo build --bin=${BINARY_NAME} \
    && mv /usr/src/testplan/plan/target/debug/${BINARY_NAME} /usr/local/bin/testplan

FROM debian:bullseye-slim
COPY --from=builder /usr/local/bin/testplan /usr/local/bin/testplan
ENV RUST_BACKTRACE=1
ENTRYPOINT ["testplan"]

@@ -1,12 +0,0 @@
all: v0.50/image.json

v0.50/image.json: Dockerfile src/*/*.rs src/*.rs Cargo.lock Cargo.toml
	mkdir -p $(shell dirname $@)
	IMAGE_NAME=rust-$@ ../dockerBuildWrapper.sh . --build-arg BINARY_NAME=testplan_0500
	docker image inspect rust-$@ -f "{{.Id}}" | \
		xargs -I {} echo "{\"imageID\": \"{}\"}" > $@

clean:
	rm v0.50/image.json

.PHONY: clean all

@ -1,222 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
use std::env;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use futures::{AsyncRead, AsyncWrite, StreamExt};
|
||||
use libp2p::core::muxing::StreamMuxerBox;
|
||||
use libp2p::core::transport::Boxed;
|
||||
use libp2p::core::upgrade::EitherUpgrade;
|
||||
use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmEvent};
|
||||
use libp2p::websocket::WsConfig;
|
||||
use libp2p::{
|
||||
core, identity, mplex, noise, ping, webrtc, yamux, Multiaddr, PeerId, Swarm, Transport as _,
|
||||
};
|
||||
use libp2pv0500 as libp2p;
|
||||
use testplan::{run_ping, Muxer, PingSwarm, SecProtocol, Transport};
|
||||
|
||||
fn build_builder<T, C>(
|
||||
builder: core::transport::upgrade::Builder<T>,
|
||||
secure_channel_param: SecProtocol,
|
||||
muxer_param: Muxer,
|
||||
local_key: &identity::Keypair,
|
||||
) -> Boxed<(libp2p::PeerId, StreamMuxerBox)>
|
||||
where
|
||||
T: libp2p::Transport<Output = C> + Send + Unpin + 'static,
|
||||
<T as libp2p::Transport>::Error: Sync + Send + 'static,
|
||||
<T as libp2p::Transport>::ListenerUpgrade: Send,
|
||||
<T as libp2p::Transport>::Dial: Send,
|
||||
C: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
let mux_upgrade = match muxer_param {
|
||||
Muxer::Yamux => EitherUpgrade::A(yamux::YamuxConfig::default()),
|
||||
Muxer::Mplex => EitherUpgrade::B(mplex::MplexConfig::default()),
|
||||
};
|
||||
|
||||
let timeout = Duration::from_secs(5);
|
||||
|
||||
match secure_channel_param {
|
||||
SecProtocol::Noise => builder
|
||||
.authenticate(noise::NoiseAuthenticated::xx(&local_key).unwrap())
|
||||
.multiplex(mux_upgrade)
|
||||
.timeout(timeout)
|
||||
.boxed(),
|
||||
SecProtocol::Tls => builder
|
||||
.authenticate(libp2p::tls::Config::new(&local_key).unwrap())
|
||||
.multiplex(mux_upgrade)
|
||||
.timeout(timeout)
|
||||
.boxed(),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let local_key = identity::Keypair::generate_ed25519();
|
||||
let local_peer_id = PeerId::from(local_key.public());
|
||||
|
||||
let transport_param: Transport =
|
||||
testplan::from_env("transport").context("unsupported transport")?;
|
||||
|
||||
let ip = env::var("ip").context("ip environment variable is not set")?;
|
||||
|
||||
let is_dialer = env::var("is_dialer")
|
||||
.unwrap_or("true".into())
|
||||
.parse::<bool>()?;
|
||||
|
    let timeout_secs: usize = env::var("test_timeout")
        .ok()
        .and_then(|timeout| timeout.parse().ok())
        .unwrap_or(10);

    let redis_addr = env::var("REDIS_ADDR")
        .map(|addr| format!("redis://{addr}"))
        .unwrap_or("redis://redis:6379".into());

    let client = redis::Client::open(redis_addr).context("Could not connect to redis")?;

    let (boxed_transport, local_addr) = match transport_param {
        Transport::QuicV1 => {
            let builder =
                libp2p::quic::tokio::Transport::new(libp2p::quic::Config::new(&local_key))
                    .map(|(p, c), _| (p, StreamMuxerBox::new(c)));
            (builder.boxed(), format!("/ip4/{ip}/udp/0/quic-v1"))
        }
        Transport::Tcp => {
            let builder = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::new())
                .upgrade(libp2p::core::upgrade::Version::V1Lazy);

            let secure_channel_param: SecProtocol =
                testplan::from_env("security").context("unsupported secure channel")?;

            let muxer_param: Muxer =
                testplan::from_env("muxer").context("unsupported multiplexer")?;

            (
                build_builder(builder, secure_channel_param, muxer_param, &local_key),
                format!("/ip4/{ip}/tcp/0"),
            )
        }
        Transport::Ws => {
            let builder = WsConfig::new(libp2p::tcp::tokio::Transport::new(
                libp2p::tcp::Config::new(),
            ))
            .upgrade(libp2p::core::upgrade::Version::V1Lazy);

            let secure_channel_param: SecProtocol =
                testplan::from_env("security").context("unsupported secure channel")?;

            let muxer_param: Muxer =
                testplan::from_env("muxer").context("unsupported multiplexer")?;

            (
                build_builder(builder, secure_channel_param, muxer_param, &local_key),
                format!("/ip4/{ip}/tcp/0/ws"),
            )
        }
        Transport::Webrtc => (
            webrtc::tokio::Transport::new(
                local_key,
                webrtc::tokio::Certificate::generate(&mut rand::thread_rng())?,
            )
            .map(|(peer_id, conn), _| (peer_id, StreamMuxerBox::new(conn)))
            .boxed(),
            format!("/ip4/{ip}/udp/0/webrtc"),
        ),
    };

    let swarm = OrphanRuleWorkaround(Swarm::with_tokio_executor(
        boxed_transport,
        Behaviour {
            ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(1))),
            keep_alive: keep_alive::Behaviour,
        },
        local_peer_id,
    ));

    // Use peer id as a String so that `run_ping` does not depend on a specific libp2p version.
    let local_peer_id = local_peer_id.to_string();
    run_ping(
        client,
        swarm,
        &local_addr,
        &local_peer_id,
        is_dialer,
        timeout_secs,
    )
    .await?;

    Ok(())
}

#[derive(NetworkBehaviour)]
#[behaviour(prelude = "libp2pv0500::swarm::derive_prelude")]
struct Behaviour {
    ping: ping::Behaviour,
    keep_alive: keep_alive::Behaviour,
}
struct OrphanRuleWorkaround(Swarm<Behaviour>);

#[async_trait]
impl PingSwarm for OrphanRuleWorkaround {
    async fn listen_on(&mut self, address: &str) -> Result<String> {
        let id = self.0.listen_on(address.parse()?)?;

        loop {
            if let Some(SwarmEvent::NewListenAddr {
                listener_id,
                address,
            }) = self.0.next().await
            {
                if address.to_string().contains("127.0.0.1") {
                    continue;
                }
                if listener_id == id {
                    return Ok(address.to_string());
                }
            }
        }
    }

    fn dial(&mut self, address: &str) -> Result<()> {
        self.0.dial(address.parse::<Multiaddr>()?)?;

        Ok(())
    }

    async fn await_connections(&mut self, number: usize) {
        let mut connected = HashSet::with_capacity(number);

        while connected.len() < number {
            if let Some(SwarmEvent::ConnectionEstablished { peer_id, .. }) = self.0.next().await {
                connected.insert(peer_id);
            }
        }
    }

    async fn await_pings(&mut self, number: usize) -> Vec<Duration> {
        let mut received_pings = Vec::with_capacity(number);

        while received_pings.len() < number {
            if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event {
                peer: _,
                result: Ok(ping::Success::Ping { rtt }),
            }))) = self.0.next().await
            {
                received_pings.push(rtt);
            }
        }

        received_pings
    }

    async fn loop_on_next(&mut self) {
        loop {
            self.0.next().await;
        }
    }

    fn local_peer_id(&self) -> String {
        self.0.local_peer_id().to_string()
    }
}
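For context, all of the parameters matched on above arrive as plain environment variables injected by the interop runner's compose spec (see the `buildSpec` changes further down). A minimal, self-contained sketch of the environment one TCP test case would provide and of the timeout fallback used above; the concrete values are illustrative, not taken from a real run:

// Sketch only: these are the env vars the test binary above consumes.
use std::env;

fn main() {
    // Normally set in the docker-compose service definition, not in code.
    env::set_var("transport", "tcp");       // tcp | quic-v1 | ws | webrtc
    env::set_var("security", "noise");      // noise | tls (TCP/WS only)
    env::set_var("muxer", "yamux");         // yamux | mplex (TCP/WS only)
    env::set_var("is_dialer", "true");
    env::set_var("ip", "0.0.0.0");
    env::set_var("REDIS_ADDR", "redis:6379");

    // Same fallback chain as above: missing or unparsable => 10 seconds.
    let timeout_secs: usize = env::var("test_timeout")
        .ok()
        .and_then(|t| t.parse().ok())
        .unwrap_or(10);
    println!("test timeout: {timeout_secs}s");
}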
@ -1,122 +0,0 @@
use std::{env, str::FromStr, time::Duration};

use anyhow::{Context, Result};
use env_logger::Env;
use log::info;
use redis::{AsyncCommands, Client as Rclient};
use strum::EnumString;

/// Transports supported by rust-libp2p.
#[derive(Clone, Debug, EnumString)]
#[strum(serialize_all = "kebab-case")]
pub enum Transport {
    Tcp,
    QuicV1,
    Webrtc,
    Ws,
}

/// Stream multiplexers supported by rust-libp2p.
#[derive(Clone, Debug, EnumString)]
#[strum(serialize_all = "kebab-case")]
pub enum Muxer {
    Mplex,
    Yamux,
}

/// Security protocols supported by rust-libp2p.
#[derive(Clone, Debug, EnumString)]
#[strum(serialize_all = "kebab-case")]
pub enum SecProtocol {
    Noise,
    Tls,
}

/// Helper function to parse an environment variable into a test parameter like `Transport`.
pub fn from_env<T>(env_var: &str) -> Result<T>
where
    T: FromStr,
    T::Err: std::error::Error + Send + Sync + 'static,
{
    env::var(env_var)
        .with_context(|| format!("{env_var} environment variable is not set"))?
        .parse()
        .map_err(Into::into)
}
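A quick note on how this helper fits together with the derives above: the `strum` kebab-case serialization means the strings the runner sets (for example `transport=quic-v1`) parse directly into the enum variants. A tiny self-contained sketch of that round trip (the enum is repeated here only so the snippet compiles on its own; it assumes the `strum` crate with its derive feature):

use std::str::FromStr;
use strum::EnumString;

#[derive(Debug, EnumString)]
#[strum(serialize_all = "kebab-case")]
enum Transport {
    Tcp,
    QuicV1,
    Webrtc,
    Ws,
}

fn main() {
    std::env::set_var("transport", "quic-v1");
    // Equivalent to `from_env::<Transport>("transport")` above.
    let t = Transport::from_str(&std::env::var("transport").unwrap()).unwrap();
    println!("{t:?}"); // prints: QuicV1
}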

/// PingSwarm allows us to abstract over libp2p versions for `run_ping`.
#[async_trait::async_trait]
pub trait PingSwarm: Sized + Send + 'static {
    async fn listen_on(&mut self, address: &str) -> Result<String>;

    fn dial(&mut self, address: &str) -> Result<()>;

    async fn await_connections(&mut self, number: usize);

    async fn await_pings(&mut self, number: usize) -> Vec<Duration>;

    async fn loop_on_next(&mut self);

    fn local_peer_id(&self) -> String;
}

/// Run a ping interop test. If `is_dialer` is true, dial the address retrieved via the
/// `listenerAddr` key on the redis connection; otherwise, wait to be pinged and for the
/// `dialerDone` key to be pushed onto the redis connection.
pub async fn run_ping<S>(
    client: Rclient,
    mut swarm: S,
    local_addr: &str,
    local_peer_id: &str,
    is_dialer: bool,
    redis_timeout_secs: usize,
) -> Result<()>
where
    S: PingSwarm,
{
    let mut conn = client.get_async_connection().await?;

    info!("Running ping test: {}", swarm.local_peer_id());
    env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();

    info!(
        "Test instance, listening for incoming connections on: {:?}.",
        local_addr
    );
    let local_addr = swarm.listen_on(local_addr).await?;

    if is_dialer {
        let result: Vec<String> = conn.blpop("listenerAddr", redis_timeout_secs).await?;
        let other = result
            .get(1)
            .context("Failed to wait for listener to be ready")?;

        swarm.dial(other)?;
        info!("Test instance, dialing multiaddress on: {}.", other);

        swarm.await_connections(1).await;

        let results = swarm.await_pings(1).await;

        conn.rpush("dialerDone", "").await?;
        info!(
            "Ping successful: {:?}",
            results.first().expect("Should have a ping result")
        );
    } else {
        let ma = format!("{local_addr}/p2p/{local_peer_id}");
        conn.rpush("listenerAddr", ma).await?;

        // Drive the Swarm in the background while we wait for `dialerDone` to be ready.
        tokio::spawn(async move {
            swarm.loop_on_next().await;
        });

        let done: Vec<String> = conn.blpop("dialerDone", redis_timeout_secs).await?;
        done.get(1)
            .context("Failed to wait for dialer conclusion")?;
        info!("Ping successful");
    }

    Ok(())
}
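In other words, the whole cross-container handshake is just two Redis list keys: the listener RPUSHes its full multiaddr (including the `/p2p/<peer-id>` suffix) onto `listenerAddr`, the dialer BLPOPs it, connects and pings, then RPUSHes onto `dialerDone`, which the listener BLPOPs before exiting. A stripped-down sketch of the dialer's side of that handshake, using the same redis/anyhow/tokio crates as above (the address and 10-second timeout are the defaults from this file; the actual dial and ping step is elided):

use anyhow::{Context, Result};
use redis::AsyncCommands;

#[tokio::main]
async fn main() -> Result<()> {
    let client = redis::Client::open("redis://redis:6379")?;
    let mut conn = client.get_async_connection().await?;

    // Block (up to 10s) until the listener publishes its multiaddr.
    let result: Vec<String> = conn.blpop("listenerAddr", 10).await?;
    let listener_addr = result
        .get(1) // BLPOP returns [key, value]
        .context("listener never published its address")?;
    println!("would dial {listener_addr} and wait for one ping RTT");

    // Signal completion so the listener side can stop driving its swarm.
    let _: () = conn.rpush("dialerDone", "").await?;
    Ok(())
}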
@ -1,3 +0,0 @@
fn main() {
    println!("This is a dummy main file that is used for creating a cache layer inside the docker container.")
}
4
multidim-interop/rust/v0.50/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
rust-libp2p-*.zip
rust-libp2p-*
rust-libp2p-*/*
image.json
20
multidim-interop/rust/v0.50/Makefile
Normal file
@ -0,0 +1,20 @@
image_name := rust-v0.50
commitSha := 1f0f0e240cc25480d1b5fb488abfb69bf06812f0

all: image.json

image.json: rust-libp2p-${commitSha}
	cd rust-libp2p-${commitSha} && IMAGE_NAME=${image_name} ../../../dockerBuildWrapper.sh -f interop-tests/Dockerfile .
	docker image inspect ${image_name} -f "{{.Id}}" | \
		xargs -I {} echo "{\"imageID\": \"{}\"}" > $@

rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip
	unzip -o rust-libp2p-${commitSha}.zip

rust-libp2p-${commitSha}.zip:
	wget -O $@ "https://github.com/libp2p/rust-libp2p/archive/${commitSha}.zip"

clean:
	rm image.json
	rm rust-libp2p-*.zip
	rm -rf rust-libp2p-*
@ -20,8 +20,8 @@ export type RunFailure = any

export async function run(namespace: string, compose: ComposeSpecification, opts: RunOpts): Promise<RunFailure | null> {
    // sanitize namespace
    namespace = namespace.replace(/[^a-zA-Z0-9]/g, "-")
    const dir = path.join(tmpdir(), "compose-runner", namespace)
    const sanitizedNamespace = namespace.replace(/[^a-zA-Z0-9]/g, "-")
    const dir = path.join(tmpdir(), "compose-runner", sanitizedNamespace)

    // Check if directory exists
    try {
@ -43,17 +43,22 @@ export async function run(namespace: string, compose: ComposeSpecification, opts
    }

    try {
        const { stdout, stderr } = await exec(`docker compose -f ${path.join(dir, "compose.yaml")} up ${upFlags.join(" ")}`);
        console.log("Finished:", stdout)
    } catch (e: any) {
        if (e !== null && typeof e === "object" && typeof e["stdout"] === "string") {
            if (e["stdout"].match(/dialer.*ping successful/i) !== null) {
                // The ping succeeded, but the listener exited first. Common if
                // the dialer tear-down is slow as is the case with browser
                // tests.
                return null
            }
        }
        const timeoutSecs = 3 * 60
        let timeoutId
        const { stdout, stderr } =
            (await Promise.race([
                exec(`docker compose -f ${path.join(dir, "compose.yaml")} up ${upFlags.join(" ")}`),
                // Timeout - uses any type because this will only reject the promise.
                new Promise<any>((resolve, reject) => { timeoutId = setTimeout(() => reject("Timeout"), 1000 * timeoutSecs) })
            ]))
        clearTimeout(timeoutId)
        const testResults = stdout.match(/.*dialer.*({.*)/)
        if (testResults === null || testResults.length < 2) {
            throw new Error("Test JSON results not found")
        }
        const testResultsParsed = JSON.parse(testResults[1])
        console.log("Finished:", namespace, testResultsParsed)
    } catch (e: any) {
        console.log("Failure", e)
        return e
    } finally {
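Two things change in the runner here: `docker compose up` is now raced against a 3-minute timer, and instead of merely logging stdout the runner extracts a JSON results object from the dialer's output (the `/.*dialer.*({.*)/` match followed by `JSON.parse`). The same extraction step, sketched in Rust with the `regex` and `serde_json` crates purely for illustration; the sample log line and its field are made up, since the real field names come from the interop test spec:

use regex::Regex;
use serde_json::Value;

// Mirror of `stdout.match(/.*dialer.*({.*)/)` + `JSON.parse(...)` above.
fn parse_dialer_results(stdout: &str) -> Option<Value> {
    let re = Regex::new(r".*dialer.*(\{.*)").ok()?;
    let caps = re.captures(stdout)?;
    serde_json::from_str(caps.get(1)?.as_str()).ok()
}

fn main() {
    // Illustrative compose log line; real output depends on the implementation under test.
    let stdout = r#"dialer-1  | {"pingRttMillis": 12.5}"#;
    println!("{:?}", parse_dialer_results(stdout));
}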
@ -5,8 +5,7 @@ import { ComposeSpecification } from "../compose-spec/compose-spec";

function buildExtraEnv(timeoutOverride: { [key: string]: number }, test1ID: string, test2ID: string): { [key: string]: string } {
    const maxTimeout = Math.max(timeoutOverride[test1ID] || 0, timeoutOverride[test2ID] || 0)
    console.log("Max is", maxTimeout)
    return maxTimeout > 0 ? { "test_timeout": maxTimeout.toString(10) } : {}
    return maxTimeout > 0 ? { "test_timeout_seconds": maxTimeout.toString(10) } : {}
}

export async function buildTestSpecs(versions: Array<Version>): Promise<Array<ComposeSpecification>> {
@ -69,12 +68,14 @@ export async function buildTestSpecs(versions: Array<Version>): Promise<Array<Co
        await db.all(`SELECT DISTINCT a.id as id1, b.id as id2, a.transport
                    FROM transports a, transports b
                    WHERE a.transport == b.transport
                    AND NOT b.onlyDial
                    -- Only quic transports
                    AND a.transport == "quic";`);
    const quicV1QueryResults =
        await db.all(`SELECT DISTINCT a.id as id1, b.id as id2, a.transport
                    FROM transports a, transports b
                    WHERE a.transport == b.transport
                    AND NOT b.onlyDial
                    -- Only quic transports
                    AND a.transport == "quic-v1";`);
    const webtransportQueryResults =
@ -88,6 +89,7 @@ export async function buildTestSpecs(versions: Array<Version>): Promise<Array<Co
        await db.all(`SELECT DISTINCT a.id as id1, b.id as id2, a.transport
                    FROM transports a, transports b
                    WHERE a.transport == b.transport
                    AND NOT b.onlyDial
                    -- Only webrtc transports
                    AND a.transport == "webrtc";`);
    await db.close();
@ -111,8 +113,6 @@ export async function buildTestSpecs(versions: Array<Version>): Promise<Array<Co
            dialerID: test.id1,
            listenerID: test.id2,
            transport: test.transport,
            muxer: "quic",
            security: "quic",
            extraEnv: buildExtraEnv(timeoutOverride, test.id1, test.id2)
        })))
        .concat(webrtcQueryResults
@ -121,15 +121,13 @@ export async function buildTestSpecs(versions: Array<Version>): Promise<Array<Co
            dialerID: test.id1,
            listenerID: test.id2,
            transport: test.transport,
            muxer: "webrtc",
            security: "webrtc",
            extraEnv: buildExtraEnv(timeoutOverride, test.id1, test.id2)
        })))

    return testSpecs
}

function buildSpec(containerImages: { [key: string]: string }, { name, dialerID, listenerID, transport, muxer, security, extraEnv }: { name: string, dialerID: string, listenerID: string, transport: string, muxer: string, security: string, extraEnv?: { [key: string]: string } }): ComposeSpecification {
function buildSpec(containerImages: { [key: string]: string }, { name, dialerID, listenerID, transport, muxer, security, extraEnv }: { name: string, dialerID: string, listenerID: string, transport: string, muxer?: string, security?: string, extraEnv?: { [key: string]: string } }): ComposeSpecification {
    return {
        name,
        services: {
@ -139,27 +137,35 @@ function buildSpec(containerImages: { [key: string]: string }, { name, dialerID,
                environment: {
                    version: dialerID,
                    transport,
                    muxer,
                    security,
                    is_dialer: true,
                    ip: "0.0.0.0",
                    ...(!!muxer && { muxer }),
                    ...(!!security && { security }),
                    ...extraEnv,
                }
            },
            listener: {
                // Add init process to be PID 1 to proxy signals. Rust doesn't
                // handle SIGINT without this
                init: true,
                image: containerImages[listenerID],
                depends_on: ["redis"],
                environment: {
                    version: listenerID,
                    transport,
                    muxer,
                    security,
                    is_dialer: false,
                    ip: "0.0.0.0",
                    ...(!!muxer && { muxer }),
                    ...(!!security && { security }),
                    ...extraEnv,
                }
            },
            redis: { image: "redis/redis-stack", }
            redis: {
                image: "redis/redis-stack",
                environment: {
                    REDIS_ARGS: "--loglevel warning"
                }
            }
        }
    }
}
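The other notable change in `buildSpec` above is that `muxer` and `security` are now optional and only written into the container environment when set (the `...(!!muxer && { muxer })` spread), since QUIC and WebRTC bring their own encryption and stream multiplexing and the quic/webrtc test entries no longer pass those values. The same conditional-environment construction, sketched in Rust with a plain HashMap purely for illustration (not part of this change):

use std::collections::HashMap;

fn build_env(
    transport: &str,
    muxer: Option<&str>,
    security: Option<&str>,
) -> HashMap<String, String> {
    let mut env = HashMap::from([
        ("transport".to_string(), transport.to_string()),
        ("is_dialer".to_string(), "true".to_string()),
        ("ip".to_string(), "0.0.0.0".to_string()),
    ]);
    // Mirrors `...(!!muxer && { muxer })`: only set the key when a value exists.
    if let Some(m) = muxer {
        env.insert("muxer".to_string(), m.to_string());
    }
    if let Some(s) = security {
        env.insert("security".to_string(), s.to_string());
    }
    env
}

fn main() {
    println!("{:?}", build_env("quic-v1", None, None));
    println!("{:?}", build_env("tcp", Some("yamux"), Some("noise")));
}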
@ -26,16 +26,14 @@ export const versions: Array<Version> = [
    {
        id: "js-v0.41.0",
        containerImageID: jsV041.imageID,
        timeoutSecs: 30,
        transports: ["tcp", "ws"],
        secureChannels: ["noise"],
        muxers: ["mplex", "yamux"],
    },
    {
        id: "chromium-js-v0.41.0",
        timeoutSecs: 30,
        containerImageID: chromiumJsV041.imageID,
        transports: [{ name: "webtransport", onlyDial: true }],
        transports: [{ name: "webtransport", onlyDial: true }, { name: "webrtc", onlyDial: true }],
        secureChannels: [],
        muxers: []
    },