feat(perf): continuously measure on single conn (iperf-style) (#276)

Our current throughput tests open a connection, open a stream,
upload or download 100 MB, and close the connection. 100 MB is not enough on the
given path (60 ms, ~5 Gbit/s) to exit the congestion controller's slow start. See
https://github.com/libp2p/test-plans/issues/261 for details.

Instead of transferring 100 MB multiple times, each time on a new connection, establish
a single connection and continuously measure the throughput for a fixed
duration (20 s).
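
As an illustration of the new reporting pattern (not code from this commit): a minimal Go sketch that writes to an already-established stream for a fixed duration and prints one `intermediary` JSON line per second plus a `final` line at the end. The `measureUpload` helper, the 64 KiB block size, and driving the duration in-process are assumptions of the sketch; the actual implementations report from inside their send/receive paths, and the runner enforces the duration externally via `timeout`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"time"
)

// Report mirrors the JSON lines printed to stdout: per-second
// "intermediary" reports followed by a single "final" report.
type Report struct {
	Type          string  `json:"type"`
	TimeSeconds   float64 `json:"timeSeconds"`
	UploadBytes   uint64  `json:"uploadBytes"`
	DownloadBytes uint64  `json:"downloadBytes"`
}

// measureUpload writes zeroed blocks to an already-established stream for
// `duration` and reports progress roughly once per second.
func measureUpload(stream io.Writer, duration time.Duration) error {
	buf := make([]byte, 64<<10) // 64 KiB blocks (arbitrary for the sketch)
	start := time.Now()
	lastReport := start
	var sinceReport, total uint64

	for time.Since(start) < duration {
		n, err := stream.Write(buf)
		if err != nil {
			return err
		}
		sinceReport += uint64(n)
		total += uint64(n)

		if now := time.Now(); now.Sub(lastReport) >= time.Second {
			printReport("intermediary", now.Sub(lastReport).Seconds(), sinceReport)
			lastReport, sinceReport = now, 0
		}
	}
	printReport("final", time.Since(start).Seconds(), total)
	return nil
}

func printReport(typ string, seconds float64, uploaded uint64) {
	b, err := json.Marshal(Report{Type: typ, TimeSeconds: seconds, UploadBytes: uploaded})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}

func main() {
	// Demo against io.Discard; a real client would pass a libp2p/QUIC/TCP stream.
	if err := measureUpload(io.Discard, 3*time.Second); err != nil {
		panic(err)
	}
}
```

Run against `io.Discard` this only demonstrates the reporting cadence; the numbers become meaningful once the stream is backed by a real network connection.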
Max Inden 2023-10-25 13:24:08 +02:00 committed by GitHub
parent 9247c9fd99
commit 0a8dbab87c
16 changed files with 32392 additions and 1551 deletions

View File

@ -79,11 +79,33 @@ Given you have provisioned your infrastructure, you can now build and run the li
- `--download-bytes` number of bytes to download per stream.
- Output
- Logging MUST go to `stderr`.
- Measurement output is printed to `stdout` as JSON in the form of:
```json
{"latency": 0.246442851}
```
Note that the measurement includes the time to (1) establish the
connection, (2) upload the bytes and (3) download the bytes.
- Measurement output is printed to `stdout` as JSON.
- The output schema is:
``` typescript
interface Data {
type: "intermediary" | "final";
timeSeconds: number;
uploadBytes: number;
downloadBytes: number;
}
```
- Every second the client must print the current progress to stdout. See example below. Note the `type: "intermediary"`.
``` json
{
"type": "intermediary",
"timeSeconds": 1.004957645,
"uploadBytes": 73039872,
"downloadBytes": 0
}
```
- Before terminating, the client must print a final summary. See example below. Note the `type: "final"`. Also note that the measurement includes the time to (1) establish the connection, (2) upload the bytes, and (3) download the bytes. A sketch that parses these report lines follows this list.
``` json
{
"type": "final",
"timeSeconds": 60.127230659,
"uploadBytes": 4382392320,
"downloadBytes": 0
}
```
2. For a new implementation, include it in the `all` target of [`impl/Makefile`](./impl/Makefile#L7).
3. For a new version, reference the version in [`runner/src/versions.ts`](./runner/src/versions.ts#L7-L43).
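
The runner parses these stdout lines in TypeScript (see `runner/src/benchmark.ts` below); purely as an illustration, here is a hedged Go sketch that reads one JSON report per line and derives throughput in bit/s as `(uploadBytes + downloadBytes) * 8 / timeSeconds`. The piping setup and output formatting are assumptions of the sketch, not part of this commit.

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// Report matches the output schema above: one JSON object per stdout line.
type Report struct {
	Type          string  `json:"type"`
	TimeSeconds   float64 `json:"timeSeconds"`
	UploadBytes   uint64  `json:"uploadBytes"`
	DownloadBytes uint64  `json:"downloadBytes"`
}

func main() {
	// Read the perf client's stdout line by line, e.g. `perf ... | go run parse.go`.
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		var r Report
		if err := json.Unmarshal(scanner.Bytes(), &r); err != nil {
			log.Fatalf("invalid report line: %s", err)
		}
		if r.TimeSeconds <= 0 {
			continue // avoid dividing by zero on malformed input
		}
		// Throughput for this report's interval, in Gbit/s.
		gbits := float64(r.UploadBytes+r.DownloadBytes) * 8 / r.TimeSeconds / 1e9
		fmt.Printf("%-12s %6.2f Gbit/s over %.2f s\n", r.Type, gbits, r.TimeSeconds)
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```

Intermediary lines give the rate over roughly the last second; the final line covers the whole run, including connection establishment.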

View File

@ -89,7 +89,10 @@ func main() {
}
jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(),
TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
}
type Result struct {
Latency float64 `json:"latency"`
Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
}
type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"time"
logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize)
defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf
if bytesToSend < blockSize {
toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err
}
bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
}
return nil
}
func drainStream(s io.Reader) (uint64, error) {
var recvd int64
recvd, err := io.Copy(io.Discard, s)
recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF {
return uint64(recvd), err
}
return uint64(recvd), nil
}
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
}
jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(),
TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
}
type Result struct {
Latency float64 `json:"latency"`
Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
}
type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"time"
logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize)
defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf
if bytesToSend < blockSize {
toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err
}
bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
}
return nil
}
func drainStream(s io.Reader) (uint64, error) {
var recvd int64
recvd, err := io.Copy(io.Discard, s)
recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF {
return uint64(recvd), err
}
return uint64(recvd), nil
}
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
}
jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(),
TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
}
type Result struct {
Latency float64 `json:"latency"`
Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
}
type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"time"
logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize)
defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf
if bytesToSend < blockSize {
toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err
}
bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
}
return nil
}
func drainStream(s io.Reader) (uint64, error) {
var recvd int64
recvd, err := io.Copy(io.Discard, s)
recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF {
return uint64(recvd), err
}
return uint64(recvd), nil
}
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
}
jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(),
TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
}
type Result struct {
Latency float64 `json:"latency"`
Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
}
type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"time"
logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize)
defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf
if bytesToSend < blockSize {
toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err
}
bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
}
return nil
}
func drainStream(s io.Reader) (uint64, error) {
var recvd int64
recvd, err := io.Copy(io.Discard, s)
recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF {
return uint64(recvd), err
}
return uint64(recvd), nil
}
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -49,26 +49,6 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
}
}
type nullReader struct {
N uint64
read uint64
}
var _ io.Reader = &nullReader{}
func (r *nullReader) Read(b []byte) (int, error) {
remaining := r.N - r.read
l := uint64(len(b))
if uint64(len(b)) > remaining {
l = remaining
}
r.read += l
if r.read == r.N {
return int(l), io.EOF
}
return int(l), nil
}
func runClient(serverAddr string, uploadBytes, downloadBytes uint64) (time.Duration, error) {
client := &http.Client{
Transport: &http.Transport{
@ -84,7 +64,7 @@ func runClient(serverAddr string, uploadBytes, downloadBytes uint64) (time.Durat
fmt.Sprintf("https://%s/", serverAddr),
io.MultiReader(
bytes.NewReader(b),
&nullReader{N: uploadBytes},
&reportingReader{orig: &nullReader{N: uploadBytes}, LastReportTime: time.Now(), isUpload: true},
),
)
if err != nil {
@ -167,7 +147,10 @@ func generateEphemeralCertificate() (tls.Certificate, error) {
}
type Result struct {
Latency float64 `json:"latency"`
Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
}
func main() {
@ -220,7 +203,10 @@ func main() {
}
jsonB, err := json.Marshal(Result{
Latency: latency.Seconds(),
TimeSeconds: latency.Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
@ -249,9 +235,72 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
func drainStream(s io.Reader) (uint64, error) {
var recvd int64
recvd, err := io.Copy(io.Discard, s)
recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now(), isUpload: false})
if err != nil && err != io.EOF {
return uint64(recvd), err
}
return uint64(recvd), nil
}
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
isUpload bool
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
// Build and print an intermediary progress report, mirroring the reportingReader in the libp2p implementations.
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
}
if r.isUpload {
result.UploadBytes = r.lastReportRead
} else {
result.DownloadBytes = r.lastReportRead
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}
type nullReader struct {
N uint64
read uint64
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &nullReader{}
func (r *nullReader) Read(b []byte) (int, error) {
remaining := r.N - r.read
l := uint64(len(b))
if uint64(len(b)) > remaining {
l = remaining
}
r.read += l
if r.read == r.N {
return int(l), io.EOF
}
return int(l), nil
}

View File

@ -1,4 +1,4 @@
commitSha := a5cd126c97b6d8d8328141bfa84cc57e74ebc57c
commitSha := 3a12ea9207e40de20533b0a6aa2e40e3727aa796
all: perf
@ -10,7 +10,8 @@ perf-${commitSha}: perf-${commitSha}.zip
unzip -o perf-${commitSha}.zip
perf-${commitSha}.zip:
wget -O $@ "https://github.com/quic-go/perf/archive/${commitSha}.zip"
# TODO: revert
wget -O $@ "https://github.com/mxinden/perf/archive/${commitSha}.zip"
clean:
rm perf-*.zip

View File

@ -1,4 +1,4 @@
commitSha := e8759c85c278006f5fc94e823c2a3620abaaf697
commitSha := d15bb69a9d2b353d73ead79a29f668dca3e1dc4a
all: perf
@ -6,7 +6,7 @@ perf: rust-libp2p-${commitSha}/target/release/perf
cp ./rust-libp2p-${commitSha}/target/release/perf .
rust-libp2p-${commitSha}/target/release/perf: rust-libp2p-${commitSha}
docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/rust-libp2p-${commitSha}":/usr/src/myapp -w /usr/src/myapp rust:1.69 cargo build --release --bin perf
docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/rust-libp2p-${commitSha}":/usr/src/myapp -w /usr/src/myapp rust:1.73 cargo build --release --bin perf
rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip
unzip -o rust-libp2p-${commitSha}.zip

File diff suppressed because it is too large.

View File

@ -36,7 +36,10 @@ export type Result = {
};
export type ResultValue = {
latency: number
type: "itermediate" | "final",
time_seconds: number,
upload_bytes: number,
download_bytes: number,
};
export type Comparison = {

View File

@ -12,33 +12,36 @@ async function main(clientPublicIP: string, serverPublicIP: string, testing: boo
copyAndBuildPerfImplementations(clientPublicIP);
const benchmarks = [
runBenchmarkAcrossVersions({
name: "Single Connection throughput Upload 100 MiB",
clientPublicIP,
serverPublicIP,
uploadBytes: 100 << 20,
downloadBytes: 0,
unit: "bit/s",
iterations: testing ? 1 : 10,
}),
runBenchmarkAcrossVersions({
name: "Single Connection throughput Download 100 MiB",
clientPublicIP,
serverPublicIP,
uploadBytes: 0,
downloadBytes: 100 << 20,
unit: "bit/s",
iterations: testing ? 1 : 10,
}),
runBenchmarkAcrossVersions({
name: "Connection establishment + 1 byte round trip latencies",
clientPublicIP,
serverPublicIP,
uploadBytes: 1,
downloadBytes: 1,
unit: "s",
iterations: testing ? 1 : 100,
}),
runBenchmarkAcrossVersions({
name: "throughput/upload",
clientPublicIP,
serverPublicIP,
uploadBytes: Number.MAX_SAFE_INTEGER,
downloadBytes: 0,
unit: "bit/s",
iterations: testing ? 1 : 10,
durationSecondsPerIteration: testing ? 5 : 20,
}),
runBenchmarkAcrossVersions({
name: "throughput/download",
clientPublicIP,
serverPublicIP,
uploadBytes: 0,
downloadBytes: Number.MAX_SAFE_INTEGER,
unit: "bit/s",
iterations: testing ? 1 : 10,
durationSecondsPerIteration: testing ? 5 : 20,
}),
runBenchmarkAcrossVersions({
name: "Connection establishment + 1 byte round trip latencies",
clientPublicIP,
serverPublicIP,
uploadBytes: 1,
downloadBytes: 1,
unit: "s",
iterations: testing ? 1 : 100,
durationSecondsPerIteration: Number.MAX_SAFE_INTEGER,
}),
];
const benchmarkResults: BenchmarkResults = {
@ -84,7 +87,7 @@ function runIPerf(clientPublicIP: string, serverPublicIP: string, testing: boole
const serverSTDOUT = execCommand(serverCMD);
console.error(serverSTDOUT);
const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${clientPublicIP} 'iperf3 -c ${serverPublicIP} -b 25g -t ${iPerfIterations}'`;
const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${clientPublicIP} 'iperf3 -c ${serverPublicIP} -t ${iPerfIterations} -N'`;
const stdout = execSync(cmd).toString();
// Extract the bitrate from each relevant line
@ -114,6 +117,7 @@ interface ArgsRunBenchmarkAcrossVersions {
downloadBytes: number,
unit: "bit/s" | "s",
iterations: number,
durationSecondsPerIteration: number,
}
function runBenchmarkAcrossVersions(args: ArgsRunBenchmarkAcrossVersions): Benchmark {
@ -144,6 +148,7 @@ function runBenchmarkAcrossVersions(args: ArgsRunBenchmarkAcrossVersions): Bench
uploadBytes: args.uploadBytes,
downloadBytes: args.downloadBytes,
iterations: args.iterations,
durationSecondsPerIteration: args.durationSecondsPerIteration,
});
results.push({
@ -176,15 +181,19 @@ interface ArgsRunBenchmark {
uploadBytes: number,
downloadBytes: number,
iterations: number,
durationSecondsPerIteration: number,
}
function runClient(args: ArgsRunBenchmark): ResultValue[] {
console.error(`=== Starting client ${args.implementation}/${args.id}/${args.transportStack}`);
const perfCMD = `./impl/${args.implementation}/${args.id}/perf --server-address ${args.serverPublicIP}:4001 --transport ${args.transportStack} --upload-bytes ${args.uploadBytes} --download-bytes ${args.downloadBytes}`
const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${args.clientPublicIP} 'for i in {1..${args.iterations}}; do ${perfCMD}; done'`
const cmd = `./impl/${args.implementation}/${args.id}/perf --server-address ${args.serverPublicIP}:4001 --transport ${args.transportStack} --upload-bytes ${args.uploadBytes} --download-bytes ${args.downloadBytes}`
// Note: 124 is timeout's exit code when the timeout is hit, which is not a failure here.
const withTimeout = `timeout ${args.durationSecondsPerIteration}s ${cmd} || [ $? -eq 124 ]`
const withForLoop = `for i in {1..${args.iterations}}; do ${withTimeout}; done`
const withSSH = `ssh -o StrictHostKeyChecking=no ec2-user@${args.clientPublicIP} '${withForLoop}'`
const stdout = execCommand(cmd);
const stdout = execCommand(withSSH);
const lines = stdout.toString().trim().split('\n');

View File

@ -11,7 +11,7 @@ export const versions: Array<Version> = [
transportStacks: ["quic-v1"]
},
{
id: "v0.52",
id: "v0.53",
implementation: "rust-libp2p",
transportStacks: ["tcp", "quic-v1"]
},
@ -36,8 +36,8 @@ export const versions: Array<Version> = [
transportStacks: ["tcp", "quic-v1"]
},
{
id: "v0.46",
implementation: "js-libp2p",
transportStacks: ["tcp"]
}
id: "v0.31",
implementation: "go-libp2p",
transportStacks: ["tcp", "quic-v1"]
},
]