feat(perf): continuously measure on single conn (iperf-style) (#276)

Our current throughput tests open a connection, open a stream,
up- or download 100MB and close the connection. 100 MB is not enough on the
given path (60ms, ~5gbit/s) to exit congestion controller's slow-start. See
https://github.com/libp2p/test-plans/issues/261 for details.

Instead of downloading 100MB multiple times, each on a new connection, establish
a single connection and continuously measure the throughput for a fixed
duration (20s).
This commit is contained in:
Max Inden 2023-10-25 13:24:08 +02:00 committed by GitHub
parent 9247c9fd99
commit 0a8dbab87c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 32392 additions and 1551 deletions

View File

@ -79,11 +79,33 @@ Given you have provisioned your infrastructure, you can now build and run the li
- `--download-bytes` number of bytes to download per stream. - `--download-bytes` number of bytes to download per stream.
- Output - Output
- Logging MUST go to `stderr`. - Logging MUST go to `stderr`.
- Measurement output is printed to `stdout` as JSON in the form of: - Measurement output is printed to `stdout` as JSON.
```json - The output schema is:
{"latency": 0.246442851} ``` typescript
``` interface Data {
Note that the measurement includes the time to (1) establish the type: "intermediary" | "final";
connection, (2) upload the bytes and (3) download the bytes. timeSeconds: number;
uploadBytes: number;
downloadBytes: number;
}
```
- Every second the client must print the current progress to stdout. See example below. Note the `type: "intermediary"`.
``` json
{
"type": "intermediary",
"timeSeconds": 1.004957645,
"uploadBytes": 73039872,
"downloadBytes": 0
},
```
- Before terminating the client must print a final summary. See example below. Note the `type: "final"`. Also note that the measurement includes the time to (1) establish the connection, (2) upload the bytes and (3) download the bytes.
``` json
{
"type": "final",
"timeSeconds": 60.127230659,
"uploadBytes": 4382392320,
"downloadBytes": 0
}
```
2. For a new implementation, in [`impl/Makefile` include your implementation in the `all` target.](./impl/Makefile#L7) 2. For a new implementation, in [`impl/Makefile` include your implementation in the `all` target.](./impl/Makefile#L7)
3. For a new version, reference version in [`runner/src/versions.ts`](./runner/src/versions.ts#L7-L43). 3. For a new version, reference version in [`runner/src/versions.ts`](./runner/src/versions.ts#L7-L43).

View File

@ -89,7 +89,10 @@ func main() {
} }
jsonB, err := json.Marshal(Result{ jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(), TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
}) })
if err != nil { if err != nil {
log.Fatalf("failed to marshal perf result: %s", err) log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
} }
type Result struct { type Result struct {
Latency float64 `json:"latency"` Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
} }
type simpleReader struct { type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import ( import (
"context" "context"
"encoding/binary" "encoding/binary"
"encoding/json"
"fmt" "fmt"
"io" "io"
"time"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool" pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize) buf := pool.Get(blockSize)
defer pool.Put(buf) defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 { for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf toSend := buf
if bytesToSend < blockSize { if bytesToSend < blockSize {
toSend = buf[:bytesToSend] toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err return err
} }
bytesToSend -= uint64(n) bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
} }
return nil return nil
} }
func drainStream(s io.Reader) (uint64, error) { func drainStream(s io.Reader) (uint64, error) {
var recvd int64 var recvd int64
recvd, err := io.Copy(io.Discard, s) recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return uint64(recvd), err return uint64(recvd), err
} }
return uint64(recvd), nil return uint64(recvd), nil
} }
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
} }
jsonB, err := json.Marshal(Result{ jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(), TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
}) })
if err != nil { if err != nil {
log.Fatalf("failed to marshal perf result: %s", err) log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
} }
type Result struct { type Result struct {
Latency float64 `json:"latency"` Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
} }
type simpleReader struct { type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import ( import (
"context" "context"
"encoding/binary" "encoding/binary"
"encoding/json"
"fmt" "fmt"
"io" "io"
"time"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool" pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize) buf := pool.Get(blockSize)
defer pool.Put(buf) defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 { for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf toSend := buf
if bytesToSend < blockSize { if bytesToSend < blockSize {
toSend = buf[:bytesToSend] toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err return err
} }
bytesToSend -= uint64(n) bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
} }
return nil return nil
} }
func drainStream(s io.Reader) (uint64, error) { func drainStream(s io.Reader) (uint64, error) {
var recvd int64 var recvd int64
recvd, err := io.Copy(io.Discard, s) recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return uint64(recvd), err return uint64(recvd), err
} }
return uint64(recvd), nil return uint64(recvd), nil
} }
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
} }
jsonB, err := json.Marshal(Result{ jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(), TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
}) })
if err != nil { if err != nil {
log.Fatalf("failed to marshal perf result: %s", err) log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
} }
type Result struct { type Result struct {
Latency float64 `json:"latency"` Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
} }
type simpleReader struct { type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import ( import (
"context" "context"
"encoding/binary" "encoding/binary"
"encoding/json"
"fmt" "fmt"
"io" "io"
"time"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool" pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize) buf := pool.Get(blockSize)
defer pool.Put(buf) defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 { for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf toSend := buf
if bytesToSend < blockSize { if bytesToSend < blockSize {
toSend = buf[:bytesToSend] toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err return err
} }
bytesToSend -= uint64(n) bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
} }
return nil return nil
} }
func drainStream(s io.Reader) (uint64, error) { func drainStream(s io.Reader) (uint64, error) {
var recvd int64 var recvd int64
recvd, err := io.Copy(io.Discard, s) recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return uint64(recvd), err return uint64(recvd), err
} }
return uint64(recvd), nil return uint64(recvd), nil
} }
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -89,7 +89,10 @@ func main() {
} }
jsonB, err := json.Marshal(Result{ jsonB, err := json.Marshal(Result{
Latency: time.Since(start).Seconds(), TimeSeconds: time.Since(start).Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
}) })
if err != nil { if err != nil {
log.Fatalf("failed to marshal perf result: %s", err) log.Fatalf("failed to marshal perf result: %s", err)
@ -99,7 +102,10 @@ func main() {
} }
type Result struct { type Result struct {
Latency float64 `json:"latency"` Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
} }
type simpleReader struct { type simpleReader struct {

View File

@ -3,8 +3,10 @@ package main
import ( import (
"context" "context"
"encoding/binary" "encoding/binary"
"encoding/json"
"fmt" "fmt"
"io" "io"
"time"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool" pool "github.com/libp2p/go-buffer-pool"
@ -89,7 +91,26 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
buf := pool.Get(blockSize) buf := pool.Get(blockSize)
defer pool.Put(buf) defer pool.Put(buf)
lastReportTime := time.Now()
lastReportWrite := uint64(0)
for bytesToSend > 0 { for bytesToSend > 0 {
now := time.Now()
if now.Sub(lastReportTime) >= time.Second {
jsonB, err := json.Marshal(Result{
TimeSeconds: now.Sub(lastReportTime).Seconds(),
UploadBytes: lastReportWrite,
Type: "intermediary",
})
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
lastReportTime = now
lastReportWrite = 0
}
toSend := buf toSend := buf
if bytesToSend < blockSize { if bytesToSend < blockSize {
toSend = buf[:bytesToSend] toSend = buf[:bytesToSend]
@ -100,15 +121,49 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
return err return err
} }
bytesToSend -= uint64(n) bytesToSend -= uint64(n)
lastReportWrite += uint64(n)
} }
return nil return nil
} }
func drainStream(s io.Reader) (uint64, error) { func drainStream(s io.Reader) (uint64, error) {
var recvd int64 var recvd int64
recvd, err := io.Copy(io.Discard, s) recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now()})
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return uint64(recvd), err return uint64(recvd), err
} }
return uint64(recvd), nil return uint64(recvd), nil
} }
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
DownloadBytes: r.lastReportRead,
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}

View File

@ -49,26 +49,6 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
} }
} }
type nullReader struct {
N uint64
read uint64
}
var _ io.Reader = &nullReader{}
func (r *nullReader) Read(b []byte) (int, error) {
remaining := r.N - r.read
l := uint64(len(b))
if uint64(len(b)) > remaining {
l = remaining
}
r.read += l
if r.read == r.N {
return int(l), io.EOF
}
return int(l), nil
}
func runClient(serverAddr string, uploadBytes, downloadBytes uint64) (time.Duration, error) { func runClient(serverAddr string, uploadBytes, downloadBytes uint64) (time.Duration, error) {
client := &http.Client{ client := &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
@ -84,7 +64,7 @@ func runClient(serverAddr string, uploadBytes, downloadBytes uint64) (time.Durat
fmt.Sprintf("https://%s/", serverAddr), fmt.Sprintf("https://%s/", serverAddr),
io.MultiReader( io.MultiReader(
bytes.NewReader(b), bytes.NewReader(b),
&nullReader{N: uploadBytes}, &reportingReader{orig: &nullReader{N: uploadBytes}, LastReportTime: time.Now(), isUpload: true},
), ),
) )
if err != nil { if err != nil {
@ -167,7 +147,10 @@ func generateEphemeralCertificate() (tls.Certificate, error) {
} }
type Result struct { type Result struct {
Latency float64 `json:"latency"` Type string `json:"type"`
TimeSeconds float64 `json:"timeSeconds"`
UploadBytes uint64 `json:"uploadBytes"`
DownloadBytes uint64 `json:"downloadBytes"`
} }
func main() { func main() {
@ -220,7 +203,10 @@ func main() {
} }
jsonB, err := json.Marshal(Result{ jsonB, err := json.Marshal(Result{
Latency: latency.Seconds(), TimeSeconds: latency.Seconds(),
UploadBytes: *uploadBytes,
DownloadBytes: *downloadBytes,
Type: "final",
}) })
if err != nil { if err != nil {
log.Fatalf("failed to marshal perf result: %s", err) log.Fatalf("failed to marshal perf result: %s", err)
@ -249,9 +235,72 @@ func sendBytes(s io.Writer, bytesToSend uint64) error {
func drainStream(s io.Reader) (uint64, error) { func drainStream(s io.Reader) (uint64, error) {
var recvd int64 var recvd int64
recvd, err := io.Copy(io.Discard, s) recvd, err := io.Copy(io.Discard, &reportingReader{orig: s, LastReportTime: time.Now(), isUpload: false})
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return uint64(recvd), err return uint64(recvd), err
} }
return uint64(recvd), nil return uint64(recvd), nil
} }
type reportingReader struct {
orig io.Reader
LastReportTime time.Time
lastReportRead uint64
isUpload bool
}
var _ io.Reader = &reportingReader{}
func (r *reportingReader) Read(b []byte) (int, error) {
n, err := r.orig.Read(b)
r.lastReportRead += uint64(n)
now := time.Now()
if now.Sub(r.LastReportTime) >= time.Second {
// This section is analogous to your Read implementation
result := Result{
TimeSeconds: now.Sub(r.LastReportTime).Seconds(),
Type: "intermediary",
}
if r.isUpload {
result.UploadBytes = r.lastReportRead
} else {
result.DownloadBytes = r.lastReportRead
}
jsonB, err := json.Marshal(result)
if err != nil {
log.Fatalf("failed to marshal perf result: %s", err)
}
fmt.Println(string(jsonB))
r.LastReportTime = now
r.lastReportRead = 0
}
return n, err
}
type nullReader struct {
N uint64
read uint64
LastReportTime time.Time
lastReportRead uint64
}
var _ io.Reader = &nullReader{}
func (r *nullReader) Read(b []byte) (int, error) {
remaining := r.N - r.read
l := uint64(len(b))
if uint64(len(b)) > remaining {
l = remaining
}
r.read += l
if r.read == r.N {
return int(l), io.EOF
}
return int(l), nil
}

View File

@ -1,4 +1,4 @@
commitSha := a5cd126c97b6d8d8328141bfa84cc57e74ebc57c commitSha := 3a12ea9207e40de20533b0a6aa2e40e3727aa796
all: perf all: perf
@ -10,7 +10,8 @@ perf-${commitSha}: perf-${commitSha}.zip
unzip -o perf-${commitSha}.zip unzip -o perf-${commitSha}.zip
perf-${commitSha}.zip: perf-${commitSha}.zip:
wget -O $@ "https://github.com/quic-go/perf/archive/${commitSha}.zip" # TODO: revert
wget -O $@ "https://github.com/mxinden/perf/archive/${commitSha}.zip"
clean: clean:
rm perf-*.zip rm perf-*.zip

View File

@ -1,4 +1,4 @@
commitSha := e8759c85c278006f5fc94e823c2a3620abaaf697 commitSha := d15bb69a9d2b353d73ead79a29f668dca3e1dc4a
all: perf all: perf
@ -6,7 +6,7 @@ perf: rust-libp2p-${commitSha}/target/release/perf
cp ./rust-libp2p-${commitSha}/target/release/perf . cp ./rust-libp2p-${commitSha}/target/release/perf .
rust-libp2p-${commitSha}/target/release/perf: rust-libp2p-${commitSha} rust-libp2p-${commitSha}/target/release/perf: rust-libp2p-${commitSha}
docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/rust-libp2p-${commitSha}":/usr/src/myapp -w /usr/src/myapp rust:1.69 cargo build --release --bin perf docker run --rm --user "$(shell id -u):$(shell id -g)" -v "$(shell pwd)/rust-libp2p-${commitSha}":/usr/src/myapp -w /usr/src/myapp rust:1.73 cargo build --release --bin perf
rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip rust-libp2p-${commitSha}: rust-libp2p-${commitSha}.zip
unzip -o rust-libp2p-${commitSha}.zip unzip -o rust-libp2p-${commitSha}.zip

File diff suppressed because it is too large Load Diff

View File

@ -36,7 +36,10 @@ export type Result = {
}; };
export type ResultValue = { export type ResultValue = {
latency: number type: "intermediary" | "final",
time_seconds: number,
upload_bytes: number,
download_bytes: number,
}; };
export type Comparison = { export type Comparison = {

View File

@ -12,33 +12,36 @@ async function main(clientPublicIP: string, serverPublicIP: string, testing: boo
copyAndBuildPerfImplementations(clientPublicIP); copyAndBuildPerfImplementations(clientPublicIP);
const benchmarks = [ const benchmarks = [
runBenchmarkAcrossVersions({ runBenchmarkAcrossVersions({
name: "Single Connection throughput Upload 100 MiB", name: "throughput/upload",
clientPublicIP, clientPublicIP,
serverPublicIP, serverPublicIP,
uploadBytes: 100 << 20, uploadBytes: Number.MAX_SAFE_INTEGER,
downloadBytes: 0, downloadBytes: 0,
unit: "bit/s", unit: "bit/s",
iterations: testing ? 1 : 10, iterations: testing ? 1 : 10,
}), durationSecondsPerIteration: testing ? 5 : 20,
runBenchmarkAcrossVersions({ }),
name: "Single Connection throughput Download 100 MiB", runBenchmarkAcrossVersions({
clientPublicIP, name: "throughput/download",
serverPublicIP, clientPublicIP,
uploadBytes: 0, serverPublicIP,
downloadBytes: 100 << 20, uploadBytes: 0,
unit: "bit/s", downloadBytes: Number.MAX_SAFE_INTEGER,
iterations: testing ? 1 : 10, unit: "bit/s",
}), iterations: testing ? 1 : 10,
runBenchmarkAcrossVersions({ durationSecondsPerIteration: testing ? 5 : 20,
name: "Connection establishment + 1 byte round trip latencies", }),
clientPublicIP, runBenchmarkAcrossVersions({
serverPublicIP, name: "Connection establishment + 1 byte round trip latencies",
uploadBytes: 1, clientPublicIP,
downloadBytes: 1, serverPublicIP,
unit: "s", uploadBytes: 1,
iterations: testing ? 1 : 100, downloadBytes: 1,
}), unit: "s",
iterations: testing ? 1 : 100,
durationSecondsPerIteration: Number.MAX_SAFE_INTEGER,
}),
]; ];
const benchmarkResults: BenchmarkResults = { const benchmarkResults: BenchmarkResults = {
@ -84,7 +87,7 @@ function runIPerf(clientPublicIP: string, serverPublicIP: string, testing: boole
const serverSTDOUT = execCommand(serverCMD); const serverSTDOUT = execCommand(serverCMD);
console.error(serverSTDOUT); console.error(serverSTDOUT);
const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${clientPublicIP} 'iperf3 -c ${serverPublicIP} -b 25g -t ${iPerfIterations}'`; const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${clientPublicIP} 'iperf3 -c ${serverPublicIP} -t ${iPerfIterations} -N'`;
const stdout = execSync(cmd).toString(); const stdout = execSync(cmd).toString();
// Extract the bitrate from each relevant line // Extract the bitrate from each relevant line
@ -114,6 +117,7 @@ interface ArgsRunBenchmarkAcrossVersions {
downloadBytes: number, downloadBytes: number,
unit: "bit/s" | "s", unit: "bit/s" | "s",
iterations: number, iterations: number,
durationSecondsPerIteration: number,
} }
function runBenchmarkAcrossVersions(args: ArgsRunBenchmarkAcrossVersions): Benchmark { function runBenchmarkAcrossVersions(args: ArgsRunBenchmarkAcrossVersions): Benchmark {
@ -144,6 +148,7 @@ function runBenchmarkAcrossVersions(args: ArgsRunBenchmarkAcrossVersions): Bench
uploadBytes: args.uploadBytes, uploadBytes: args.uploadBytes,
downloadBytes: args.downloadBytes, downloadBytes: args.downloadBytes,
iterations: args.iterations, iterations: args.iterations,
durationSecondsPerIteration: args.durationSecondsPerIteration,
}); });
results.push({ results.push({
@ -176,15 +181,19 @@ interface ArgsRunBenchmark {
uploadBytes: number, uploadBytes: number,
downloadBytes: number, downloadBytes: number,
iterations: number, iterations: number,
durationSecondsPerIteration: number,
} }
function runClient(args: ArgsRunBenchmark): ResultValue[] { function runClient(args: ArgsRunBenchmark): ResultValue[] {
console.error(`=== Starting client ${args.implementation}/${args.id}/${args.transportStack}`); console.error(`=== Starting client ${args.implementation}/${args.id}/${args.transportStack}`);
const perfCMD = `./impl/${args.implementation}/${args.id}/perf --server-address ${args.serverPublicIP}:4001 --transport ${args.transportStack} --upload-bytes ${args.uploadBytes} --download-bytes ${args.downloadBytes}` const cmd = `./impl/${args.implementation}/${args.id}/perf --server-address ${args.serverPublicIP}:4001 --transport ${args.transportStack} --upload-bytes ${args.uploadBytes} --download-bytes ${args.downloadBytes}`
const cmd = `ssh -o StrictHostKeyChecking=no ec2-user@${args.clientPublicIP} 'for i in {1..${args.iterations}}; do ${perfCMD}; done'` // Note 124 is timeout's exit code when timeout is hit which is not a failure here.
const withTimeout = `timeout ${args.durationSecondsPerIteration}s ${cmd} || [ $? -eq 124 ]`
const withForLoop = `for i in {1..${args.iterations}}; do ${withTimeout}; done`
const withSSH = `ssh -o StrictHostKeyChecking=no ec2-user@${args.clientPublicIP} '${withForLoop}'`
const stdout = execCommand(cmd); const stdout = execCommand(withSSH);
const lines = stdout.toString().trim().split('\n'); const lines = stdout.toString().trim().split('\n');

View File

@ -11,7 +11,7 @@ export const versions: Array<Version> = [
transportStacks: ["quic-v1"] transportStacks: ["quic-v1"]
}, },
{ {
id: "v0.52", id: "v0.53",
implementation: "rust-libp2p", implementation: "rust-libp2p",
transportStacks: ["tcp", "quic-v1"] transportStacks: ["tcp", "quic-v1"]
}, },
@ -36,8 +36,8 @@ export const versions: Array<Version> = [
transportStacks: ["tcp", "quic-v1"] transportStacks: ["tcp", "quic-v1"]
}, },
{ {
id: "v0.46", id: "v0.31",
implementation: "js-libp2p", implementation: "go-libp2p",
transportStacks: ["tcp"] transportStacks: ["tcp", "quic-v1"]
} },
] ]