migrate dht test plan from testground/testground. (#1)

* migrate dht test plan from testground/testground.

* dht: update module and import paths.

* rm redundant scripts; rm redundant line in manifest.
This commit is contained in:
Raúl Kripalani 2020-04-22 17:47:58 +01:00 committed by GitHub
parent 74bddc45a7
commit 4d4f24ad8f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
37 changed files with 5543 additions and 0 deletions

74
dht/README.md Normal file
View File

@ -0,0 +1,74 @@
# `Plan:` The go-libp2p DHT behaves
![](https://img.shields.io/badge/status-wip-orange.svg?style=flat-square)
IPFS can safely rely on the latest DHT upgrades by running go-libp2p DHT tests directly
## What is being optimized (min/max, reach)
- (Minimize) Numbers of peers dialed to as part of the call
- (Minimize) Number of failed dials to peers
- (Minimize) Time taken to complete the test
- (Minimize) Number of routing hops per query (the XOR distance metric should decrease at every step)
## Plan Parameters
- **Network Parameters**
- `instances` - Number of nodes that are spawned for the test (from 10 to 1000000)
- **Image Parameters**
- Single Image - The go-libp2p commit that is being tested
- Image Resources CPU & Ram
## Tests
### `Test:` Find Peers
- **Test Parameters**
- `auto-refresh` - Enable autoRefresh (equivalent to running random-walk multiple times automatically) (true/false, default: false)
- `random-walk` - Run random-walk manually 5 times (true/false, default: false)
- `bucket-size` - Kademlia DHT bucket size (default: 20)
- `n-find-peers` - Number of times a Find Peers call is executed from each node (picking another node PeerId at random that is not yet in our Routing table) (default: 1)
- **Narrative**
- **Warm up**
- All nodes boot up
- Each node, as it boots up, connects to the node that joined before it
- Nodes run 5 random-walk queries to populate their Routing Tables
- **Act I**
- Each node calls Find Peers `n-find-peers` times
### `Test:` Find Providers
- **Test Parameters**
- `auto-refresh` - Enable autoRefresh (equivalent to running random-walk multiple times automatically) (true/false, default: false)
- `random-walk` - Run random-walk manually 5 times (true/false, default: false)
- `bucket-size` - Kademlia DHT bucket size (default: 20)
- `p-providing` - Percentage of nodes providing a record
- `p-resolving` - Percentage of nodes trying to resolve a record from the network
- `p-failing` - Percentage of nodes trying to resolve a record that hasn't been provided
- **Narrative**
- **Warm up**
- All nodes boot up
- Each node, as it boots up, connects to the node that joined before it
- Nodes run 5 random-walk queries to populate their Routing Tables
- **Act I**
- `p-providing` of the nodes provide a record and store its key on redis
- **Act II**
- `p-resolving` of the nodes attempt to resolve the records provided before
- `p-failing` of the nodes attempt to resolve records that do not exist
### `Test:` Provide Stress
- **Test Parameters**
- `auto-refresh` - Enable autoRefresh (equivalent to running random-walk multiple times automatically) (true/false, default: false)
- `random-walk` - Run random-walk manually 5 times (true/false, default: false)
- `bucket-size` - Kademlia DHT bucket size (default: 20)
- `n-provides` - The number of provide calls that are done by each node
- `i-provides` - The interval between each provide call (in seconds)
- **Narrative**
- **Warm up**
- All nodes boot up
- Each node, as it boots up, connects to the node that joined before it
- Nodes run 5 random-walk queries to populate their Routing Tables
- **Act I**
- Each node calls Provide every `i-provides` seconds until it reaches a total of `n-provides` calls

View File

@ -0,0 +1,181 @@
[metadata]
name = "all-balsam"
author = "adin"
[global]
plan = "dht"
case = "all"
total_instances = 1000
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam-undialable-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-dialable-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-dialable-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-undialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
search_records = "true"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "balsam-undialable-passive"
[groups.instances]
count = 495
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-passive"
[groups.instances]
count = 250
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"
[[groups]]
id = "balsam-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"

View File

@ -0,0 +1,214 @@
[metadata]
name = "all-both"
author = "adin"
[global]
plan = "dht"
case = "all"
total_instances = 1000
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam-undialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "4"
latency = "100"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "5"
latency = "100"
search_records = "true"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-passive"
[groups.instances]
count = 780
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "6"
latency = "100"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "7"
latency = "100"
record_count = "1"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "8"
latency = "100"
search_records = "true"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 185
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "2"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "3"
latency = "100"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "7"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

View File

@ -0,0 +1,169 @@
[metadata]
name = "all-cypress"
author = "adin"
[global]
plan = "dht"
case = "all"
total_instances = 1170
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 1020
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "4"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "3"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "2"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 25
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "5"
latency = "100"
n_find_peers = "5"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

View File

@ -0,0 +1,92 @@
[metadata]
name = "bs-network"
author = "adin"
[global]
plan = "dht"
case = "bootstrap-network"
total_instances = 100
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 65
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:adaa263d3b51"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-last"
[groups.instances]
count = 30
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:adaa263d3b51"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "2"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:adaa263d3b51"
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

View File

@ -0,0 +1,43 @@
## THIS IS AN EXAMPLE COMPOSITION
##
## It performs a run of the dht/find-peers test case, with 50 instances:
##
## * 1 of them is a bootstrapper, with upstream dependency overrides.
## * the remaining 49 are peers that run random walks against the DHT.
##
[metadata]
name = "find-peers-01"
author = "raulk"
[global]
plan = "dht"
case = "find-peers"
builder = "exec:go"
runner = "local:exec"
total_instances = 50
[[groups]]
id = "bootstrappers"
instances = { count = 1 }
[groups.build]
selectors = ["foo"]
dependencies = [
{ module = "github.com/libp2p/go-libp2p-kad-dht", version = "995fee9e5345fdd7c151a5fe871252262db4e788"},
{ module = "github.com/libp2p/go-libp2p", version = "76944c4fc848530530f6be36fb22b70431ca506c"},
]
[groups.run]
test_params = { random_walk = "true", n_bootstrap = "1" }
[[groups]]
id = "peers"
instances = { count = 49 }
[groups.run]
test_params = { random_walk = "true", n_bootstrap = "1" }
[groups.build]
selectors = ["bar", "bee"]

View File

@ -0,0 +1,183 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 1000
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam-undialable-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-dialable-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-dialable-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "balsam-undialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
search_records = "true"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "balsam-undialable-passive"
[groups.instances]
count = 495
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-passive"
[groups.instances]
count = 250
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"
[[groups]]
id = "balsam-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:018e66855db4"
[groups.run.test_params]
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
timeout_secs = "600"

View File

@ -0,0 +1,214 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 1000
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam-undialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "4"
latency = "100"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "5"
latency = "100"
search_records = "true"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-passive"
[groups.instances]
count = 780
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "6"
latency = "100"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "7"
latency = "100"
record_count = "1"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:701251a63b92"
[groups.run.test_params]
bs_strategy = "7"
bucket_size = "10"
expect_dht = "false"
group_order = "8"
latency = "100"
search_records = "true"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 185
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "2"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "7"
bucket_size = "10"
group_order = "3"
latency = "100"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:ca78473d669d"
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "7"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

View File

@ -0,0 +1,198 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 150
builder = "docker:go"
runner = "local:docker"
[[groups]]
id = "balsam-undialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "0a67fb8fbbc9"
[groups.run.test_params]
bs_strategy = "3"
bucket_size = "10"
expect_dht = "false"
group_order = "4"
record_count = "1"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-undialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "0a67fb8fbbc9"
[groups.run.test_params]
bs_strategy = "3"
bucket_size = "10"
expect_dht = "false"
group_order = "5"
search_records = "true"
timeout_secs = "600"
undialable = "true"
[[groups]]
id = "balsam-dialable-passive"
[groups.instances]
count = 80
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "0a67fb8fbbc9"
[groups.run.test_params]
bs_strategy = "3"
bucket_size = "10"
expect_dht = "false"
group_order = "6"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "0a67fb8fbbc9"
[groups.run.test_params]
bs_strategy = "3"
bucket_size = "10"
expect_dht = "false"
group_order = "7"
record_count = "1"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "balsam-dialable-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["balsam"]
[groups.run]
artifact = "0a67fb8fbbc9"
[groups.run.test_params]
bs_strategy = "3"
bucket_size = "10"
expect_dht = "false"
group_order = "8"
search_records = "true"
timeout_secs = "600"
undialable = "false"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 35
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "f1d89aed47ea"
[groups.run.test_params]
alpha = "6"
bs_strategy = "3"
bucket_size = "10"
group_order = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-provider"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "f1d89aed47ea"
[groups.run.test_params]
alpha = "6"
bs_strategy = "3"
bucket_size = "10"
group_order = "2"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "f1d89aed47ea"
[groups.run.test_params]
alpha = "6"
bs_strategy = "3"
bucket_size = "10"
group_order = "3"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "f1d89aed47ea"
[groups.run.test_params]
alpha = "6"
bootstrapper = "true"
bs_strategy = "1"
bucket_size = "10"
group_order = "0"
timeout_secs = "600"

View File

@ -0,0 +1,174 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 1170
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 1020
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-small"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "4"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-medium"
[groups.instances]
count = 15
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "3"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-provider-large"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "2"
latency = "100"
record_count = "1"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 25
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "5"
latency = "100"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:3c690e7169d5"
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

View File

@ -0,0 +1,56 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 100
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam"
[groups.instances]
count = 100
percentage = 0.0
[groups.build]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-dht:07736f7b34c3"
[groups.run.test_params]
bucket_size = "4"
client_mode = "false"
f_undialable = "0.9"
n_bootstrap = "-4"
n_providing = "1"
record_count = "5"
timeout_secs = "2000"
autonat_ok = "false"
[[groups]]
id = "cypress"
[groups.instances]
count = 0
percentage = 0.0
[groups.build]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "v0.4.2-0.20200204202258-35d3e4a5d43e"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-autonat"
version = "v0.1.2-0.20200204200147-902af8cb7b6a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-dht:d4709f2f74cd"
[groups.run.test_params]
bucket_size = "4"
client_mode = "false"
f_undialable = "0.9"
n_bootstrap = "-4"
n_providing = "1"
record_count = "5"
timeout_secs = "2000"

View File

@ -0,0 +1,54 @@
[metadata]
name = "find-provs"
author = "adin"
[global]
plan = "dht"
case = "find-providers"
total_instances = 150
builder = "docker:go"
runner = "local:docker"
[[groups]]
id = "balsam"
[groups.instances]
count = 0
percentage = 0.0
[groups.build]
[groups.run]
artifact = "d0b0506b5d41"
[groups.run.test_params]
bucket_size = "4"
client_mode = "false"
f_undialable = "0.0"
n_bootstrap = "-4"
n_paths = "10"
n_providing = "1"
record_count = "5"
timeout_secs = "200"
[[groups]]
id = "cypress"
[groups.instances]
count = 0
percentage = 1.0
[groups.build]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "v0.4.2-0.20200204202258-35d3e4a5d43e"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-autonat"
version = "v0.1.2-0.20200204200147-902af8cb7b6a"
[groups.run]
artifact = "ec4c0ec62a02"
[groups.run.test_params]
bucket_size = "4"
client_mode = "false"
f_undialable = "0.9"
n_bootstrap = "-4"
n_paths = "0"
n_providing = "1"
record_count = "5"
timeout_secs = "200"

View File

@ -0,0 +1,32 @@
[metadata]
name = "gcp"
author = "adin"
[global]
plan = "dht"
case = "get-closest-peers"
total_instances = 400
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "balsam"
[groups.instances]
count = 0
percentage = 1.0
[groups.build]
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-dht:7177e811244e"
[groups.run.test_params]
bucket_size = "10"
client_mode = "false"
f_undialable = "0.5"
n_bootstrap = "-4"
n_find_peers = "5"
n_providing = "1"
record_count = "5"
timeout_secs = "600"
autonat_ok = "false"

View File

@ -0,0 +1,94 @@
[metadata]
name = "gcp"
author = "adin"
[global]
plan = "dht"
case = "get-closest-peers"
total_instances = 1000
builder = "docker:go"
runner = "cluster:k8s"
[global.build_config]
push_registry = true
registry_type = "aws"
[[groups]]
id = "cypress-passive"
[groups.instances]
count = 975
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:aa0340067f21"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "1"
latency = "100"
timeout_secs = "600"
[[groups]]
id = "cypress-searcher"
[groups.instances]
count = 20
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:aa0340067f21"
[groups.run.test_params]
alpha = "6"
beta = "3"
bs_strategy = "6"
bucket_size = "10"
group_order = "2"
latency = "100"
record_count = "1"
search_records = "true"
timeout_secs = "600"
[[groups]]
id = "cypress-bs"
[groups.instances]
count = 5
percentage = 0.0
[groups.build]
Selectors = ["cypress"]
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-kad-dht"
version = "180be07b8303d536e39809bc39c58be5407fedd9"
[[groups.build.dependencies]]
module = "github.com/libp2p/go-libp2p-xor"
version = "df24f5b04bcbdc0059b27989163a6090f4f6dc7a"
[groups.run]
artifact = "909427826938.dkr.ecr.us-east-1.amazonaws.com/testground-us-east-1-dht:aa0340067f21"
[groups.run.test_params]
alpha = "6"
beta = "3"
bootstrapper = "true"
bs_strategy = "6"
bucket_size = "10"
group_order = "0"
latency = "100"
timeout_secs = "600"

38
dht/go.mod Normal file
View File

@ -0,0 +1,38 @@
module github.com/libp2p/test-plans/dht
go 1.14
require (
github.com/gogo/protobuf v1.3.1
github.com/ipfs/go-cid v0.0.3
github.com/ipfs/go-datastore v0.4.1
github.com/ipfs/go-ds-leveldb v0.4.1
github.com/ipfs/go-ipfs-util v0.0.1
github.com/ipfs/go-ipns v0.0.2
github.com/libp2p/go-libp2p v0.4.2
github.com/libp2p/go-libp2p-connmgr v0.2.1
github.com/libp2p/go-libp2p-core v0.3.0
github.com/libp2p/go-libp2p-kad-dht v0.4.1
github.com/libp2p/go-libp2p-kbucket v0.2.2
github.com/libp2p/go-libp2p-swarm v0.2.3-0.20200210151353-6e99a7602774
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1
github.com/libp2p/go-libp2p-xor v0.0.0-20200330160054-7c8ff159b6e9
github.com/libp2p/go-tcp-transport v0.1.1
github.com/mattn/go-colorable v0.1.2 // indirect
github.com/mattn/go-isatty v0.0.9 // indirect
github.com/multiformats/go-multiaddr v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.2
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pkg/errors v0.9.1
github.com/testground/sdk-go v0.1.1
go.uber.org/zap v1.14.1
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
)
//replace github.com/libp2p/go-libp2p-swarm => ../../../../libp2p/go-libp2p-swarm
//replace github.com/libp2p/go-libp2p-autonat => github.com/willscott/go-libp2p-autonat v0.1.2-0.20200310184838-ce79942134d7
//replace github.com/libp2p/go-libp2p-autonat-svc => github.com/libp2p/go-libp2p-autonat-svc v0.1.1-0.20200310185508-f21360000124
//replace github.com/libp2p/go-libp2p-kad-dht => ../../../../libp2p/go-libp2p-kad-dht
//replace github.com/libp2p/go-libp2p-kad-dht => github.com/libp2p/go-libp2p-kad-dht v0.5.2-0.20200310202241-7ada018b2a13
//replace github.com/libp2p/go-libp2p => github.com/libp2p/go-libp2p v0.6.1-0.20200310185355-89c193e0ca37
//replace github.com/libp2p/go-libp2p-core => github.com/libp2p/go-libp2p-core v0.5.0

502
dht/go.sum Normal file
View File

@ -0,0 +1,502 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis/v7 v7.2.0 h1:CrCexy/jYWZjW0AyVoHlcJUeZN19VWlbepTh1Vq6dJs=
github.com/go-redis/redis/v7 v7.2.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
github.com/ipfs/go-datastore v0.4.1 h1:W4ZfzyhNi3xmuU5dQhjfuRn/wFuqEE1KnOmmQiOevEY=
github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
github.com/ipfs/go-ds-leveldb v0.4.1 h1:zaoLcP8zs4Aj9k8fA4cowyOyNBIvy9Dnt6hf7mHRY7s=
github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipns v0.0.2 h1:oq4ErrV4hNQ2Eim257RTYRgfOSV/s8BDaf9iIl4NwFs=
github.com/ipfs/go-ipns v0.0.2/go.mod h1:WChil4e0/m9cIINWLxZe1Jtf77oz5L05rO2ei/uKJ5U=
github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-todocounter v0.0.2 h1:9UBngSQhylg2UDcxSAtpkT+rEWFr26hDPXVStE8LFyc=
github.com/ipfs/go-todocounter v0.0.2/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4=
github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A=
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0=
github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc=
github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ=
github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4=
github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM=
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-libp2p v0.4.2 h1:p0cthB0jDNHO4gH2HzS8/nAMMXbfUlFHs0jwZ4U+F2g=
github.com/libp2p/go-libp2p v0.4.2/go.mod h1:MNmgUxUw5pMsdOzMlT0EE7oKjRasl+WyVwM0IBlpKgQ=
github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI=
github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE=
github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU=
github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8=
github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
github.com/libp2p/go-libp2p-connmgr v0.2.1 h1:1ed0HFhCb39sIMK7QYgRBW0vibBBqFQMs4xt9a9AalY=
github.com/libp2p/go-libp2p-connmgr v0.2.1/go.mod h1:JReKEFcgzSHKT9lL3rhYcUtXBs9uMIiMKJGM1tl3xJE=
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I=
github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI=
github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0=
github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g=
github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA=
github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
github.com/libp2p/go-libp2p-discovery v0.1.0 h1:j+R6cokKcGbnZLf4kcNwpx6mDEUPF3N6SrqMymQhmvs=
github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g=
github.com/libp2p/go-libp2p-kad-dht v0.4.1 h1:N++/IVD7KemtNqwoqBLsmpc1PxROW1cxi81ja+wsJCg=
github.com/libp2p/go-libp2p-kad-dht v0.4.1/go.mod h1:Qf5Ddk5Csgi657ja2u5+NugbWz/QOVeVfrM1HTRDcfQ=
github.com/libp2p/go-libp2p-kbucket v0.2.2 h1:Jg/JUbQix6mvTnj+86FasRqkh01JFQNrN+H26Gxxsg0=
github.com/libp2p/go-libp2p-kbucket v0.2.2/go.mod h1:opWrBZSWnBYPc315q497huxY3sz1t488X6OiXUEYWKA=
github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=
github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90=
github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo=
github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI=
github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE=
github.com/libp2p/go-libp2p-nat v0.0.5 h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98=
github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE=
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
github.com/libp2p/go-libp2p-peerstore v0.1.4 h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k=
github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0=
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
github.com/libp2p/go-libp2p-routing v0.1.0 h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU=
github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE=
github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g=
github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA=
github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8=
github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4=
github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU=
github.com/libp2p/go-libp2p-swarm v0.2.3-0.20200210151353-6e99a7602774 h1:GRotlsHPT1Jl6qTzSNgNu4PjbCBBHxakAuUVBZKFrFI=
github.com/libp2p/go-libp2p-swarm v0.2.3-0.20200210151353-6e99a7602774/go.mod h1:bUFsDXKZ2i5p45JxrNwZyFs/g5+vVZPbEHawgDbKpVo=
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0=
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw=
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
github.com/libp2p/go-libp2p-xor v0.0.0-20200330160054-7c8ff159b6e9/go.mod h1:i0t7yEDID1o5va91VahoqBbJE8xDKdt0Rwd1uGFAM7A=
github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI=
github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI=
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg=
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0=
github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ=
github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo=
github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=
github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4=
github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg=
github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw=
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
github.com/libp2p/go-ws-transport v0.1.2 h1:VnxQcLfSGtqupqPpBNu8fUiCv+IN1RJ2BcVqQEM+z8E=
github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y=
github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI=
github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA=
github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ=
github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg=
github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y=
github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM=
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-varint v0.0.1 h1:TR/0rdQtnNxuN2IhiB639xC3tWM4IUi7DkTBVTdGW/M=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/testground/sdk-go v0.1.1 h1:PfBte3SGdslQ7sFTfHRCg+FqP7+IB8xT5aDjmMVRj4s=
github.com/testground/sdk-go v0.1.1/go.mod h1:wsWyhI6GfdCk9GUyD7Wu5UKX3x3jZcRh/Wp1XoFOP+A=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=
github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA=
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

47
dht/libp2p/addrs.go Normal file
View File

@ -0,0 +1,47 @@
package libp2p
import (
"context"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/pkg/errors"
"github.com/libp2p/test-plans/dht/utils"
"github.com/testground/sdk-go/sync"
)
// ShareAddresses publishes this node's NodeInfo on the shared sync topic and
// collects the NodeInfo records advertised by every other instance in the run.
//
// It blocks until one record per test instance has been received (this node's
// own record, matched by Seq, is discarded) or until ctx is cancelled. The
// result maps each remote peer's ID to its advertised info.
func ShareAddresses(ctx context.Context, ri *utils.RunInfo, nodeInfo *NodeInfo) (map[peer.ID]*NodeInfo, error) {
	peers := make(map[peer.ID]*NodeInfo)

	// Scope the subscription so it is torn down when we return.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	infoCh := make(chan *NodeInfo)
	if _, _, err := ri.Client.PublishSubscribe(subCtx, PeerAttribTopic, nodeInfo, infoCh); err != nil {
		return nil, errors.Wrap(err, "peer attrib publish/subscribe failure")
	}

	// Expect exactly one record per instance, including our own.
	remaining := ri.RunEnv.TestInstanceCount
	for remaining > 0 {
		select {
		case info := <-infoCh:
			remaining--
			if info.Seq == nodeInfo.Seq {
				continue // our own record; not a remote peer
			}
			peers[info.Addrs.ID] = info
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return peers, nil
}
// NodeInfo describes a single test instance: its global and per-group
// sequence numbers, the group it belongs to, and its dialable addresses.
type NodeInfo struct {
	Seq      int    // sequence number within the test
	GroupSeq int    // sequence number within the test group
	Group    string // name of the test group this instance belongs to
	Addrs    *peer.AddrInfo // peer ID plus listen multiaddrs for this instance
}

// PeerAttribTopic represents a subtree under the test run's sync tree where peers
// participating in this distributed test advertise their attributes.
var PeerAttribTopic = sync.NewTopic("attribs", &NodeInfo{})

20
dht/main.go Normal file
View File

@ -0,0 +1,20 @@
package main
import (
test "github.com/libp2p/test-plans/dht/test"
"github.com/testground/sdk-go/runtime"
)
// testCases maps each test case name (as declared in manifest.toml) to the
// function in the test package that implements it.
var testCases = map[string]runtime.TestCaseFn{
	"find-peers":        test.FindPeers,
	"find-providers":    test.FindProviders,
	"provide-stress":    test.ProvideStress,
	"store-get-value":   test.StoreGetValue,
	"get-closest-peers": test.GetClosestPeers,
	"bootstrap-network": test.BootstrapNetwork,
	"all":               test.All,
}

// main hands control to the testground runtime, which selects and invokes the
// requested test case from testCases based on the run environment.
func main() {
	runtime.InvokeMap(testCases)
}

172
dht/manifest.toml Normal file
View File

@ -0,0 +1,172 @@
# Name of this test plan, as referenced by the testground CLI.
name = "dht"

# Builder/runner used when none is specified on the command line.
[defaults]
builder = "exec:go"
runner = "local:exec"

[builders."docker:go"]
enabled = true
go_version = "1.13"
module_path = "github.com/libp2p/test-plans/dht"
exec_pkg = "."

# TODO: exec:go is not ready yet
[builders."exec:go"]
enabled = true
module_path = "github.com/libp2p/test-plans/dht"
exec_pkg = "."

# Runners this plan can execute under.
[runners."local:docker"]
enabled = true

[runners."local:exec"]
enabled = true

[runners."cluster:swarm"]
enabled = true

[runners."cluster:k8s"]
enabled = true
# Peer-routing test: each node runs `n_find_peers` FIND_PEER queries for peers
# not yet present in its routing table (see README "Find Peers" narrative).
[[testcases]]
name = "find-peers"
instances = { min = 16, max = 10000, default = 16 }

[testcases.params]
timeout_secs = { type = "int", desc = "test timeout", unit = "seconds", default = 300 }
latency = { type = "int", desc = "latency between peers", unit = "ms", default = 100 }
auto_refresh = { type = "bool", desc = "enable DHT routing table autorefresh", unit = "bool", default = true }
random_walk = { type = "bool", desc = "run 5 random walks before the test", unit = "bool", default = false }
# NOTE(review): README documents a Kademlia bucket-size default of 20; confirm 2 is intended.
bucket_size = { type = "int", desc = "routing table bucket size", unit = "peers", default = 2 }
alpha = { type = "int", desc = "dht concurrency parameter", unit = "int", default = 3 }
beta = { type = "int", desc = "dht resiliency parameter", unit = "int", default = 3 }
# fix: default was the string "false"; use a real boolean to match type = "bool".
client_mode = { type = "bool", desc = "all undialable nodes are clients", unit = "bool", default = false }
datastore = { type = "int", desc = "datastore type", unit = "int", default = 0 }
peer_id_seed = { type = "int", desc = "seed used to generate all peer IDs - must be smaller than MaxInt-instances", default = 0 }
bootstrapper = { type = "bool", desc = "node is a bootstrapper", unit = "bool", default = false }
bs_strategy = { type = "int", desc = "the identifier for the nodes bootstrap strategy", unit = "int", default = 0 }
undialable = { type = "bool", desc = "node is undialable", unit = "bool", default = false }
group_order = { type = "int", desc = "the order in which the node is bootstrapped, may be tied with another node", unit = "int", default = 0 }
expect_dht = { type = "bool", desc = "the node expects to be a dht server", unit = "bool", default = true }
n_find_peers = { type = "int", desc = "number of peers to find", unit = "peers", default = 1 }
# Content-routing test: some nodes PROVIDE records (`record_count`), others
# search for them (`search_records`), seeded by `record_seed`.
[[testcases]]
name = "find-providers"
instances = { min = 16, max = 10000, default = 16 }

[testcases.params]
timeout_secs = { type = "int", desc = "test timeout", unit = "seconds", default = 300 }
latency = { type = "int", desc = "latency between peers", unit = "ms", default = 100 }
auto_refresh = { type = "bool", desc = "enable DHT routing table autorefresh", unit = "bool", default = true }
random_walk = { type = "bool", desc = "run 5 random walks before the test", unit = "bool", default = false }
bucket_size = { type = "int", desc = "routing table bucket size", unit = "peers", default = 2 }
alpha = { type = "int", desc = "dht concurrency parameter", unit = "int", default = 3 }
beta = { type = "int", desc = "dht resiliency parameter", unit = "int", default = 3 }
# fix: default was the string "false"; use a real boolean to match type = "bool".
client_mode = { type = "bool", desc = "all undialable nodes are clients", unit = "bool", default = false }
datastore = { type = "int", desc = "datastore type", unit = "int", default = 0 }
peer_id_seed = { type = "int", desc = "seed used to generate all peer IDs - must be smaller than MaxInt-instances", default = 0 }
bootstrapper = { type = "bool", desc = "node is a bootstrapper", unit = "bool", default = false }
bs_strategy = { type = "int", desc = "the identifier for the nodes bootstrap strategy", unit = "int", default = 0 }
undialable = { type = "bool", desc = "node is undialable", unit = "bool", default = false }
group_order = { type = "int", desc = "the order in which the node is bootstrapped, may be tied with another node", unit = "int", default = 0 }
expect_dht = { type = "bool", desc = "the node expects to be a dht server", unit = "bool", default = true }
record_seed = { type = "int", desc = "the seed used to generate records", unit = "int", default = 0 }
record_count = { type = "int", desc = "number of records a peer provides", unit = "int", default = 0 }
search_records = { type = "bool", desc = "node will search for records", unit = "bool", default = false }
#p_providing = { type = "int", desc = "", unit = "% of nodes" }
#p_resolving = { type = "int", desc = "", unit = "% of nodes" }
#p_failing = { type = "int", desc = "", unit = "% of nodes" }
[[testcases]]
name = "provide-stress"
instances = { min = 16, max = 250, default = 16 }
[testcases.params]
bucket_size = { type = "int", desc = "bucket size", unit = "peers" }
auto_refresh = { type = "bool", desc = "", unit = "bool" }
random_walk = { type = "bool", desc = "", unit = "bool" }
n_bootstrap = { type = "int", desc = "number of bootstrap nodes", unit = "int", default = 1 }
n_provides = { type = "int", desc = "number of times to provide", unit = "int" }
i_provides = { type = "int", desc = "interval between each provide", unit = "seconds" }
[[testcases]]
name = "store-get-value"
instances = { min = 16, max = 250, default = 16 }
roles = ["storer", "fetcher"]
[testcases.params]
bucket_size = { type = "int", desc = "bucket size", unit = "peers" }
[[testcases]]
name = "get-closest-peers"
instances = { min = 16, max = 10000, default = 16 }
[testcases.params]
timeout_secs = { type = "int", desc = "test timeout", unit = "seconds", default = 300 }
latency = { type = "int", desc = "latency between peers", unit = "ms", default = 100 }
auto_refresh = { type = "bool", desc = "enable DHT routing table autorefresh", unit = "bool", default = true }
random_walk = { type = "bool", desc = "run 5 random walks before the test", unit = "bool", default = false }
bucket_size = { type = "int", desc = "routing table bucket size", unit = "peers", default = 2 }
alpha = { type = "int", desc = "dht concurrency parameter", unit = "int", default = 3 }
beta = { type = "int", desc = "dht resiliency parameter", unit = "int", default = 3 }
client_mode = { type = "bool", desc = "all undialable nodes are clients", unit = "bool", default = false }
datastore = { type = "int", desc = "datastore type", unit = "int", default = 0 }
peer_id_seed = { type = "int", desc = "seed used to generate all peer IDs - must be smaller than MaxInt-instances", default = 0 }
bootstrapper = { type = "bool", desc = "node is a bootstrapper", unit = "bool", default = false}
bs_strategy = { type = "int", desc = "the identifier for the nodes bootstrap strategy", unit = "int", default = 0}
undialable = { type = "bool", desc = "node is undialable", unit = "bool", default = false}
group_order = { type = "int", desc = "the order in which the node is bootstrapped, may be tied with another node", unit ="int", default = 0}
expect_dht = { type = "bool", desc = "the node expects to be a dht server", unit ="bool", default = true}
record_seed = { type = "int", desc = "the seed used to generate records", unit = "int", default = 0 }
record_count = { type = "int", desc = "number of records a peer provides", unit = "int", default = 0 }
search_records = { type = "bool", desc = "node will search for records", unit = "bool", default = false }
[[testcases]]
name = "bootstrap-network"
instances = { min = 16, max = 10000, default = 16 }
[testcases.params]
timeout_secs = { type = "int", desc = "test timeout", unit = "seconds", default = 300 }
latency = { type = "int", desc = "latency between peers", unit = "ms", default = 100 }
auto_refresh = { type = "bool", desc = "enable DHT routing table autorefresh", unit = "bool", default = true }
random_walk = { type = "bool", desc = "run 5 random walks before the test", unit = "bool", default = false }
bucket_size = { type = "int", desc = "routing table bucket size", unit = "peers", default = 2 }
alpha = { type = "int", desc = "dht concurrency parameter", unit = "int", default = 3 }
beta = { type = "int", desc = "dht resiliency parameter", unit = "int", default = 3 }
client_mode = { type = "bool", desc = "all undialable nodes are clients", unit = "bool", default = false }
datastore = { type = "int", desc = "datastore type", unit = "int", default = 0 }
peer_id_seed = { type = "int", desc = "seed used to generate all peer IDs - must be smaller than MaxInt-instances", default = 0 }
bootstrapper = { type = "bool", desc = "node is a bootstrapper", unit = "bool", default = false}
bs_strategy = { type = "int", desc = "the identifier for the nodes bootstrap strategy", unit = "int", default = 0}
undialable = { type = "bool", desc = "node is undialable", unit = "bool", default = false}
group_order = { type = "int", desc = "the order in which the node is bootstrapped, may be tied with another node", unit ="int", default = 0}
expect_dht = { type = "bool", desc = "the node expects to be a dht server", unit ="bool", default = true}
[[testcases]]
name = "all"
instances = { min = 16, max = 10000, default = 16 }
[testcases.params]
timeout_secs = { type = "int", desc = "test timeout", unit = "seconds", default = 300 }
latency = { type = "int", desc = "latency between peers", unit = "ms", default = 100 }
auto_refresh = { type = "bool", desc = "enable DHT routing table autorefresh", unit = "bool", default = true }
random_walk = { type = "bool", desc = "run 5 random walks before the test", unit = "bool", default = false }
bucket_size = { type = "int", desc = "routing table bucket size", unit = "peers", default = 2 }
alpha = { type = "int", desc = "dht concurrency parameter", unit = "int", default = 3 }
beta = { type = "int", desc = "dht resiliency parameter", unit = "int", default = 3 }
client_mode = { type = "bool", desc = "all undialable nodes are clients", unit = "bool", default = false }
datastore = { type = "int", desc = "datastore type", unit = "int", default = 0 }
peer_id_seed = { type = "int", desc = "seed used to generate all peer IDs - must be smaller than MaxInt-instances", default = 0 }
bootstrapper = { type = "bool", desc = "node is a bootstrapper", unit = "bool", default = false}
bs_strategy = { type = "int", desc = "the identifier for the nodes bootstrap strategy", unit = "int", default = 0}
undialable = { type = "bool", desc = "node is undialable", unit = "bool", default = false}
group_order = { type = "int", desc = "the order in which the node is bootstrapped, may be tied with another node", unit ="int", default = 0}
expect_dht = { type = "bool", desc = "the node expects to be a dht server", unit ="bool", default = true}
record_seed = { type = "int", desc = "the seed used to generate records", unit = "int", default = 0 }
record_count = { type = "int", desc = "number of records a peer provides", unit = "int", default = 0 }
search_records = { type = "bool", desc = "node will search for records", unit = "bool", default = false }
n_find_peers = { type = "int", desc = "number of peers to find", unit = "peers", default = 0 }

29
dht/scripts/graphs.ps1 Executable file
View File

@ -0,0 +1,29 @@
# graphs.ps1 renders connection and routing-table graphviz .dot files, one
# per phase marker ("br", "bt", "at", "ab", "end"), from a testground run's
# per-node stderr.json event logs.
#   $graphID - optional: render only this one phase
#   $dataDir - optional: run directory (defaults to the most recent dht run)
param($graphID, $dataDir)
$allGraphs = @("br", "bt", "at", "ab", "end")
$fmt = "png"
if (!$dataDir) {
    # Default to the most recently written dht result directory.
    $dataDir = gci $HOME/.testground/results/dht | Sort LastWriteTime | select -Last 1
}
if ($graphID) {
    $allGraphs = @($graphID)
}
# Parse all stderr.json logs ONCE; every graph below only filters this set.
# (Previously this gci + ConvertFrom-Json ran again on every loop iteration,
# re-reading and re-parsing the same files per graph.)
$data = gci $dataDir -recurse | ?{$_.Name -eq "stderr.json"} | Get-Content |
    ConvertFrom-Json
$allGraphs | %{
    $g = $_
    # Connection graph: "Graph" events tagged with this phase marker.
    $gdataz = $data | ?{$_.N -eq "Graph" -and $_.M -eq $g}
    $gdata = $gdataz | %{"Z{0} -> Z{1};`n" -f $_.From, $_.To}
    $file = "digraph D {`n " + $gdata + "}"
    $file > "$g-conn.dot"
    # Routing-table graph: "RT" events for the same phase.
    $rtdata = $data | ?{$_.N -eq "RT" -and $_.M -eq "$g"} | %{"Z{0} -> Z{1};`n" -f $_.From, $_.To}
    $rtfile = "digraph D {`n " + $rtdata + "}"
    $rtfile > "$g-rt.dot"
    #$file | circo "-T$fmt" -o "$g.$fmt"
}

225
dht/scripts/prov-results.ps1 Executable file
View File

@ -0,0 +1,225 @@
# prov-results.ps1: summarize provider-record test results found under
# $dataDir and emit condensed per-node JSON for later inspection.
param($dataDir)
$allFiles = $dataDir | gci -Recurse -File
# NOTE(review): $connGraphs, $rts and $errs are collected but not used in
# this script (results.ps1 uses the same names) — presumably copied over;
# confirm before removing.
$connGraphs = $allFiles | ?{$_.Name -eq "dht_graphs.out"}
$rts = $allFiles | ?{$_.Name -eq "dht_rt.out"}
$errs = $allFiles | ?{$_.Name -eq "run.err"}
# Nanoseconds per second; metric values in the logs are in ns.
$ns = 1000000000
# basicStats computes the average and (nearest-rank) 95th percentile of a
# numeric series.
#   $values  - the samples; $null yields zeroed stats
#   $reverse - sort descending, so the percentile captures the low tail
# Returns a PSCustomObject with Average and Percentile95, rounded to 2 dp.
function basicStats ($values, $reverse) {
    if ($null -eq $values) {
        # No samples: report zeroes rather than failing downstream tables.
        return [PSCustomObject]@{
            Average = 0
            Percentile95 = 0
        }
    }
    $obj = $values | measure-object -Average -Sum -Maximum -Minimum -StandardDeviation
    # BUGFIX: use the boolean operator -or; "||" is a pipeline chain operator
    # and only parses on PowerShell 7+.
    if (($null -eq $reverse) -or ($false -eq $reverse)) {
        $sorted = $values | Sort-Object
    } else {
        $sorted = $values | Sort-Object -Descending
    }
    $95percentile = $sorted[[math]::Ceiling(95 / 100 * ($sorted.Count - 1))]
    if ($null -eq $95percentile) {
        # BUGFIX: was `return "ASDFASFASF"` — a debug placeholder that leaked
        # a bare string into Format-Table output. Fall back to zeroed stats.
        return [PSCustomObject]@{
            Average = 0
            Percentile95 = 0
        }
    }
    return [PSCustomObject]@{
        Average = [math]::Round([double]$obj.Average,2)
        Percentile95 = [math]::Round([double]$95percentile, 2)
    }
}
# groupStats buckets metrics by one "|"-separated segment of their Name and
# reports basicStats for each bucket as a property on a single result object.
#   $metrics    - objects with Name ("a|b|c"-style) and Value
#   $groupIndex - which Name segment to bucket on
#   $reverse    - forwarded to basicStats
function groupStats ($metrics, $groupIndex, $reverse) {
    $props = @{}
    $metrics | Group-Object -Property { $_.Name.Split("|")[$groupIndex] } | ForEach-Object {
        $props[$_.Name] = basicStats ($_.Group | ForEach-Object { $_.Value }) $reverse
    }
    return New-Object -TypeName psobject -Property $props
}
# run prints summary statistics (provide/find timings, peer counts, dial and
# message counts) for a single test-group output directory.
function run($groupDir) {
    $groupID = $groupDir.Name
    $files = $groupDir | gci -Recurse -File
    $queries = $files | ?{$_.Name -eq "dht_queries.out"}
    # NOTE(review): $lookups is collected but never used below — confirm.
    $lookups = $files | ?{$_.Name -eq "dht_lookup.out"}
    $out = $files | ?{$_.Name -eq "run.out"}
    # Pull the metric payloads out of the runner's JSON event stream.
    $metrics = $out | Get-Content | ConvertFrom-Json | %{$_.event.metric} | ?{$_}
    # NOTE(review): "$mset =" continues onto the next statement, so $mset just
    # aliases $provs and is never used — looks like a leftover; confirm.
    $mset =
    $provs = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-provide") -and $_.value -gt 0} |
    %{$_.value/$ns}
    # Find timings, converted from ns to seconds, keyed by metric name.
    $findfirst = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find-first")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findlast = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find-last")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findall = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findgood = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|done")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findfail = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|fail")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    # Peer counts: found (>0), missing, and zero-found treated as failures.
    $found = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-found") -and $_.value -gt 0} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    $missing = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-missing")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    $failures = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-found") -and $_.value -eq 0} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    # Per-node counts of dial / send events in the query logs.
    $dials = $queries | %{Get-Content $_ | ConvertFrom-Json | ?{$_.msg -eq "dialing"} | measure-object } |
    %{$_.Count}
    $msgs = $queries | %{Get-Content $_ | ConvertFrom-Json | ?{$_.msg -eq "send"} | measure-object } |
    %{$_.Count}
    echo "Group $groupID :"
    if ($null -ne $provs) {
        echo "Time-to-Provide"
        basicStats($provs) | Format-Table
    }
    if ($null -ne $findfirst) {
        echo "Time-to-Find-First"
        groupStats $findfirst 1 | Format-Table
        echo "Time-to-Find-Last"
        groupStats $findlast 2 | Format-Table
        if ($null -ne $findgood) {
            echo "Time-to-Find Success"
            groupStats $findgood 2 | Format-Table
        }
        if ($null -ne $findfail) {
            echo "Time-to-Find Fail"
            groupStats $findfail 2 | Format-Table
            echo "Number of Failures"
            groupStats $failures 2 | Format-Table
        }
        if (($null -ne $findgood) -and ($null -ne $findfail)) {
            echo "Time-to-Find"
            groupStats $findall 2 | Format-Table
        }
        echo "Peers Found"
        # Descending sort so the 95th percentile reflects the low tail.
        groupStats $found 2 $true | Format-Table
        echo "Peers Missing"
        groupStats $missing 2 | Format-Table
        #if ($failures -ne $null) {
        # echo "Peers Failures"
        # groupStats $failures 2 | Format-Table
        #} else {
        # echo "No Peer Failures"
        #}
    }
    if ($dials -ne $null) {
        echo "Total number of dials"
        basicStats($dials) | Format-Table
    } else {
        echo "No DHT query dials performed"
    }
    if ($msgs -ne $null) {
        echo "Total number of messages sent"
        basicStats($msgs) | Format-Table
    } else {
        echo "No DHT query messages sent"
    }
}
# condense rewrites the lookup and run logs into compact JSON files:
# timestamps become seconds relative to the first lookup event, and peer IDs /
# kademlia keys are truncated to a few characters for readability.
function condense($fileDir) {
    # Start fresh: the output files are appended to line-by-line below.
    Remove-Item $fileDir/lookupcmp.json -ErrorAction Ignore
    Remove-Item $fileDir/runcmp.json -ErrorAction Ignore
    # NOTE(review): reads "dht_lookups.out" while run() filters for
    # "dht_lookup.out" — confirm which filename the plan actually writes.
    $lookupOut = gci $fileDir/dht_lookups.out | gc | ConvertFrom-Json
    $start = $lookupOut | Select-Object -First 1 -ExpandProperty ts
    $lookupOut | %{
        # Rebase timestamps to seconds since the first lookup event.
        $_.ts = ($_.ts - $start)/$ns;
        # Shorten IDs: last 4 chars of peer IDs, first 5 of kademlia keys.
        $_.node = -join $_.node[-4..-1];
        $_.nodeKad = -join $_.nodeKad[0..4];
        $_.target = -join $_.target[-4..-1];
        $_.info.ID = -join $_.info.ID[0..4];
        #$_.targetKad = -join $_.targetKad[0..4];
        if ($null -ne $_.cause) {
            sliceLast $_ cause
            sliceLast $_ source
            sliceFirst $_ causeKad
            sliceFirst $_ sourceKad
            arrslice $_ heard $false
            arrslice $_ waiting $false
            arrslice $_ queried $false
            arrslice $_ unreachable $false
            arrslice $_ heardKad $true
            arrslice $_ waitingKad $true
            arrslice $_ queriedKad $true
            arrslice $_ unreachableKad $true
        }
        # Drop the verbose peer-set fields and append one JSON object per line.
        $_} | Select-Object -Property * -ExcludeProperty heard,waiting,queried,unreachable |
        %{ $_ | ConvertTo-Json -Compress -Depth 10 | Add-Content $fileDir/lookupcmp.json }
    # Rebase the run log against the same start time and condense it too.
    $runOut = gci $fileDir/run.out | gc | ConvertFrom-Json
    $runOut | %{$_.ts = ($_.ts - $start)/$ns; $_} |
        %{ $_ | ConvertTo-Json -Compress -Depth 10 | Add-Content $fileDir/runcmp.json }
}
# sliceFirst truncates $obj.$field to its first five characters (joined into
# a single string); no-op when the field is absent or $null.
function sliceFirst($obj, $field) {
    if ($null -ne $obj.$field) {
        $obj.$field = -join $obj.$field[0..4]
    }
}
# sliceLast truncates $obj.$field to its last four characters (joined into a
# single string); no-op when the field is absent or $null.
function sliceLast($obj, $field) {
    if ($null -ne $obj.$field) {
        $obj.$field = -join $obj.$field[-4..-1]
    }
}
# arrslice shortens every element of the array field $obj.$field: the first
# five characters when $first is set, otherwise the last four. No-op when the
# field is absent or $null.
function arrslice($obj, $field, $first) {
    if ($null -eq $obj.$field) {
        return
    }
    if ($first) {
        $obj.$field = $obj.$field | ForEach-Object { -join $_[0..4] }
    } else {
        $obj.$field = $obj.$field | ForEach-Object { -join $_[-4..-1] }
    }
}
# Entry point: print the summary statistics, then write the condensed JSON.
run $dataDir
condense $dataDir

204
dht/scripts/results.ps1 Executable file
View File

@ -0,0 +1,204 @@
# results.ps1: collect a testground run (unless already extracted), print
# summary statistics for every group, and optionally emit graphviz graphs.
param($runID)
$ErrorActionPreference = "Stop"
# NOTE(review): hard-coded to an ipfs/testground checkout and a fixed stats
# directory — confirm these paths match your environment.
$env:TESTGROUND_SRCDIR="$env:USERPROFILE/go/src/github.com/ipfs/testground"
$outputDir = "$env:USERPROFILE/workspace/testground/stats"
$runner = "cluster:k8s"
#$runner = "local:docker"
$graphs = $true
# Fetch and unpack the run's outputs unless they are already on disk.
if (-not [System.IO.Directory]::Exists("$outputDir/$runID")) {
    $outname = "$outputDir/$runID.tar.gz"
    testground collect $runID --runner $runner -o $outname
    tar -C $outputDir -zxvf $outname
}
$groupDirs = gci $outputDir/$runID
$allFiles = $groupDirs | gci -Recurse -File
$connGraphs = $allFiles | ?{$_.Name -eq "dht_graphs.out"}
$rts = $allFiles | ?{$_.Name -eq "dht_rt.out"}
# NOTE(review): $errs is collected but not used below — confirm.
$errs = $allFiles | ?{$_.Name -eq "run.err"}
# Nanoseconds per second; metric values in the logs are in ns.
$ns = 1000000000
# basicStats computes the average and (nearest-rank) 95th percentile of a
# numeric series.
#   $values  - the samples; $null yields zeroed stats
#   $reverse - sort descending, so the percentile captures the low tail
# Returns a PSCustomObject with Average and Percentile95, rounded to 2 dp.
function basicStats ($values, $reverse) {
    if ($null -eq $values) {
        # No samples: report zeroes rather than failing downstream tables.
        return [PSCustomObject]@{
            Average = 0
            Percentile95 = 0
        }
    }
    $obj = $values | measure-object -Average -Sum -Maximum -Minimum -StandardDeviation
    # BUGFIX: use the boolean operator -or; "||" is a pipeline chain operator
    # and only parses on PowerShell 7+.
    if (($null -eq $reverse) -or ($false -eq $reverse)) {
        $sorted = $values | Sort-Object
    } else {
        $sorted = $values | Sort-Object -Descending
    }
    $95percentile = $sorted[[math]::Ceiling(95 / 100 * ($sorted.Count - 1))]
    if ($null -eq $95percentile) {
        # BUGFIX: was `return "ASDFASFASF"` — a debug placeholder that leaked
        # a bare string into Format-Table output. Fall back to zeroed stats.
        return [PSCustomObject]@{
            Average = 0
            Percentile95 = 0
        }
    }
    return [PSCustomObject]@{
        Average = [math]::Round([double]$obj.Average,2)
        Percentile95 = [math]::Round([double]$95percentile, 2)
    }
}
# groupStats buckets metrics by one "|"-separated segment of their Name and
# reports basicStats for each bucket as a property on a single result object.
#   $metrics    - objects with Name ("a|b|c"-style) and Value
#   $groupIndex - which Name segment to bucket on
#   $reverse    - forwarded to basicStats
function groupStats ($metrics, $groupIndex, $reverse) {
    $props = @{}
    $metrics | Group-Object -Property { $_.Name.Split("|")[$groupIndex] } | ForEach-Object {
        $props[$_.Name] = basicStats ($_.Group | ForEach-Object { $_.Value }) $reverse
    }
    return New-Object -TypeName psobject -Property $props
}
# For each test group: parse its metrics and print the same summary tables
# that prov-results.ps1's run() produces.
foreach ($groupDir in $groupDirs) {
    $groupID = $groupDir.Name
    $files = $groupDir | gci -Recurse -File
    $queries = $files | ?{$_.Name -eq "dht_queries.out"}
    # NOTE(review): $lookups is collected but never used below — confirm.
    $lookups = $files | ?{$_.Name -eq "dht_lookup.out"}
    $out = $files | ?{$_.Name -eq "run.out"}
    # Pull the metric payloads out of the runner's JSON event stream.
    $metrics = $out | Get-Content | ConvertFrom-Json | %{$_.event.metric} | ?{$_}
    # NOTE(review): "$mset =" continues onto the next statement, so $mset just
    # aliases $provs and is never used — looks like a leftover; confirm.
    $mset =
    $provs = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-provide") -and $_.value -gt 0} |
    %{$_.value/$ns}
    # Find timings, converted from ns to seconds, keyed by metric name.
    $findfirst = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find-first")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findlast = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find-last")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findall = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findgood = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|done")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    $findfail = $metrics |
    ?{$_.name -and $_.name.StartsWith("time-to-find|fail")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value/$ns; }}
    # Peer counts: found (>0), missing, and zero-found treated as failures.
    $found = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-found") -and $_.value -gt 0} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    $missing = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-missing")} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    $failures = $metrics |
    ?{$_.name -and $_.name.StartsWith("peers-found") -and $_.value -eq 0} |
    %{ [pscustomobject]@{ Name=$_.name; Value= $_.value; }}
    # Per-node counts of dial / send events in the query logs.
    $dials = $queries | %{Get-Content $_ | ConvertFrom-Json | ?{$_.msg -eq "dialing"} | measure-object } |
    %{$_.Count}
    $msgs = $queries | %{Get-Content $_ | ConvertFrom-Json | ?{$_.msg -eq "send"} | measure-object } |
    %{$_.Count}
    echo "Group $groupID :"
    if ($null -ne $provs) {
        echo "Time-to-Provide"
        basicStats($provs) | Format-Table
    }
    if ($null -ne $findfirst) {
        echo "Time-to-Find-First"
        groupStats $findfirst 1 | Format-Table
        echo "Time-to-Find-Last"
        groupStats $findlast 2 | Format-Table
        if ($null -ne $findgood) {
            echo "Time-to-Find Success"
            groupStats $findgood 2 | Format-Table
        }
        if ($null -ne $findfail) {
            echo "Time-to-Find Fail"
            groupStats $findfail 2 | Format-Table
            echo "Number of Failures"
            groupStats $failures 2 | Format-Table
        }
        if (($null -ne $findgood) -and ($null -ne $findfail)) {
            echo "Time-to-Find"
            groupStats $findall 2 | Format-Table
        }
        echo "Peers Found"
        # Descending sort so the 95th percentile reflects the low tail.
        groupStats $found 2 $true | Format-Table
        echo "Peers Missing"
        groupStats $missing 2 | Format-Table
        #if ($failures -ne $null) {
        # echo "Peers Failures"
        # groupStats $failures 2 | Format-Table
        #} else {
        # echo "No Peer Failures"
        #}
    }
    if ($dials -ne $null) {
        echo "Total number of dials"
        basicStats($dials) | Format-Table
    } else {
        echo "No DHT query dials performed"
    }
    if ($msgs -ne $null) {
        echo "Total number of messages sent"
        basicStats($msgs) | Format-Table
    } else {
        echo "No DHT query messages sent"
    }
}
# Optionally render connection and routing-table graphs as graphviz .dot
# files, one per phase marker (the logs' "msg" field).
if (-not $graphs) {
    return
}
$allGraphs = $connGraphs | Get-Content | ConvertFrom-Json | Group-Object -Property msg
$allGraphs | %{
    $g = $_.Name
    $obj = $_.Group
    # One edge per recorded connection: "Zfrom -> Zto;"
    $gdata = $obj | %{"Z{0} -> Z{1};`n" -f $_.From, $_.To}
    $file = "digraph D {`n " + $gdata + "}"
    $file > "$outputDir/$runID/$g-conn.dot"
    #$file | circo "-T$fmt" -o "$g.$fmt"
}
$allRTs = $rts | Get-Content | ConvertFrom-Json | Group-Object -Property msg
$allRTs | %{
    $g = $_.Name
    $obj = $_.Group
    # One edge per routing-table entry: "Znode -> Zpeer;"
    $gdata = $obj | %{"Z{0} -> Z{1};`n" -f $_.Node, $_.Peer}
    $file = "digraph D {`n " + $gdata + "}"
    $file > "$outputDir/$runID/$g-rt.dot"
    #$file | circo "-T$fmt" -o "$g.$fmt"
}

34
dht/test/all.go Normal file
View File

@ -0,0 +1,34 @@
package test
import (
"context"
"github.com/testground/sdk-go/runtime"
)
// All runs every DHT test case back to back over a single shared base setup
// and tears the run down at the end. It stops at the first failing case.
func All(runenv *runtime.RunEnv) error {
	opts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), opts.Timeout)
	defer cancel()

	// One base setup is shared by all of the sub-tests below.
	ri, err := Base(ctx, runenv, opts)
	if err != nil {
		return err
	}

	if err = TestFindPeers(ctx, ri); err != nil {
		return err
	}
	if err = TestGetClosestPeers(ctx, ri); err != nil {
		return err
	}
	if err = TestProviderRecords(ctx, ri); err != nil {
		return err
	}
	if err = TestIPNSRecords(ctx, ri); err != nil {
		return err
	}

	Teardown(ctx, ri.RunInfo)
	return nil
}

View File

@ -0,0 +1,20 @@
package test
import (
"context"
"github.com/testground/sdk-go/runtime"
)
// BootstrapNetwork performs only the shared setup/bootstrap phase and then
// returns, exercising network formation without running any queries.
func BootstrapNetwork(runenv *runtime.RunEnv) error {
	opts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), opts.Timeout)
	defer cancel()

	if _, err := Base(ctx, runenv, opts); err != nil {
		return err
	}
	return nil
}

946
dht/test/common.go Normal file
View File

@ -0,0 +1,946 @@
package test
import (
"context"
"fmt"
"math"
"math/rand"
"net"
"os"
gosync "sync"
"time"
"github.com/pkg/errors"
"github.com/testground/sdk-go/runtime"
"github.com/testground/sdk-go/sync"
tglibp2p "github.com/libp2p/test-plans/dht/libp2p"
"github.com/libp2p/test-plans/dht/utils"
leveldb "github.com/ipfs/go-ds-leveldb"
"github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
"github.com/libp2p/go-libp2p"
connmgr "github.com/libp2p/go-libp2p-connmgr"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
kaddht "github.com/libp2p/go-libp2p-kad-dht"
swarm "github.com/libp2p/go-libp2p-swarm"
tptu "github.com/libp2p/go-libp2p-transport-upgrader"
tcp "github.com/libp2p/go-tcp-transport"
"github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multiaddr-net"
"go.uber.org/zap"
)
func init() {
	// NOTE(review): presumably disables TCP reuseport so per-node bogus
	// addresses behave as intended — confirm.
	os.Setenv("LIBP2P_TCP_REUSEPORT", "false")
	// Zero dial backoff for the whole process.
	swarm.BackoffBase = 0
}
// minTestInstances is the smallest network size supported (enforced in Setup).
const minTestInstances = 16

// OptDatastore selects the datastore backing the DHT ("datastore" parameter).
type OptDatastore int

const (
	OptDatastoreMemory  OptDatastore = iota // mutex-wrapped in-memory map datastore
	OptDatastoreLeveldb                     // leveldb datastore in a temp directory
)
// SetupOpts holds one group's parsed test parameters; see GetCommonOpts for
// the parameter name each field is read from.
type SetupOpts struct {
	Timeout     time.Duration // timeout_secs; also used as local dial timeout
	Latency     time.Duration // latency (ms between peers)
	AutoRefresh bool          // auto_refresh: enable routing table auto-refresh
	RandomWalk  bool          // random_walk
	BucketSize  int           // bucket_size: kademlia bucket size
	Alpha       int           // alpha: DHT concurrency parameter
	Beta        int           // beta: DHT resiliency parameter
	// NDisjointPaths is never populated by GetCommonOpts.
	// NOTE(review): confirm whether a parameter is missing or the field is dead.
	NDisjointPaths    int
	ClientMode        bool         // client_mode
	Datastore         OptDatastore // datastore
	PeerIDSeed        int          // peer_id_seed (unused within this file)
	Bootstrapper      bool         // bootstrapper: this node is a bootstrapper
	BootstrapStrategy int          // bs_strategy: see GetBootstrapNodes
	Undialable        bool         // undialable: node advertises a bogus address
	GroupOrder        int          // group_order: relative bootstrap ordering
	ExpectedServer    bool         // expect_dht: node expects to be a DHT server
}
// DHTRunInfo bundles the shared run info with the DHT-specific view of this
// node and of every other instance in the run.
type DHTRunInfo struct {
	*utils.RunInfo

	// DHTGroupProperties maps each test group ID to that group's parsed options.
	DHTGroupProperties map[string]*SetupOpts

	Node   *NodeParams              // this instance's host/DHT/bookkeeping
	Others map[peer.ID]*DHTNodeInfo // info on every other instance, keyed by peer ID
}
// GetCommonOpts parses the DHT test parameters common to every test case out
// of the run environment into a SetupOpts.
// Note: SetupOpts.NDisjointPaths is not populated here.
func GetCommonOpts(runenv *runtime.RunEnv) *SetupOpts {
	opts := &SetupOpts{
		Timeout:           time.Duration(runenv.IntParam("timeout_secs")) * time.Second,
		Latency:           time.Duration(runenv.IntParam("latency")) * time.Millisecond,
		AutoRefresh:       runenv.BooleanParam("auto_refresh"),
		RandomWalk:        runenv.BooleanParam("random_walk"),
		BucketSize:        runenv.IntParam("bucket_size"),
		Alpha:             runenv.IntParam("alpha"),
		Beta:              runenv.IntParam("beta"),
		ClientMode:        runenv.BooleanParam("client_mode"),
		Datastore:         OptDatastore(runenv.IntParam("datastore")),
		PeerIDSeed:        runenv.IntParam("peer_id_seed"),
		Bootstrapper:      runenv.BooleanParam("bootstrapper"),
		BootstrapStrategy: runenv.IntParam("bs_strategy"),
		Undialable:        runenv.BooleanParam("undialable"),
		GroupOrder:        runenv.IntParam("group_order"),
		ExpectedServer:    runenv.BooleanParam("expect_dht"),
	}
	return opts
}
// NodeParams groups the live objects belonging to this test instance.
type NodeParams struct {
	host host.Host        // the libp2p host
	dht  *kaddht.IpfsDHT  // the DHT instance running on top of host
	info *DHTNodeInfo     // this node's shared info and properties
}

// DHTNodeInfo extends the shared libp2p node info with this plan's options.
type DHTNodeInfo struct {
	*tglibp2p.NodeInfo
	Properties *SetupOpts // the options of the group this node belongs to
}

// NodeProperties describes a node's role flags.
// NOTE(review): appears unused within this file — confirm callers exist.
type NodeProperties struct {
	Bootstrapper   bool
	Undialable     bool
	ExpectedServer bool
}

// ConnManagerGracePeriod is how long new connections are protected from
// trimming by the connection manager.
var ConnManagerGracePeriod = 1 * time.Second
// NewDHTNode creates a libp2p Host, and a DHT instance on top of it.
//
// Connection-manager watermarks are sized from the network size (larger for
// bootstrappers). The node listens on a subnet-local TCP address; an
// "undialable" node instead advertises a bogus port whose raw listener
// accepts connections and drops them after 5 seconds.
func NewDHTNode(ctx context.Context, runenv *runtime.RunEnv, opts *SetupOpts, idKey crypto.PrivKey, info *DHTNodeInfo) (host.Host, *kaddht.IpfsDHT, error) {
	// Local dials share the overall test timeout.
	swarm.DialTimeoutLocal = opts.Timeout

	var min, max int
	if info.Properties.Bootstrapper {
		// TODO: Assumes only 1 bootstrapper group
		min = (runenv.TestInstanceCount / runenv.TestGroupInstanceCount) *
			int(math.Ceil(math.Log2(float64(runenv.TestInstanceCount))))
		max = min * 2
	} else {
		min = int(math.Ceil(math.Log2(float64(runenv.TestInstanceCount))) * 5)
		max = min * 2
	}

	// We need enough connections to be able to trim some and still have a
	// few peers.
	//
	// Note: this check is redundant just to be explicit. If we have over 16
	// peers, we're above this limit.
	// if min < 3 || max >= runenv.TestInstanceCount {
	if min < 3 {
		return nil, nil, fmt.Errorf("not enough peers")
	}

	runenv.RecordMessage("connmgr parameters: hi=%d, lo=%d", max, min)

	// Generate bogus advertising address
	tcpAddr, err := getSubnetAddr(runenv.TestSubnet)
	if err != nil {
		return nil, nil, err
	}

	libp2pOpts := []libp2p.Option{
		libp2p.Identity(idKey),
		// Use only the TCP transport without reuseport.
		libp2p.Transport(func(u *tptu.Upgrader) *tcp.TcpTransport {
			tpt := tcp.NewTCPTransport(u)
			tpt.DisableReuseport = true
			return tpt
		}),
		// Setup the connection manager to trim to
		libp2p.ConnectionManager(connmgr.NewConnManager(min, max, ConnManagerGracePeriod)),
	}

	if info.Properties.Undialable {
		// Advertise a random high port we do NOT serve libp2p on.
		tcpAddr.Port = rand.Intn(1024) + 1024
		bogusAddr, err := manet.FromNetAddr(tcpAddr)
		if err != nil {
			return nil, nil, err
		}
		bogusAddrLst := []multiaddr.Multiaddr{bogusAddr}
		libp2pOpts = append(libp2pOpts,
			libp2p.NoListenAddrs,
			libp2p.AddrsFactory(func(listeningAddrs []multiaddr.Multiaddr) []multiaddr.Multiaddr {
				return bogusAddrLst
			}))
		// Raw listener on the bogus port: accept, hold 5s, then drop.
		// NOTE(review): the listener itself is never closed when ctx ends;
		// confirm this is acceptable for the process lifetime.
		l, err := net.ListenTCP("tcp", tcpAddr)
		if err != nil {
			return nil, nil, err
		}
		go func() {
			for ctx.Err() == nil {
				c, err := l.Accept()
				if err != nil {
					continue
				}
				go func() {
					time.Sleep(time.Second * 5)
					_ = c.Close()
				}()
			}
		}()
	} else {
		addr, err := manet.FromNetAddr(tcpAddr)
		if err != nil {
			return nil, nil, err
		}
		libp2pOpts = append(libp2pOpts,
			libp2p.ListenAddrs(addr))
	}

	libp2pOpts = append(libp2pOpts, getTaggedLibp2pOpts(opts, info)...)

	node, err := libp2p.New(ctx, libp2pOpts...)
	if err != nil {
		return nil, nil, err
	}

	// Select the datastore implementation requested by the test parameters.
	var ds datastore.Batching
	switch opts.Datastore {
	case OptDatastoreMemory:
		ds = dssync.MutexWrap(datastore.NewMapDatastore())
	case OptDatastoreLeveldb:
		ds, err = leveldb.NewDatastore("", nil)
		if err != nil {
			return nil, nil, err
		}
	default:
		return nil, nil, fmt.Errorf("invalid datastore type")
	}

	runenv.RecordMessage("creating DHT")
	dht, err := createDHT(ctx, node, ds, opts, info)
	if err != nil {
		runenv.RecordMessage("creating DHT error %v", err)
		return nil, nil, err
	}
	runenv.RecordMessage("creating DHT successful")
	return node, dht, nil
}
// getSubnetAddr returns a TCP address (port 0) on the local interface whose
// IP falls inside the test subnet, or an error if none is found.
func getSubnetAddr(subnet *runtime.IPNet) (*net.TCPAddr, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	for _, addr := range addrs {
		ip, ok := addr.(*net.IPNet)
		if !ok {
			// BUGFIX: was panic(fmt.Sprintf("%T", addr)). Some platforms
			// report non-*net.IPNet addresses (e.g. *net.IPAddr); skip them
			// and let the caller receive a clean error below instead of
			// crashing the whole test instance.
			continue
		}
		if subnet.Contains(ip.IP) {
			tcpAddr := &net.TCPAddr{IP: ip.IP}
			return tcpAddr, nil
		}
	}
	return nil, fmt.Errorf("no network interface found. Addrs: %v", addrs)
}
// networkSetupNum counts SetupNetwork invocations in this process; the first
// call additionally waits for the sidecar network to be initialized.
var networkSetupNum int
var networkSetupMx gosync.Mutex // guards networkSetupNum

// SetupNetwork instructs the sidecar (if enabled) to setup the network for this
// test case.
func SetupNetwork(ctx context.Context, ri *DHTRunInfo, latency time.Duration) error {
	if !ri.RunEnv.TestSidecar {
		// No sidecar available; nothing to configure.
		return nil
	}

	networkSetupMx.Lock()
	defer networkSetupMx.Unlock()

	if networkSetupNum == 0 {
		// Wait for the network to be initialized.
		if err := ri.Client.WaitNetworkInitialized(ctx, ri.RunEnv); err != nil {
			return err
		}
	}
	networkSetupNum++

	// TODO: just put the unique testplan id inside the runenv?
	hostname, err := os.Hostname()
	if err != nil {
		return err
	}

	// A distinct state name per reconfiguration round lets every instance
	// barrier on the same round below.
	state := sync.State(fmt.Sprintf("network-configured-%d", networkSetupNum))
	_, _ = ri.Client.Publish(ctx, sync.NetworkTopic(hostname), &sync.NetworkConfig{
		Network: "default",
		Enable:  true,
		Default: sync.LinkShape{
			Latency:   latency,
			Bandwidth: 10 << 20, // 10Mib
		},
		State: state,
	})

	ri.RunEnv.RecordMessage("finished resetting network latency")

	// Wait until every instance has applied the new network configuration.
	err = <-ri.Client.MustBarrier(ctx, state, ri.RunEnv.TestInstanceCount).C
	if err != nil {
		return fmt.Errorf("failed to configure network: %w", err)
	}
	return nil
}
// Setup sets up the elements necessary for the test cases
//
// It connects to the sync service, validates the instance count, resets the
// network, derives this node's group/test sequence numbers, creates the host
// and DHT, and exchanges addresses with every other instance.
func Setup(ctx context.Context, runenv *runtime.RunEnv, opts *SetupOpts) (*DHTRunInfo, error) {
	if err := initAssets(runenv); err != nil {
		return nil, err
	}

	client := sync.MustBoundClient(ctx, runenv)
	//defer watcher.Close()
	//defer writer.Close()

	ri := &DHTRunInfo{
		RunInfo: &utils.RunInfo{
			RunEnv: runenv,
			Client: client,
		},
		DHTGroupProperties: make(map[string]*SetupOpts),
	}

	// TODO: Take opts.NFindPeers into account when setting a minimum?
	if ri.RunEnv.TestInstanceCount < minTestInstances {
		return nil, fmt.Errorf(
			"requires at least %d instances, only %d started",
			minTestInstances, ri.RunEnv.TestInstanceCount,
		)
	}

	// Start with zero added latency; tests adjust it later as needed.
	err := SetupNetwork(ctx, ri, 0)
	if err != nil {
		return nil, err
	}
	ri.RunEnv.RecordMessage("past the setup network barrier")

	groupSeq, testSeq, err := utils.GetGroupsAndSeqs(ctx, ri.RunInfo, opts.GroupOrder)
	if err != nil {
		return nil, err
	}

	// Parse every group's raw parameters so each peer's properties are known.
	for g, props := range ri.GroupProperties {
		fakeEnv := &runtime.RunEnv{
			RunParams: runtime.RunParams{TestInstanceParams: props.Params},
		}
		ri.DHTGroupProperties[g] = GetCommonOpts(fakeEnv)
	}
	ri.RunEnv.RecordMessage("past nodeid")

	// Deterministic identity seeded by our global sequence number.
	rng := rand.New(rand.NewSource(int64(testSeq)))
	priv, _, err := crypto.GenerateEd25519Key(rng)
	if err != nil {
		return nil, err
	}

	testNode := &NodeParams{
		host: nil,
		dht:  nil,
		info: &DHTNodeInfo{
			NodeInfo: &tglibp2p.NodeInfo{
				Seq:      testSeq,
				GroupSeq: groupSeq,
				Group:    ri.RunEnv.TestGroupID,
				Addrs:    nil,
			},
			Properties: opts,
		},
	}

	testNode.host, testNode.dht, err = NewDHTNode(ctx, ri.RunEnv, opts, priv, testNode.info)
	if err != nil {
		return nil, err
	}
	testNode.info.Addrs = host.InfoFromHost(testNode.host)

	// Learn every other instance's addresses and group membership.
	otherNodes, err := tglibp2p.ShareAddresses(ctx, ri.RunInfo, testNode.info.NodeInfo)
	if err != nil {
		return nil, err
	}
	ri.RunEnv.RecordMessage("finished setup function")

	outputStart(testNode)

	// Attach each peer's parsed group options to its node info.
	otherDHTNodes := make(map[peer.ID]*DHTNodeInfo, len(otherNodes))
	for pid, nodeInfo := range otherNodes {
		otherDHTNodes[pid] = &DHTNodeInfo{
			NodeInfo:   nodeInfo,
			Properties: ri.DHTGroupProperties[nodeInfo.Group],
		}
	}

	ri.Node = testNode
	ri.Others = otherDHTNodes

	return ri, nil
}
// GetBootstrapNodes returns the peers this node should dial while
// bootstrapping, selected by its BootstrapStrategy property:
//
//	0: dial nobody
//	1: dial every bootstrapper
//	2: dial one bootstrapper chosen by sequence number
//	3: dial ~log2(#bootstrappers)/2 distinct random bootstrappers
//	4: dial the next peer in the sequence-number ring
//	5: dial every dialable peer
//	6: dial ~log2(#dialable/2) random dialable peers
//	7: dial ~log2 of the expected DHT servers plus ~log2 of the remaining
//	   dialable peers
func GetBootstrapNodes(ri *DHTRunInfo) []peer.AddrInfo {
	var toDial []peer.AddrInfo
	nodeInfo := ri.Node.info
	otherNodes := ri.Others
	switch nodeInfo.Properties.BootstrapStrategy {
	case 0: // Do nothing
		return toDial
	case 1: // Connect to all bootstrappers
		for _, info := range otherNodes {
			if info.Properties.Bootstrapper {
				toDial = append(toDial, *info.Addrs)
			}
		}
	case 2: // Connect to a random bootstrapper (based on our sequence number)
		// List all the bootstrappers.
		var bootstrappers []peer.AddrInfo
		for _, info := range otherNodes {
			if info.Properties.Bootstrapper {
				bootstrappers = append(bootstrappers, *info.Addrs)
			}
		}
		if len(bootstrappers) > 0 {
			toDial = append(toDial, bootstrappers[nodeInfo.Seq%len(bootstrappers)])
		}
	case 3: // Connect to log(n) distinct random bootstrappers (seeded by our sequence number)
		// List all the bootstrappers.
		var bootstrappers []peer.AddrInfo
		for _, info := range otherNodes {
			if info.Properties.Bootstrapper {
				bootstrappers = append(bootstrappers, *info.Addrs)
			}
		}
		added := make(map[int]struct{})
		if len(bootstrappers) > 0 {
			targetSize := int(math.Log2(float64(len(bootstrappers)))/2) + 1
			rng := rand.New(rand.NewSource(int64(nodeInfo.Seq)))
			for i := 0; i < targetSize; i++ {
				bsIndex := rng.Int() % len(bootstrappers)
				if _, found := added[bsIndex]; found {
					// Already picked this bootstrapper; redraw.
					i--
					continue
				}
				// BUGFIX: record the pick so the duplicate check above can
				// ever fire — previously nothing was written to the map, so
				// the same bootstrapper could be dialed multiple times.
				added[bsIndex] = struct{}{}
				toDial = append(toDial, bootstrappers[bsIndex])
			}
		}
	case 4: // dial the _next_ peer in the ring
		mySeqNo := nodeInfo.Seq
		var targetSeqNo int
		if mySeqNo == len(otherNodes) {
			// Highest sequence number: wrap around to zero.
			targetSeqNo = 0
		} else {
			targetSeqNo = mySeqNo + 1
		}
		// Look for the node with the target sequence number.
		for _, info := range otherNodes {
			if info.Seq == targetSeqNo {
				toDial = append(toDial, *info.Addrs)
				break
			}
		}
	case 5: // Connect to all dialable peers
		toDial = make([]peer.AddrInfo, 0, len(otherNodes))
		for _, info := range otherNodes {
			if !info.Properties.Undialable {
				toDial = append(toDial, *info.Addrs)
			}
		}
		return toDial
	case 6: // connect to log(n) of the network, where n is the number of dialable nodes
		// NOTE: assumes sequence numbers are contiguous in [0, len(otherNodes)].
		plist := make([]*DHTNodeInfo, len(otherNodes)+1)
		for _, info := range otherNodes {
			plist[info.Seq] = info
		}
		plist[nodeInfo.Seq] = nodeInfo
		numDialable := 0
		for _, info := range plist {
			if !info.Properties.Undialable {
				numDialable++
			}
		}
		targetSize := int(math.Log2(float64(numDialable)/2)) + 1
		// Deterministic per-node shuffle, then take the first targetSize
		// dialable peers (excluding ourselves).
		nodeLst := make([]*DHTNodeInfo, len(plist))
		copy(nodeLst, plist)
		rng := rand.New(rand.NewSource(0))
		rng = rand.New(rand.NewSource(int64(rng.Int31()) + int64(nodeInfo.Seq)))
		rng.Shuffle(len(nodeLst), func(i, j int) {
			nodeLst[i], nodeLst[j] = nodeLst[j], nodeLst[i]
		})
		for _, info := range nodeLst {
			if len(toDial) > targetSize {
				break
			}
			if info.Seq != nodeInfo.Seq && !info.Properties.Undialable {
				toDial = append(toDial, *info.Addrs)
			}
		}
	case 7: // connect to log(server nodes) and log(other dialable nodes)
		plist := make([]*DHTNodeInfo, len(otherNodes)+1)
		for _, info := range otherNodes {
			plist[info.Seq] = info
		}
		plist[nodeInfo.Seq] = nodeInfo
		numServer := 0
		numOtherDialable := 0
		for _, info := range plist {
			if info.Properties.ExpectedServer {
				numServer++
			} else if !info.Properties.Undialable {
				numOtherDialable++
			}
		}
		targetServerNodes := int(math.Log2(float64(numServer/2))) + 1
		targetOtherNodes := int(math.Log2(float64(numOtherDialable/2))) + 1
		serverAddrs := getBootstrapAddrs(plist, nodeInfo, targetServerNodes, 0, func(info *DHTNodeInfo) bool {
			return info.Seq != nodeInfo.Seq && info.Properties.ExpectedServer
		})
		// BUGFIX: the "other" quota must select non-server dialable peers;
		// the original predicate duplicated the server filter, so this
		// bucket was also filled with servers.
		otherAddrs := getBootstrapAddrs(plist, nodeInfo, targetOtherNodes, 0, func(info *DHTNodeInfo) bool {
			return info.Seq != nodeInfo.Seq && !info.Properties.ExpectedServer && !info.Properties.Undialable
		})
		toDial = append(toDial, serverAddrs...)
		toDial = append(toDial, otherAddrs...)
	default:
		panic(fmt.Errorf("invalid number of bootstrap strategy %d", ri.Node.info.Properties.BootstrapStrategy))
	}
	return toDial
}
// getBootstrapAddrs deterministically picks up to targetSize+1 peers to dial.
// A copy of plist is shuffled with an RNG derived from rngSeed and this
// node's sequence number (so every node selects a different but reproducible
// subset), and shuffled entries accepted by the valid predicate are collected
// until the quota is reached.
func getBootstrapAddrs(plist []*DHTNodeInfo, nodeInfo *DHTNodeInfo, targetSize int, rngSeed int64, valid func(info *DHTNodeInfo) bool) (toDial []peer.AddrInfo) {
	shuffled := make([]*DHTNodeInfo, len(plist))
	copy(shuffled, plist)

	// Derive a per-node seed from the shared seed so peers make distinct choices.
	base := rand.New(rand.NewSource(rngSeed))
	perNode := rand.New(rand.NewSource(int64(base.Int31()) + int64(nodeInfo.Seq)))
	perNode.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})

	for _, candidate := range shuffled {
		if len(toDial) > targetSize {
			break
		}
		if valid(candidate) {
			toDial = append(toDial, *candidate.Addrs)
		}
	}
	return toDial
}
// Bootstrap brings the network into a completely bootstrapped and ready state.
//
// 1. Connect:
//    a. If any bootstrappers are defined, it connects them together and connects all other peers to one of the bootstrappers (deterministically).
//    b. Otherwise, every peer is connected to the next peer (in lexicographical peer ID order).
// 2. Routing: Refresh all the routing tables.
// 3. Trim: Wait out the grace period then invoke the connection manager to simulate a running network with connection churn.
// 4. Forget & Reconnect:
//    a. Forget the addresses of all peers we've disconnected from. Otherwise, FindPeer is useless.
//    b. Re-connect to at least one node if we've disconnected from _all_ nodes.
//       We may want to make this an error in the future?
func Bootstrap(ctx context.Context, ri *DHTRunInfo, bootstrapNodes []peer.AddrInfo) error {
	runenv := ri.RunEnv
	defer runenv.RecordMessage("bootstrap phase ended")
	node := ri.Node
	dht := node.dht

	stager := utils.NewBatchStager(ctx, node.info.Seq, runenv.TestInstanceCount, "bootstrapping", ri.RunInfo)

	////////////////
	// 1: CONNECT //
	////////////////

	runenv.RecordMessage("bootstrap: begin connect")

	// Wait until it's our turn to bootstrap
	gradualBsStager := utils.NewGradualStager(ctx, node.info.Seq, runenv.TestInstanceCount,
		"boostrap-gradual", ri.RunInfo, utils.LinearGradualStaging(100))
	if err := gradualBsStager.Begin(); err != nil {
		return err
	}

	runenv.RecordMessage("bootstrap: dialing %v", bootstrapNodes)

	// Connect to our peers.
	if err := Connect(ctx, runenv, dht, bootstrapNodes...); err != nil {
		return err
	}

	runenv.RecordMessage("bootstrap: dialed %d other peers", len(bootstrapNodes))

	// TODO: Use an updated autonat that doesn't require this
	// Wait for Autonat to kick in
	time.Sleep(time.Second * 30)

	if err := Connect(ctx, runenv, dht, bootstrapNodes...); err != nil {
		return err
	}

	// Periodically re-dial the bootstrap set while the routing table is
	// nearly empty. FIX: exit on context cancellation and stop the ticker —
	// the previous version looped forever, leaking both the goroutine and
	// its ticker for the lifetime of the process.
	go func() {
		ticker := time.NewTicker(time.Second * 5)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				if node.dht.RoutingTable().Size() < 2 {
					_ = Connect(ctx, runenv, dht, bootstrapNodes...)
				}
			}
		}
	}()

	////////////////
	// 2: ROUTING //
	////////////////

	// Wait for these peers to be added to the routing table.
	if err := WaitRoutingTable(ctx, runenv, dht); err != nil {
		return err
	}
	runenv.RecordMessage("bootstrap: have peer in routing table")

	runenv.RecordMessage("bootstrap: begin routing")

	outputGraph(node.dht, "br")

	// rrt triggers a routing table refresh and waits for it to complete.
	rrt := func() error {
		if err := <-dht.RefreshRoutingTable(); err != nil {
			runenv.RecordMessage("bootstrap: refresh failure - rt size %d", dht.RoutingTable().Size())
			return err
		}
		return nil
	}

	// Setup our routing tables: retry the refresh a few times, re-dialing
	// the bootstrap nodes between attempts.
	ready := false
	numTries, maxNumTries := 1, 3
	for !ready {
		if err := rrt(); err != nil {
			if numTries >= maxNumTries {
				outputGraph(dht, "failedrefresh")
				return err
			}
			numTries++
			if err := Connect(ctx, runenv, dht, bootstrapNodes...); err != nil {
				return err
			}
		} else {
			ready = true
		}
	}

	runenv.RecordMessage("bootstrap: table ready")

	// TODO: Repeat this a few times until our tables have stabilized? That
	// _shouldn't_ be necessary.
	runenv.RecordMessage("bootstrap: everyone table ready")

	outputGraph(node.dht, "ar")

	/////////////
	// 3: TRIM //
	/////////////

	if err := gradualBsStager.End(); err != nil {
		return err
	}

	// Need to wait for connections to exit the grace period.
	time.Sleep(2 * ConnManagerGracePeriod)

	runenv.RecordMessage("bootstrap: begin trim")

	// Force the connection manager to do it's dirty work. DIE CONNECTIONS
	// DIE!
	dht.Host().ConnManager().TrimOpenConns(ctx)

	outputGraph(node.dht, "at")

	///////////////////////////
	// 4: FORGET & RECONNECT //
	///////////////////////////

	if err := stager.Begin(); err != nil {
		return err
	}

	// Forget all peers we're no longer connected to. We need to do this
	// _after_ we wait for everyone to trim so we can forget peers that
	// disconnected from us.
	forgotten := 0
	for _, p := range dht.Host().Peerstore().Peers() {
		if dht.RoutingTable().Find(p) == "" && dht.Host().Network().Connectedness(p) != network.Connected {
			forgotten++
			dht.Host().Peerstore().ClearAddrs(p)
		}
	}

	runenv.RecordMessage("bootstrap: forgotten %d peers", forgotten)

	// Make sure we have at least one peer. If not, reconnect to a
	// bootstrapper and log a warning.
	if len(dht.Host().Network().Peers()) == 0 {
		// TODO: Report this as an error?
		runenv.RecordMessage("bootstrap: fully disconnected, reconnecting.")
		if err := Connect(ctx, runenv, dht, bootstrapNodes...); err != nil {
			return err
		}
		if err := WaitRoutingTable(ctx, runenv, dht); err != nil {
			return err
		}
		runenv.RecordMessage("bootstrap: finished reconnecting to %d peers", len(bootstrapNodes))
	}

	// Give the routing table a bounded amount of time to become non-empty.
	tmpCtx, tmpc := context.WithTimeout(ctx, time.Second*10)
	if err := WaitRoutingTable(tmpCtx, runenv, dht); err != nil {
		tmpc()
		return err
	}
	if tmpCtx.Err() != nil {
		runenv.RecordMessage("peer %s failed with rt of size %d", node.host.ID().Pretty(), node.dht.RoutingTable().Size())
	}
	tmpc()

	if err := stager.End(); err != nil {
		return err
	}

	outputGraph(node.dht, "ab")

	runenv.RecordMessage(
		"bootstrap: finished with %d connections, %d in the routing table",
		len(dht.Host().Network().Peers()),
		dht.RoutingTable().Size(),
	)

	TableHealth(dht, ri.Others, ri)

	runenv.RecordMessage("bootstrap: done")
	return nil
}
// Connect connects a host to a set of peers.
//
// Automatically skips our own peer.
//
// Partial failure is tolerated: an error is returned only when more than 75%
// of attempted dials failed, or when at most one connection was formed.
func Connect(ctx context.Context, runenv *runtime.RunEnv, dht *kaddht.IpfsDHT, toDial ...peer.AddrInfo) error {
	// tryConnect dials a single peer, retrying with 2s + up to 3s jitter
	// between attempts, and aborting early on context cancellation.
	tryConnect := func(ctx context.Context, ai peer.AddrInfo, attempts int) error {
		var err error
		for i := 1; i <= attempts; i++ {
			runenv.RecordMessage("dialling peer %s (attempt %d)", ai.ID, i)
			if err = dht.Host().Connect(ctx, ai); err == nil {
				return nil
			} else {
				runenv.RecordMessage("failed to dial peer %v (attempt %d), err: %s", ai.ID, i, err)
			}

			select {
			case <-time.After(time.Duration(rand.Intn(3000))*time.Millisecond + 2*time.Second):
			case <-ctx.Done():
				return fmt.Errorf("error while dialing peer %v, attempts made: %d: %w", ai.Addrs, i, ctx.Err())
			}
		}
		return fmt.Errorf("failed while dialing peer %v, attempts: %d: %w", ai.Addrs, attempts, err)
	}

	// Dial to all the other peers.
	var lastErr error
	numFailedConnections := 0
	numAttemptedConnections := 0
	for _, ai := range toDial {
		if ai.ID == dht.Host().ID() {
			continue
		}
		numAttemptedConnections++
		if err := tryConnect(ctx, ai, 3); err != nil {
			// FIX: remember the most recent *failure* only. Previously `err`
			// was also overwritten by later successful dials, so a final
			// success reset it to nil and errors.Wrap(nil, ...) returned nil,
			// silently defeating the threshold checks below.
			lastErr = err
			numFailedConnections++
		}
	}

	if float64(numFailedConnections)/float64(numAttemptedConnections) > 0.75 {
		return errors.Wrap(lastErr, "too high percentage of failed connections")
	}
	if numAttemptedConnections-numFailedConnections <= 1 {
		// Note: with zero failures (single-peer dial) lastErr is nil and
		// errors.Wrap returns nil, preserving the historical behavior of
		// accepting a lone successful connection.
		return errors.Wrap(lastErr, "insufficient connections formed")
	}

	return nil
}
// RandomWalk performs 5 random walks.
//
// Each iteration invokes dht.Bootstrap, which triggers the DHT's own
// refresh/self-walk logic; the first error aborts the remaining walks.
func RandomWalk(ctx context.Context, runenv *runtime.RunEnv, dht *kaddht.IpfsDHT) error {
	for i := 0; i < 5; i++ {
		if err := dht.Bootstrap(ctx); err != nil {
			// Error strings are lowercase per Go convention (staticcheck ST1005).
			return fmt.Errorf("could not run a random-walk: %w", err)
		}
	}
	return nil
}
// Base performs the setup shared by every test case: initialize the run,
// bootstrap the network into a stable state, optionally perform random
// walks, and apply the configured network latency.
func Base(ctx context.Context, runenv *runtime.RunEnv, commonOpts *SetupOpts) (*DHTRunInfo, error) {
	ectx := specializedTraceQuery(ctx, runenv, "bootstrap-network")

	ri, err := Setup(ectx, runenv, commonOpts)
	if err != nil {
		return nil, err
	}

	// Bring the network into a nice, stable, bootstrapped state.
	if err := Bootstrap(ectx, ri, GetBootstrapNodes(ri)); err != nil {
		return nil, err
	}

	if commonOpts.RandomWalk {
		if err := RandomWalk(ectx, runenv, ri.Node.dht); err != nil {
			return nil, err
		}
	}

	if err := SetupNetwork(ectx, ri, commonOpts.Latency); err != nil {
		return nil, err
	}

	return ri, nil
}
// Sync synchronizes all test instances around a single sync point.
func Sync(
	ctx context.Context,
	ri *utils.RunInfo,
	state sync.State,
) error {
	// Register the barrier before signalling so our own entry is counted.
	barrier := ri.Client.MustBarrier(ctx, state, ri.RunEnv.TestInstanceCount)

	// Signal we're in the same state.
	if _, err := ri.Client.SignalEntry(ctx, state); err != nil {
		return err
	}

	// Block until every instance has signalled (or the barrier errors out).
	return <-barrier.C
}
// WaitRoutingTable waits until the routing table is not empty.
//
// It polls every 200ms, logs a progress message every 10s, and returns an
// error if ctx is cancelled first.
func WaitRoutingTable(ctx context.Context, runenv *runtime.RunEnv, dht *kaddht.IpfsDHT) error {
	// FIX: both timers live outside the loop. Previously a fresh 10s timer
	// was created on every 200ms iteration, so it could never fire (the
	// "waiting on routing table" message was unreachable) and each abandoned
	// timer lingered until garbage collection.
	poll := time.NewTicker(200 * time.Millisecond)
	defer poll.Stop()
	progress := time.NewTicker(time.Second * 10)
	defer progress.Stop()

	for {
		if dht.RoutingTable().Size() > 0 {
			return nil
		}

		select {
		case <-poll.C:
		case <-progress.C:
			runenv.RecordMessage("waiting on routing table")
		case <-ctx.Done():
			peers := dht.Host().Network().Peers()
			errStr := fmt.Sprintf("empty rt. %d peer conns. they are %v", len(peers), peers)
			runenv.RecordMessage(errStr)
			// Use an explicit %s format: fmt.Errorf(errStr) with a
			// non-constant format string trips `go vet` and would mangle
			// any '%' in the peer list.
			return fmt.Errorf("%s", errStr)
		}
	}
}
// Teardown concludes this test case, waiting for all other instances to reach
// the 'end' state first.
func Teardown(ctx context.Context, ri *utils.RunInfo) {
	if err := Sync(ctx, ri, "end"); err != nil {
		// Record the failure for the run report, then abort hard: a broken
		// end-of-run barrier leaves the instances unsynchronized.
		ri.RunEnv.RecordFailure(fmt.Errorf("end sync failed: %w", err))
		panic(err)
	}
}
// graphLogger, rtLogger and nodeLogger are structured output sinks for the
// connection graph, routing-table contents and per-node parameters. They are
// initialized by initAssets and are always non-nil afterwards (a no-op
// logger is substituted on failure).
var graphLogger, rtLogger, nodeLogger *zap.SugaredLogger

// initAssets creates the structured output assets used by this plan.
//
// FIX: all three loggers are always initialized. Previously the function
// returned on the first failure, leaving the remaining package-level loggers
// nil and causing nil dereferences in outputGraph/outputStart — despite the
// "nooping logger" fallback clearly intending execution to continue. The
// first creation error (if any) is still returned.
func initAssets(runenv *runtime.RunEnv) error {
	var firstErr error
	// mk creates one asset logger, falling back to a no-op logger (and
	// recording the first error) when creation fails.
	mk := func(name string) *zap.SugaredLogger {
		_, logger, err := runenv.CreateStructuredAsset(name, runtime.StandardJSONConfig())
		if err != nil {
			runenv.RecordMessage("failed to initialize %s asset; nooping logger: %s", name, err)
			if firstErr == nil {
				firstErr = err
			}
			return zap.NewNop().Sugar()
		}
		return logger
	}

	graphLogger = mk("dht_graphs.out")
	rtLogger = mk("dht_rt.out")
	nodeLogger = mk("node.out")
	return firstErr
}
// outputGraph logs this node's outbound connections and its routing-table
// entries under the given graph identifier.
func outputGraph(dht *kaddht.IpfsDHT, graphID string) {
	// Only outbound edges are recorded, so each connection appears exactly
	// once across the combined logs of all nodes.
	for _, conn := range dht.Host().Network().Conns() {
		if conn.Stat().Direction != network.DirOutbound {
			continue
		}
		graphLogger.Infow(graphID, "From", conn.LocalPeer().Pretty(), "To", conn.RemotePeer().Pretty())
	}

	self := dht.PeerID().Pretty()
	for _, p := range dht.RoutingTable().ListPeers() {
		rtLogger.Infow(graphID, "Node", self, "Peer", p.Pretty())
	}
}
// outputStart logs a node's identifying parameters (sequence number,
// dialability, peer ID and listen addresses) to the node asset.
func outputStart(node *NodeParams) {
	info := node.info
	nodeLogger.Infow("nodeparams",
		"seq", info.Seq,
		"dialable", !info.Properties.Undialable,
		"peerID", info.Addrs.ID.Pretty(),
		"addrs", info.Addrs.Addrs,
	)
}

View File

@ -0,0 +1,49 @@
// +build balsam
package test
import (
"context"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipns"
"github.com/testground/sdk-go/runtime"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
kaddht "github.com/libp2p/go-libp2p-kad-dht"
dhtopts "github.com/libp2p/go-libp2p-kad-dht/opts"
)
// createDHT constructs the DHT instance under test (balsam build variant),
// wiring the testground protocol, datastore, bucket size, refresh query
// timeout and IPNS validator from the supplied options.
func createDHT(ctx context.Context, h host.Host, ds datastore.Batching, opts *SetupOpts, info *DHTNodeInfo) (*kaddht.IpfsDHT, error) {
	dhtOptions := []dhtopts.Option{
		dhtopts.Protocols("/testground/kad/1.0.0"),
		dhtopts.Datastore(ds),
		dhtopts.BucketSize(opts.BucketSize),
		dhtopts.RoutingTableRefreshQueryTimeout(opts.Timeout),
		dhtopts.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()}),
	}

	if !opts.AutoRefresh {
		// Refreshes are driven explicitly by the test plan.
		dhtOptions = append(dhtOptions, dhtopts.DisableAutoRefresh())
	}
	if info.Properties.Undialable && opts.ClientMode {
		dhtOptions = append(dhtOptions, dhtopts.Client(true))
	}

	return kaddht.New(ctx, h, dhtOptions...)
}
// getTaggedLibp2pOpts returns build-variant-specific libp2p options; the
// balsam variant adds none.
func getTaggedLibp2pOpts(opts *SetupOpts, info *DHTNodeInfo) []libp2p.Option { return nil }
// getAllProvRecordsNum returns the maximum number of provider records to
// request per lookup in the balsam variant (see FindProvidersAsync callers).
func getAllProvRecordsNum() int { return 1000 }
// specializedTraceQuery is the hook for variant-specific query tracing; the
// balsam variant adds no instrumentation and returns the context unchanged.
func specializedTraceQuery(ctx context.Context, runenv *runtime.RunEnv, tag string) context.Context {
	return ctx
}
// TableHealth is a no-op in the balsam variant; routing-table health
// reporting is only implemented in the cypress variant.
func TableHealth(dht *kaddht.IpfsDHT, peers map[peer.ID]*DHTNodeInfo, ri *DHTRunInfo) {}

View File

@ -0,0 +1,134 @@
// +build cypress
package test
import (
"context"
"sync"
"github.com/testground/sdk-go/runtime"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-ipns"
kaddht "github.com/libp2p/go-libp2p-kad-dht"
kbucket "github.com/libp2p/go-libp2p-kbucket"
"github.com/libp2p/go-libp2p-xor/kademlia"
"github.com/libp2p/go-libp2p-xor/key"
"github.com/libp2p/go-libp2p-xor/trie"
"go.uber.org/zap"
)
// createDHT constructs the DHT instance under test (cypress build variant).
// Bootstrappers are forced into server mode; undialable nodes may be put
// into client mode when the plan enables client mode.
func createDHT(ctx context.Context, h host.Host, ds datastore.Batching, opts *SetupOpts, info *DHTNodeInfo) (*kaddht.IpfsDHT, error) {
	dhtOptions := []kaddht.Option{
		kaddht.ProtocolPrefix("/testground"),
		kaddht.V1CompatibleMode(false),
		kaddht.Datastore(ds),
		kaddht.BucketSize(opts.BucketSize),
		kaddht.RoutingTableRefreshQueryTimeout(opts.Timeout),
		kaddht.Concurrency(opts.Alpha),
		kaddht.Resiliency(opts.Beta),
		kaddht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()}),
	}

	if !opts.AutoRefresh {
		// Refreshes are driven explicitly by the test plan.
		dhtOptions = append(dhtOptions, kaddht.DisableAutoRefresh())
	}

	switch {
	case info.Properties.Bootstrapper:
		dhtOptions = append(dhtOptions, kaddht.Mode(kaddht.ModeServer))
	case info.Properties.Undialable && opts.ClientMode:
		dhtOptions = append(dhtOptions, kaddht.Mode(kaddht.ModeClient))
	}

	return kaddht.New(ctx, h, dhtOptions...)
}
// getTaggedLibp2pOpts returns the extra libp2p options for the cypress build
// variant: every node enables the NAT service, and bootstrappers additionally
// set forced reachability.
func getTaggedLibp2pOpts(opts *SetupOpts, info *DHTNodeInfo) []libp2p.Option {
	options := []libp2p.Option{libp2p.EnableNATService()}
	if info.Properties.Bootstrapper {
		options = append(options, libp2p.WithReachability(true))
	}
	return options
}
// getAllProvRecordsNum returns the provider-record count passed to
// FindProvidersAsync in the cypress variant. NOTE(review): 0 presumably
// means "no limit" — confirm against the kad-dht FindProvidersAsync docs.
func getAllProvRecordsNum() int { return 0 }
var (
	// sqonce guards one-time creation of the lookup/routing-table assets.
	sqonce sync.Once
	// sqlogger receives DHT lookup events; rtlogger receives routing-table
	// events. Both fall back to no-op loggers on asset-creation failure.
	sqlogger, rtlogger *zap.SugaredLogger
)

// specializedTraceQuery (cypress variant) registers for kad-dht lookup and
// routing-table events on the returned context and streams each event to the
// corresponding structured asset, tagged with tag. The assets are created
// lazily on first use.
func specializedTraceQuery(ctx context.Context, runenv *runtime.RunEnv, tag string) context.Context {
	sqonce.Do(func() {
		var err error
		_, sqlogger, err = runenv.CreateStructuredAsset("dht_lookups.out", runtime.StandardJSONConfig())
		if err != nil {
			runenv.RecordMessage("failed to initialize dht_lookups.out asset; nooping logger: %s", err)
			sqlogger = zap.NewNop().Sugar()
		}
		_, rtlogger, err = runenv.CreateStructuredAsset("rt_evts.out", runtime.StandardJSONConfig())
		if err != nil {
			// FIX: this message previously said "dht_lookups.out" (copy/paste)
			// even though the failing asset is rt_evts.out.
			runenv.RecordMessage("failed to initialize rt_evts.out asset; nooping logger: %s", err)
			rtlogger = zap.NewNop().Sugar()
		}
	})
	ectx, events := kaddht.RegisterForLookupEvents(ctx)
	ectx, rtEvts := kaddht.RegisterForRoutingTableEvents(ectx)

	lookupLogger := sqlogger.With("tag", tag)
	routingTableLogger := rtlogger.With("tag", tag)

	// NOTE(review): these goroutines exit when the event channels close —
	// presumably on context cancellation; confirm in go-libp2p-kad-dht.
	go func() {
		for e := range events {
			lookupLogger.Infow("lookup event", "info", e)
		}
	}()

	go func() {
		for e := range rtEvts {
			routingTableLogger.Infow("rt event", "info", e)
		}
	}()

	return ectx
}
// TableHealth computes health reports for a network of nodes, whose routing contacts are given.
//
// It builds a global XOR-trie of all expected DHT servers, converts the local
// routing-table contents to Kademlia keys, and logs the resulting health
// report via the run environment.
func TableHealth(dht *kaddht.IpfsDHT, peers map[peer.ID]*DHTNodeInfo, ri *DHTRunInfo) {
	// Construct global network view trie.
	// (FIX: dropped the `kn` slice that was appended to but never read.)
	knownNodes := trie.New()
	for p, info := range peers {
		if info.Properties.ExpectedServer {
			knownNodes.Add(kadPeerID(p))
		}
	}

	// Convert our routing-table entries into Kademlia keyspace.
	rtPeerIDs := dht.RoutingTable().ListPeers()
	rtPeers := make([]key.Key, len(rtPeerIDs))
	for i, p := range rtPeerIDs {
		rtPeers[i] = kadPeerID(p)
	}

	report := kademlia.TableHealth(kadPeerID(dht.PeerID()), rtPeers, knownNodes)
	ri.RunEnv.RecordMessage("table health: %s", report.String())
}
// kadPeerID converts a libp2p peer ID into its Kademlia keyspace key, for use
// with the go-libp2p-xor trie/health utilities.
func kadPeerID(p peer.ID) key.Key {
	return key.KbucketIDToKey(kbucket.ConvertPeerID(p))
}

110
dht/test/find_peers.go Normal file
View File

@ -0,0 +1,110 @@
package test
import (
"context"
"fmt"
"github.com/libp2p/test-plans/dht/utils"
"time"
"github.com/testground/sdk-go/runtime"
)
// FindPeers is the entry point of the find-peers test case: set up and
// bootstrap the network, run the FIND_PEER measurements, then tear down.
func FindPeers(runenv *runtime.RunEnv) error {
	commonOpts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), commonOpts.Timeout)
	defer cancel()

	ri, err := Base(ctx, runenv, commonOpts)
	if err != nil {
		return err
	}
	if err = TestFindPeers(ctx, ri); err != nil {
		return err
	}

	Teardown(ctx, ri.RunInfo)
	return nil
}
// TestFindPeers measures FIND_PEER: each node performs up to n_find_peers
// lookups for peers it does not yet know (not in its peerstore, dialable,
// and not itself), recording the time to success or failure for each.
// NOTE(review): targets are drawn by ranging over a map, so the selection
// order is randomized per run by Go's map iteration order.
func TestFindPeers(ctx context.Context, ri *DHTRunInfo) error {
	runenv := ri.RunEnv

	nFindPeers := runenv.IntParam("n_find_peers")

	if nFindPeers > runenv.TestInstanceCount {
		return fmt.Errorf("NFindPeers greater than the number of test instances")
	}

	node := ri.Node
	peers := ri.Others

	stager := utils.NewBatchStager(ctx, node.info.Seq, runenv.TestInstanceCount, "peer-records", ri.RunInfo)

	// Ok, we're _finally_ ready.
	// TODO: Dump routing table stats. We should dump:
	//
	// * How full our "closest" bucket is. That is, look at the "all peers"
	//   list, find the BucketSize closest peers, and determine the % of those
	//   peers to which we're connected. It should be close to 100%.
	// * How many peers we're actually connected to?
	// * How many of our connected peers are actually useful to us?

	// Perform FIND_PEER N times.

	if err := stager.Begin(); err != nil {
		return err
	}

	// found counts attempted lookups (both successes and failures) and is
	// used in the per-lookup metric names below.
	found := 0
	for p, info := range peers {
		if found >= nFindPeers {
			break
		}
		if len(node.host.Peerstore().Addrs(p)) > 0 {
			// Skip peer's we've already found (even if we've
			// disconnected for some reason).
			continue
		}
		if info.Properties.Undialable || node.info.Addrs.ID == info.Addrs.ID {
			continue
		}

		runenv.RecordMessage("start find peer number %d", found+1)

		ectx, cancel := context.WithCancel(ctx)
		ectx = TraceQuery(ectx, runenv, node, p.Pretty(), "peer-records")
		t := time.Now()

		// TODO: Instrument libp2p dht to get:
		// - Number of peers dialed
		// - Number of dials along the way that failed
		_, err := node.dht.FindPeer(ectx, p)
		cancel()

		if err != nil {
			// Record time-to-failure under a separate metric name.
			runenv.RecordMessage("find peer failed: peer %s : %s", p, err)
			runenv.RecordMetric(&runtime.MetricDefinition{
				Name:           fmt.Sprintf("time-to-failed-peer-%d", found),
				Unit:           "ns",
				ImprovementDir: -1,
			}, float64(time.Since(t).Nanoseconds()))
		} else {
			runenv.RecordMetric(&runtime.MetricDefinition{
				Name:           fmt.Sprintf("time-to-peer-%d", found),
				Unit:           "ns",
				ImprovementDir: -1,
			}, float64(time.Since(t).Nanoseconds()))
		}

		found++
	}

	if err := stager.End(); err != nil {
		return err
	}
	return nil
}

242
dht/test/find_providers.go Normal file
View File

@ -0,0 +1,242 @@
package test
import (
"context"
"fmt"
"github.com/libp2p/test-plans/dht/utils"
"time"
"golang.org/x/sync/errgroup"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/ipfs/go-cid"
u "github.com/ipfs/go-ipfs-util"
"github.com/testground/sdk-go/runtime"
)
// findProvsParams holds the per-group parameters of the provider-record
// tests, decoded from the instance parameter map.
type findProvsParams struct {
	RecordSeed    int  // seed mixed into the generated record CIDs
	RecordCount   int  // number of records this group provides (0 = none)
	SearchRecords bool // whether this group searches for other groups' records
}
// getFindProvsParams decodes the provider-record test parameters from a raw
// instance-parameter map by wrapping it in a throwaway RunEnv.
func getFindProvsParams(params map[string]string) findProvsParams {
	env := runtime.RunEnv{RunParams: runtime.RunParams{
		TestInstanceParams: params,
	}}
	return findProvsParams{
		RecordSeed:    env.IntParam("record_seed"),
		RecordCount:   env.IntParam("record_count"),
		SearchRecords: env.BooleanParam("search_records"),
	}
}
// FindProviders is the entry point of the find-providers test case: set up
// and bootstrap the network, run the provider-record measurements, then tear
// down.
func FindProviders(runenv *runtime.RunEnv) error {
	commonOpts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), commonOpts.Timeout)
	defer cancel()

	ri, err := Base(ctx, runenv, commonOpts)
	if err != nil {
		return err
	}
	if err = TestProviderRecords(ctx, ri); err != nil {
		return err
	}

	Teardown(ctx, ri.RunInfo)
	return nil
}
// TestProviderRecords advertises this group's provider records to the DHT,
// then searches for the records advertised by every other group, recording
// time-to-first/last provider and found/missing provider counts per record.
func TestProviderRecords(ctx context.Context, ri *DHTRunInfo) error {
	runenv := ri.RunEnv
	node := ri.Node

	fpOpts := getFindProvsParams(ri.RunEnv.RunParams.TestInstanceParams)

	stager := utils.NewBatchStager(ctx, node.info.Seq, runenv.TestInstanceCount, "provider-records", ri.RunInfo)

	emitRecords, searchRecords := getRecords(ri, fpOpts)

	if err := stager.Begin(); err != nil {
		return err
	}

	runenv.RecordMessage("start provide loop")

	// If we're a member of the providing cohort, let's provide those CIDs to
	// the network.
	if fpOpts.RecordCount > 0 {
		g := errgroup.Group{}
		for index, cid := range emitRecords {
			i := index
			c := cid
			g.Go(func() error {
				p := peer.ID(c.Bytes())
				ectx, cancel := context.WithCancel(ctx) //nolint
				// FIX: trace on the cancellable child context. Previously the
				// parent ctx was passed, so cancel() never applied to the
				// traced query context (compare TestFindPeers, which uses ectx).
				ectx = TraceQuery(ectx, runenv, node, p.Pretty(), "provider-records")
				t := time.Now()
				err := node.dht.Provide(ectx, c, true)
				cancel()

				if err == nil {
					runenv.RecordMessage("Provided CID: %s", c)
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-provide-%d", i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))
				}

				return err
			})
		}

		if err := g.Wait(); err != nil {
			_ = stager.End()
			return fmt.Errorf("failed while providing: %s", err)
		}
	}

	if err := stager.End(); err != nil {
		return err
	}

	outputGraph(node.dht, "after_provide")

	if err := stager.Begin(); err != nil {
		return err
	}

	if fpOpts.SearchRecords {
		g := errgroup.Group{}
		for _, record := range searchRecords {
			for index, cid := range record.RecordIDs {
				i := index
				c := cid
				groupID := record.GroupID
				g.Go(func() error {
					p := peer.ID(c.Bytes())
					ectx, cancel := context.WithCancel(ctx) //nolint
					// FIX: same ctx -> ectx correction as in the provide loop.
					ectx = TraceQuery(ectx, runenv, node, p.Pretty(), "provider-records")
					t := time.Now()

					numProvs := 0
					provsCh := node.dht.FindProvidersAsync(ectx, c, getAllProvRecordsNum())
					status := "done"
					var tLastFound time.Time
				provLoop:
					for {
						select {
						case _, ok := <-provsCh:
							if !ok {
								break provLoop
							}

							tLastFound = time.Now()

							if numProvs == 0 {
								runenv.RecordMetric(&runtime.MetricDefinition{
									Name:           fmt.Sprintf("time-to-find-first|%s|%d", groupID, i),
									Unit:           "ns",
									ImprovementDir: -1,
								}, float64(tLastFound.Sub(t).Nanoseconds()))
							}

							numProvs++
						case <-ctx.Done():
							status = "incomplete"
							break provLoop
						}
					}
					cancel()

					if numProvs > 0 {
						runenv.RecordMetric(&runtime.MetricDefinition{
							Name:           fmt.Sprintf("time-to-find-last|%s|%s|%d", status, groupID, i),
							Unit:           "ns",
							ImprovementDir: -1,
						}, float64(tLastFound.Sub(t).Nanoseconds()))
					} else if status != "incomplete" {
						status = "fail"
					}

					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-find|%s|%s|%d", status, groupID, i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))

					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("peers-found|%s|%s|%d", status, groupID, i),
						Unit:           "peers",
						ImprovementDir: 1,
					}, float64(numProvs))
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("peers-missing|%s|%s|%d", status, groupID, i),
						Unit:           "peers",
						ImprovementDir: -1,
					}, float64(ri.GroupProperties[groupID].Size-numProvs))

					return nil
				})
			}
		}

		if err := g.Wait(); err != nil {
			_ = stager.End()
			// FIX: corrected "providerss" typo in the error message.
			return fmt.Errorf("failed while finding providers: %s", err)
		}
	}

	if err := stager.End(); err != nil {
		return err
	}

	return nil
}
// getRecords returns the records we plan to store and those we plan to search for. It also tells other nodes via the
// sync service which nodes our group plans on advertising
func getRecords(ri *DHTRunInfo, fpOpts findProvsParams) ([]cid.Cid, []*ProviderRecordSubmission) {
	// genFor deterministically derives a group's record CIDs from its name,
	// record count and seed, so every instance computes identical sets.
	genFor := func(groupID string, groupFPOpts findProvsParams) (out []cid.Cid) {
		for i := 0; i < groupFPOpts.RecordCount; i++ {
			seed := fmt.Sprintf("CID %d - group %s - seeded with %d", i, groupID, groupFPOpts.RecordSeed)
			out = append(out, cid.NewCidV0(u.Hash([]byte(seed))))
		}
		return out
	}

	var emitRecords []cid.Cid
	if fpOpts.RecordCount > 0 {
		// Calculate the CIDs we're dealing with.
		emitRecords = genFor(ri.Node.info.Group, fpOpts)
	}

	var searchRecords []*ProviderRecordSubmission
	if fpOpts.SearchRecords {
		for _, g := range ri.Groups {
			groupFPOpts := getFindProvsParams(ri.GroupProperties[g].Params)
			if groupFPOpts.RecordCount > 0 {
				searchRecords = append(searchRecords, &ProviderRecordSubmission{
					RecordIDs: genFor(g, groupFPOpts),
					GroupID:   g,
				})
			}
		}
	}

	return emitRecords, searchRecords
}
// ProviderRecordSubmission describes the set of record CIDs that one group
// advertises to the DHT, so other groups know what to search for.
type ProviderRecordSubmission struct {
	RecordIDs []cid.Cid // CIDs the group provides
	GroupID   string    // the advertising group
}

View File

@ -0,0 +1,188 @@
package test
import (
"context"
"encoding/hex"
"fmt"
"github.com/libp2p/test-plans/dht/utils"
"time"
"golang.org/x/sync/errgroup"
"github.com/ipfs/go-cid"
u "github.com/ipfs/go-ipfs-util"
"github.com/libp2p/go-libp2p-core/peer"
kbucket "github.com/libp2p/go-libp2p-kbucket"
"github.com/testground/sdk-go/runtime"
)
// GetClosestPeers is the entry point of the get-closest-peers test case: set
// up and bootstrap the network, run the GCP measurements, then tear down.
func GetClosestPeers(runenv *runtime.RunEnv) error {
	commonOpts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), commonOpts.Timeout)
	defer cancel()

	ri, err := Base(ctx, runenv, commonOpts)
	if err != nil {
		return err
	}
	if err = TestGetClosestPeers(ctx, ri); err != nil {
		return err
	}

	Teardown(ctx, ri.RunInfo)
	return nil
}
// TestGetClosestPeers runs GetClosestPeers queries for deterministically
// generated CIDs and compares the results against the true XOR-distance
// ranking computed from global knowledge of the network.
func TestGetClosestPeers(ctx context.Context, ri *DHTRunInfo) error {
	fpOpts := getFindProvsParams(ri.RunEnv.RunParams.TestInstanceParams)

	runenv := ri.RunEnv

	// TODO: This is hacky we should probably thread through a separate GCPRecordCount variable
	maxRecCount := 0
	for _, g := range ri.GroupProperties {
		gOpts := getFindProvsParams(g.Params)
		if gOpts.RecordCount > maxRecCount {
			maxRecCount = gOpts.RecordCount
		}
	}

	// Calculate the CIDs we're dealing with.
	cids := func() (out []cid.Cid) {
		for i := 0; i < maxRecCount; i++ {
			c := fmt.Sprintf("CID %d - seeded with %d", i, fpOpts.RecordSeed)
			out = append(out, cid.NewCidV0(u.Hash([]byte(c))))
		}
		return out
	}()

	node := ri.Node
	others := ri.Others

	stager := utils.NewBatchStager(ctx, node.info.Seq, runenv.TestInstanceCount, "get-closest-peers", ri.RunInfo)

	if err := stager.Begin(); err != nil {
		return err
	}

	runenv.RecordMessage("start gcp loop")

	if fpOpts.SearchRecords {
		g := errgroup.Group{}
		for index, cid := range cids {
			i := index
			c := cid
			g.Go(func() error {
				p := peer.ID(c.Bytes())
				ectx, cancel := context.WithCancel(ctx)
				ectx = TraceQuery(ectx, runenv, node, p.Pretty(), "get-closest-peers")
				t := time.Now()
				pids, err := node.dht.GetClosestPeers(ectx, c.KeyString())
				cancel()

				peers := make([]peer.ID, 0, node.info.Properties.BucketSize)
				for p := range pids {
					peers = append(peers, p)
				}

				if err == nil {
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-gcp-%d", i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))
					// FIX: report len(peers). len(pids) was measured after the
					// result channel had been drained, so it was always 0.
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("gcp-peers-found-%d", i),
						Unit:           "peers",
						ImprovementDir: 1,
					}, float64(len(peers)))
					actualClosest := getClosestPeerRanking(node, others, c)
					outputGCP(runenv, node.info.Addrs.ID, c, peers, actualClosest)
				} else {
					// FIX: %v, not %w — RecordMessage formats Sprintf-style,
					// and the %w verb is only supported by fmt.Errorf.
					runenv.RecordMessage("Error during GCP %v", err)
				}
				return err
			})
		}

		if err := g.Wait(); err != nil {
			_ = stager.End()
			// FIX: message previously said "finding providerss" (copy/paste
			// from the find-providers test).
			return fmt.Errorf("failed while finding closest peers: %s", err)
		}
	}
	runenv.RecordMessage("done gcp loop")

	if err := stager.End(); err != nil {
		return err
	}

	return nil
}
// getClosestPeerRanking returns every known peer (including ourselves)
// sorted by XOR distance to the target CID's Kademlia key.
func getClosestPeerRanking(me *NodeParams, others map[peer.ID]*DHTNodeInfo, target cid.Cid) []peer.ID {
	allPeers := make([]peer.ID, 0, len(others)+1)
	allPeers = append(allPeers, me.dht.PeerID())
	for p := range others {
		allPeers = append(allPeers, p)
	}

	kadTarget := kbucket.ConvertKey(target.KeyString())
	return kbucket.SortClosestPeers(allPeers, kadTarget)
}
// outputGCP logs the outcome of one GetClosestPeers query: the peers we
// found, the ideal top-len(peers) slice of the true distance ranking, both
// sets in Kademlia keyspace, and a per-position rank-deviation score.
// NOTE(review): peerStrs and kadPeerStrs are computed but never used in the
// log call below — dead code. They are left in place because removing them
// would orphan this file's encoding/hex import; clean up both together.
func outputGCP(runenv *runtime.RunEnv, me peer.ID, target cid.Cid, peers, rankedPeers []peer.ID) {
	peerStrs := make([]string, len(peers))
	kadPeerStrs := make([]string, len(peers))

	for i, p := range peers {
		peerStrs[i] = p.String()
		kadPeerStrs[i] = hex.EncodeToString(kbucket.ConvertKey(string(p)))
	}

	// The ideal result is the first len(peers) entries of the global ranking.
	actualClosest := rankedPeers[:len(peers)]

	nodeLogger.Infow("gcp-results",
		"me", me.String(),
		"KadMe", kbucket.ConvertKey(string(me)),
		"target", target,
		"peers", peers,
		"actual", actualClosest,
		"KadTarget", kbucket.ConvertKey(target.KeyString()),
		"KadPeers", peerIDsToKadIDs(peers),
		"KadActual", peerIDsToKadIDs(actualClosest),
		"Scores", gcpScore(peers, rankedPeers),
	)
	// Best-effort flush of the structured asset; errors are ignored.
	_ = nodeLogger.Sync()
}
// gcpScore reports, for each peer in the query result, the difference
// between its position in the true distance ranking and its position in the
// result (0 means perfectly placed; a peer absent from the ranking scores
// -1-i for position i).
func gcpScore(peers, rankedPeers []peer.ID) []int {
	// rank returns the peer's index within rankedPeers, or -1 if missing.
	rank := func(target peer.ID) int {
		for i, p := range rankedPeers {
			if p == target {
				return i
			}
		}
		return -1
	}

	// score is distance between actual ranking and our ranking
	var scores []int
	for i, p := range peers {
		scores = append(scores, rank(p)-i)
	}
	return scores
}
// peerIDsToKadIDs maps libp2p peer IDs to their Kademlia keyspace IDs.
func peerIDsToKadIDs(peers []peer.ID) []kbucket.ID {
	out := make([]kbucket.ID, 0, len(peers))
	for _, p := range peers {
		out = append(out, kbucket.ConvertPeerID(p))
	}
	return out
}

View File

@ -0,0 +1,73 @@
package test
import (
"fmt"
"github.com/testground/sdk-go/runtime"
)
// TODO this entire test needs to be revisited.

// ProvideStress implements the Provide Stress test case
//
// The original implementation (from the pre-migration sync API) is preserved
// below as a reference for the intended narrative — repeatedly Provide a
// random CID every `i-provides` until `n-provides` records have been
// published — but it does not compile against the current SDK, so the test
// currently reports itself as unimplemented.
func ProvideStress(runenv *runtime.RunEnv) error {
	// // Test Parameters
	// var (
	// 	timeout     = time.Duration(runenv.IntParamD("timeout_secs", 60)) * time.Second
	// 	randomWalk  = runenv.BooleanParamD("random_walk", false)
	// 	bucketSize  = runenv.IntParamD("bucket_size", 20)
	// 	autoRefresh = runenv.BooleanParamD("auto_refresh", true)
	// 	nProvides   = runenv.IntParamD("n_provides", 10)
	// 	iProvides   = time.Duration(runenv.IntParamD("i-provides", 1)) * time.Second
	// )

	// ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel()

	// watcher, writer := sync.MustWatcherWriter(runenv)
	// defer watcher.Close()
	// defer writer.Close()

	// _, dht, _, err := SetUp(ctx, runenv, timeout, randomWalk, bucketSize, autoRefresh, watcher, writer)
	// if err != nil {
	// 	runenv.Abort(err)
	// 	return
	// }
	// defer TearDown(ctx, runenv, watcher, writer)

	// /// --- Act I

	// // Each node calls Provide for `i-provides` until it reaches a total of `n-provides`
	// var (
	// 	seed    = 0
	// 	counter = 0
	// )
	// Loop:
	// 	for {
	// 		select {
	// 		case <-time.After(iProvides):
	// 			v := fmt.Sprintf("%d -- something random", seed)
	// 			mhv := ipfsUtil.Hash([]byte(v))
	// 			cidToPublish := cid.NewCidV0(mhv)

	// 			err := dht.Provide(ctx, cidToPublish, true)
	// 			if err != nil {
	// 				runenv.Abort(fmt.Errorf("Failed on .Provide - %w", err))
	// 				return
	// 			}

	// 			runenv.RecordMessage("Provided a CID")
	// 			counter++

	// 			if counter == nProvides {
	// 				break Loop
	// 			}
	// 		case <-ctx.Done():
	// 			runenv.Abort(fmt.Errorf("Context closed before ending the test"))
	// 			return
	// 		}
	// 	}

	// runenv.RecordMessage("Provided all scheduled CIDs")
	// runenv.OK()
	return fmt.Errorf("unimplemented")
}

319
dht/test/store_get_value.go Normal file
View File

@ -0,0 +1,319 @@
package test
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/libp2p/test-plans/dht/utils"
"github.com/testground/sdk-go/runtime"
"github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/gogo/protobuf/proto"
"github.com/ipfs/go-ipns"
ipns_pb "github.com/ipfs/go-ipns/pb"
"golang.org/x/sync/errgroup"
)
// StoreGetValue is the entry point of the IPNS put/get test case: set up and
// bootstrap the network, run the IPNS record measurements, then tear down.
func StoreGetValue(runenv *runtime.RunEnv) error {
	commonOpts := GetCommonOpts(runenv)

	ctx, cancel := context.WithTimeout(context.Background(), commonOpts.Timeout)
	defer cancel()

	ri, err := Base(ctx, runenv, commonOpts)
	if err != nil {
		return err
	}
	if err = TestIPNSRecords(ctx, ri); err != nil {
		return err
	}

	Teardown(ctx, ri.RunInfo)
	return nil
}
// TestIPNSRecords runs five rounds of IPNS put/get against the DHT: each
// round publishes a new revision of every locally-owned record and then
// resolves the records advertised by the other groups.
func TestIPNSRecords(ctx context.Context, ri *DHTRunInfo) error {
	runenv := ri.RunEnv
	node := ri.Node

	fpOpts := getFindProvsParams(ri.RunEnv.RunParams.TestInstanceParams)

	stager := utils.NewBatchStager(ctx, node.info.Seq, runenv.TestInstanceCount, "ipns-records", ri.RunInfo)

	emitRecords, searchRecords, err := generateIPNSRecords(ri, fpOpts)
	if err != nil {
		return err
	}

	// Precompute the DHT record key for each key pair we will publish under.
	emitRecordsKeys := make([]string, len(emitRecords))
	for i, privKey := range emitRecords {
		pid, err := peer.IDFromPrivateKey(privKey)
		if err != nil {
			return err
		}
		emitRecordsKeys[i] = ipns.RecordKey(pid)
	}

	runenv.RecordMessage("start put loop")

	for round := 0; round < 5; round++ {
		if err := putIPNSRecord(ctx, ri, fpOpts, stager, round, emitRecords, emitRecordsKeys); err != nil {
			return err
		}
		if err := getIPNSRecord(ctx, ri, fpOpts, stager, round, searchRecords); err != nil {
			return err
		}
	}

	return nil
}
// putIPNSRecord executes one "put" round: if this group publishes records,
// a fresh IPNS record revision (sequence number recNum) is created for every
// key and pushed into the DHT concurrently, recording per-record put latency.
// Put failures are logged as metrics but do not fail the round.
func putIPNSRecord(ctx context.Context, ri *DHTRunInfo, fpOpts findProvsParams, stager utils.Stager, recNum int, emitRecords []crypto.PrivKey, emitRecordsKeys []string) error {
	runenv := ri.RunEnv
	node := ri.Node

	if err := stager.Begin(); err != nil {
		return err
	}

	// If we're a member of the putting cohort, let's put those IPNS records to the network.
	if fpOpts.RecordCount > 0 {
		g := errgroup.Group{}
		for index, privKey := range emitRecords {
			i := index
			record, err := ipns.Create(privKey, []byte(fmt.Sprintf("/path/to/stuff/%d", recNum)), uint64(recNum), time.Now().Add(time.Hour))
			if err != nil {
				return err
			}
			if err := ipns.EmbedPublicKey(privKey.GetPublic(), record); err != nil {
				return err
			}

			recordKey := emitRecordsKeys[i]
			recordBytes, err := proto.Marshal(record)
			if err != nil {
				return err
			}
			g.Go(func() error {
				ectx, cancel := context.WithCancel(ctx) //nolint
				// FIX: trace on the cancellable child context. Previously the
				// parent ctx was passed, so cancel() never applied to the
				// traced query context (compare TestFindPeers, which uses ectx).
				ectx = TraceQuery(ectx, runenv, node, recordKey, "ipns-records")
				t := time.Now()
				err := node.dht.PutValue(ectx, recordKey, recordBytes)
				cancel()

				if err == nil {
					runenv.RecordMessage("Put IPNS Key: %s", recordKey)
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-put-%d", i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))
				} else {
					runenv.RecordMessage("Failed to Put IPNS Key: %s : err: %s", recordKey, err)
					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-failed-put-%d", i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))
				}

				// Always nil: put failures are reported via metrics only.
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			panic("how is there an error here?")
		}
	}

	if err := stager.End(); err != nil {
		return err
	}
	return nil
}
// getIPNSRecord searches the DHT for every IPNS record announced by the
// emitting groups during round recNum, bracketed by a stager barrier.
//
// For each key it records time-to-first/last result and the number of record
// updates observed, tagged with the emitting group and a status of "done" or
// "fail". A lookup counts as failed when no record arrives, or as incomplete
// when the best record's sequence number lags the current round. Lookup
// errors never fail the test; they are only reflected in metrics.
func getIPNSRecord(ctx context.Context, ri *DHTRunInfo, fpOpts findProvsParams, stager utils.Stager, recNum int, searchRecords []*RecordSubmission) error {
	runenv := ri.RunEnv
	node := ri.Node

	if err := stager.Begin(); err != nil {
		return err
	}

	if fpOpts.SearchRecords {
		g := errgroup.Group{}
		for _, record := range searchRecords {
			for index, key := range record.RecordIDs {
				// Per-iteration copies for the closure below.
				i := index
				k := key
				groupID := record.GroupID
				g.Go(func() error {
					ectx, cancel := context.WithCancel(ctx) //nolint
					// BUGFIX: derive the tracing context from the cancellable
					// ectx (was ctx), so cancel() actually ends tracing.
					ectx = TraceQuery(ectx, runenv, node, k, "ipns-records")
					t := time.Now()

					runenv.RecordMessage("Searching for IPNS Key: %s", k)
					numRecs := 0
					recordCh, err := node.dht.SearchValue(ectx, k)
					if err != nil {
						runenv.RecordMessage("Failed to Search for IPNS Key: %s : err: %s", k, err)
						runenv.RecordMetric(&runtime.MetricDefinition{
							// BUGFIX: this is a failed *get*, not a failed put
							// (metric name was copy-pasted from putIPNSRecord).
							Name:           fmt.Sprintf("time-to-failed-get-%d", i),
							Unit:           "ns",
							ImprovementDir: -1,
						}, float64(time.Since(t).Nanoseconds()))
						// BUGFIX: release the context on this early return too.
						cancel()
						return nil //nolint
					}

					status := "done"
					var tLastFound time.Time
					var lastRec []byte

					// Drain SearchValue's stream of progressively better
					// records until it closes or the test context expires.
				searchLoop:
					for {
						select {
						case rec, ok := <-recordCh:
							if !ok {
								break searchLoop
							}
							lastRec = rec
							tLastFound = time.Now()
							if numRecs == 0 {
								runenv.RecordMetric(&runtime.MetricDefinition{
									Name:           fmt.Sprintf("time-to-get-first|%s|%d", groupID, i),
									Unit:           "ns",
									ImprovementDir: -1,
								}, float64(tLastFound.Sub(t).Nanoseconds()))
							}
							numRecs++
						case <-ctx.Done():
							break searchLoop
						}
					}
					cancel()

					if numRecs > 0 {
						runenv.RecordMetric(&runtime.MetricDefinition{
							Name:           fmt.Sprintf("time-to-get-last|%s|%s|%d", status, groupID, i),
							Unit:           "ns",
							ImprovementDir: -1,
						}, float64(tLastFound.Sub(t).Nanoseconds()))
						runenv.RecordMetric(&runtime.MetricDefinition{
							Name:           fmt.Sprintf("record-updates|%s|%s|%d|%d", status, groupID, recNum, i),
							Unit:           "records",
							ImprovementDir: -1,
						}, float64(numRecs))

						if len(lastRec) == 0 {
							panic("this should not be possible")
						}
						recordResult := &ipns_pb.IpnsEntry{}
						if err := recordResult.Unmarshal(lastRec); err != nil {
							panic(fmt.Errorf("received invalid IPNS record: err %v", err))
						}
						// The record's sequence number encodes the round it was
						// published in. BUGFIX: staleness is the record lagging
						// the current round (the old `Sequence - recNum`
						// difference could never be positive, since puts for
						// round recNum complete before any gets begin).
						if diff := recNum - int(*recordResult.Sequence); diff > 0 {
							runenv.RecordMetric(&runtime.MetricDefinition{
								Name:           fmt.Sprintf("incomplete-get|%s|%d|%d", groupID, recNum, i),
								Unit:           "records",
								ImprovementDir: -1,
							}, float64(diff))
							status = "fail"
						}
					} else {
						status = "fail"
					}

					runenv.RecordMetric(&runtime.MetricDefinition{
						Name:           fmt.Sprintf("time-to-get|%s|%s|%d|%d", status, groupID, recNum, i),
						Unit:           "ns",
						ImprovementDir: -1,
					}, float64(time.Since(t).Nanoseconds()))
					return nil
				})
			}
		}
		// Every goroutine returns nil, so an error here indicates a bug.
		if err := g.Wait(); err != nil {
			panic("how is this possible?")
		}
	}

	if err := stager.End(); err != nil {
		return err
	}
	return nil
}
// generateIPNSRecords returns the records we plan to store and those we plan to search for
//
// Key generation is fully deterministic (seeded PRNG), so every instance can
// independently reconstruct the exact keys other groups will publish without
// any out-of-band exchange. Do not reorder the rng calls below: the derived
// keys depend on the exact sequence of draws.
func generateIPNSRecords(ri *DHTRunInfo, fpOpts findProvsParams) (emitRecords []crypto.PrivKey, searchRecords []*RecordSubmission, err error) {
	// recGen deterministically derives the private keys for groupID's records.
	// NOTE(review): the seed is always taken from *our* fpOpts.RecordSeed, even
	// when reconstructing another group's keys via groupFPOpts — this only
	// lines up if every group shares the same record-seed parameter; confirm.
	recGen := func(groupID string, groupFPOpts findProvsParams) (out []crypto.PrivKey, err error) {
		// Calculate key based on seed
		rng := rand.New(rand.NewSource(int64(fpOpts.RecordSeed)))
		// Calculate key based on group (run through the rng to do this)
		// One draw per group, in sorted group order, gives each group a
		// distinct deterministic starting point in the PRNG stream.
		for _, g := range ri.Groups {
			rng.Int63()
			if g == groupID {
				break
			}
		}
		// Unique key per record is generated since the rng is mutated by creating the new key
		for i := 0; i < groupFPOpts.RecordCount; i++ {
			priv, _, err := crypto.GenerateEd25519Key(rng)
			if err != nil {
				return nil, err
			}
			out = append(out, priv)
		}
		return out, nil
	}
	// Only the first instance of an emitting group (GroupSeq 0) publishes.
	if fpOpts.RecordCount > 0 && ri.Node.info.GroupSeq == 0 {
		// Calculate the CIDs we're dealing with.
		emitRecords, err = recGen(ri.Node.info.Group, fpOpts)
		if err != nil {
			return
		}
	}
	// Searchers reconstruct every emitting group's keys the same way.
	if fpOpts.SearchRecords {
		for _, g := range ri.Groups {
			gOpts := ri.GroupProperties[g]
			groupFPOpts := getFindProvsParams(gOpts.Params)
			if groupFPOpts.RecordCount > 0 {
				recs, err := recGen(g, groupFPOpts)
				if err != nil {
					return nil, nil, err
				}
				ipnsKeys := make([]string, len(recs))
				for i, k := range recs {
					pid, err := peer.IDFromPrivateKey(k)
					if err != nil {
						return nil, nil, err
					}
					ipnsKeys[i] = ipns.RecordKey(pid)
				}
				searchRecords = append(searchRecords, &RecordSubmission{
					RecordIDs: ipnsKeys,
					GroupID:   g,
				})
			}
		}
	}
	return
}
// RecordSubmission lists the IPNS record keys that a single group is
// expected to have published to the DHT.
type RecordSubmission struct {
	RecordIDs []string // IPNS routing keys for the group's records
	GroupID   string   // ID of the publishing group
}

9
dht/test/sync.go Normal file
View File

@ -0,0 +1,9 @@
package test
import (
"github.com/testground/sdk-go/sync"
)
// PeerAttribTopic represents a subtree under the test run's sync tree where peers
// participating in this distributed test advertise their attributes.
// Values published on this topic are *DHTNodeInfo.
var PeerAttribTopic = sync.NewTopic("attribs", &DHTNodeInfo{})

79
dht/test/tracers.go Normal file
View File

@ -0,0 +1,79 @@
package test
import (
"context"
"sync"
"github.com/testground/sdk-go/runtime"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
routing "github.com/libp2p/go-libp2p-core/routing"
"go.uber.org/zap"
)
var (
qonce sync.Once
qlogger *zap.SugaredLogger
)
// TraceConnections starts tracing connections into an output asset with name
// conn_trace.out. Each connect/disconnect event on the host's network is
// logged as one structured line keyed by this node's peer ID.
func TraceConnections(runenv *runtime.RunEnv, node host.Host) error {
	_, logger, err := runenv.CreateStructuredAsset("conn_trace.out", runtime.StandardJSONConfig())
	if err != nil {
		return err
	}
	logger = logger.With("id", node.ID())

	bundle := &network.NotifyBundle{
		ConnectedF: func(_ network.Network, c network.Conn) {
			logger.Infow("connect", "peer", c.RemotePeer(), "dir", c.Stat().Direction)
		},
		DisconnectedF: func(_ network.Network, c network.Conn) {
			logger.Infow("disconnect", "peer", c.RemotePeer())
		},
	}
	node.Network().Notify(bundle)
	return nil
}
// TraceQuery returns a context.Context that can be used in a DHT query to
// cause query events to be traced. It initialises the output asset once.
func TraceQuery(ctx context.Context, runenv *runtime.RunEnv, node *NodeParams, target string, tag string) context.Context {
	// Lazily create the shared structured-log asset exactly once per process;
	// on failure, degrade to a no-op logger rather than aborting the test.
	qonce.Do(func() {
		var err error
		_, qlogger, err = runenv.CreateStructuredAsset("dht_queries.out", runtime.StandardJSONConfig())
		if err != nil {
			runenv.RecordMessage("failed to initialize dht_queries.out asset; nooping logger: %s", err)
			qlogger = zap.NewNop().Sugar()
		}
	})
	ectx, events := routing.RegisterForQueryEvents(ctx)
	log := qlogger.With("tag", tag, "node", node.host.ID().Pretty(), "target", target)
	// Drain query events until the events channel closes (which happens when
	// ectx ends), logging one structured line per event.
	go func() {
		for e := range events {
			var msg string
			switch e.Type {
			case routing.SendingQuery:
				msg = "send"
			case routing.PeerResponse:
				msg = "receive"
			case routing.AddingPeer:
				msg = "adding"
			case routing.DialingPeer:
				msg = "dialing"
			case routing.QueryError:
				msg = "error"
			case routing.Provider, routing.Value:
				msg = "result"
				// NOTE(review): unrecognized event types fall through with an
				// empty msg — confirm whether they should get an explicit label.
			}
			log.Infow(msg, "peer", e.ID, "closer", e.Responses, "value", e.Extra)
		}
	}()
	retCtx := specializedTraceQuery(ectx, runenv, tag)
	return retCtx
}

109
dht/utils/identifiers.go Normal file
View File

@ -0,0 +1,109 @@
package utils
import (
"context"
"sort"
"github.com/testground/sdk-go/sync"
)
// GetGroupsAndSeqs computes this instance's sequence number within its own
// group (groupSeq) and across the whole test run (testSeq), after sharing
// group membership and ordering information via the sync service.
func GetGroupsAndSeqs(ctx context.Context, ri *RunInfo, groupOrder int) (groupSeq, testSeq int, err error) {
	if groupSeq, err = getGroupSeq(ctx, ri); err != nil {
		return
	}
	if err = setGroupInfo(ctx, ri, groupOrder); err != nil {
		return
	}
	ri.RunEnv.RecordMessage("past group info")

	testSeq = getNodeID(ri, groupSeq)
	return
}
// getGroupSeq returns the sequence number of this test instance within its group
func getGroupSeq(ctx context.Context, ri *RunInfo) (int, error) {
	seq, err := ri.Client.SignalAndWait(ctx, sync.State(ri.RunEnv.TestGroupID), ri.RunEnv.TestGroupInstanceCount)
	// SignalAndWait hands out 1-based sequence numbers; normalize to 0-based.
	return int(seq) - 1, err
}
// setGroupInfo uses the sync service to determine which groups are part of the test and to get their sizes.
// This information is set on the passed in RunInfo: ri.Groups is a
// deterministic flat ordering of the group IDs (by numeric order, then by
// name) and ri.GroupProperties maps group ID to its advertised GroupInfo.
func setGroupInfo(ctx context.Context, ri *RunInfo, groupOrder int) error {
	gi := &GroupInfo{
		ID:     ri.RunEnv.TestGroupID,
		Size:   ri.RunEnv.TestGroupInstanceCount,
		Order:  groupOrder,
		Params: ri.RunEnv.TestInstanceParams,
	}

	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	groupInfoCh := make(chan *GroupInfo)
	ri.Client.MustPublishSubscribe(subCtx, GroupIDTopic, gi, groupInfoCh)

	groupOrderMap := make(map[int][]string)
	groups := make(map[string]*GroupInfo)

	// Collect one announcement per instance; duplicates from the same group
	// are folded into a single entry.
collect:
	for i := 0; i < ri.RunEnv.TestInstanceCount; i++ {
		select {
		case g, more := <-groupInfoCh:
			if !more {
				// BUGFIX: a bare break here only exited the select, leaving
				// the loop spinning on a closed channel; break the loop.
				break collect
			}
			if _, ok := groups[g.ID]; !ok {
				groups[g.ID] = g
				groupOrderMap[g.Order] = append(groupOrderMap[g.Order], g.ID)
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	ri.RunEnv.RecordMessage("there are %d groups %v", len(groups), groups)

	// Flatten into a deterministic ordering: primary key is the numeric group
	// order, secondary key the group ID.
	sortedGroups := make([]string, 0, len(groups))
	sortedOrderNums := make([]int, 0, len(groupOrderMap))
	for order := range groupOrderMap {
		sortedOrderNums = append(sortedOrderNums, order)
	}
	sort.Ints(sortedOrderNums)
	for _, order := range sortedOrderNums {
		// BUGFIX: index by the sorted order value, not the loop counter —
		// the old code broke whenever order numbers were not exactly 0..n-1.
		sort.Strings(groupOrderMap[order])
		sortedGroups = append(sortedGroups, groupOrderMap[order]...)
	}

	ri.Groups = sortedGroups
	ri.GroupProperties = groups
	ri.RunEnv.RecordMessage("sortedGroup order %v", sortedGroups)
	return nil
}
// getNodeID returns the sequence number of this test instance within the test
func getNodeID(ri *RunInfo, seq int) int {
	// Sum the sizes of every group ordered before ours, then add our
	// within-group sequence number.
	offset := 0
	for _, g := range ri.Groups {
		if g == ri.RunEnv.TestGroupID {
			break
		}
		offset += ri.GroupProperties[g].Size
	}
	return offset + seq
}
// GroupIDTopic represents a subtree under the test run's sync tree where peers
// participating in this distributed test advertise their groups.
// Values published on this topic are *GroupInfo.
var GroupIDTopic = sync.NewTopic("groupIDs", &GroupInfo{})

// GroupInfo describes one test group as advertised on GroupIDTopic.
type GroupInfo struct {
	ID     string            // test group ID
	Size   int               // number of instances in the group
	Order  int               // ordering key used to sort groups deterministically
	Params map[string]string // the group's test instance parameters
}

14
dht/utils/runinfo.go Normal file
View File

@ -0,0 +1,14 @@
package utils
import (
"github.com/testground/sdk-go/runtime"
"github.com/testground/sdk-go/sync"
)
// RunInfo bundles the per-run context shared by the test utilities: the
// runtime environment, the sync-service client, and the group topology
// discovered during setup.
type RunInfo struct {
	RunEnv *runtime.RunEnv
	Client *sync.Client
	// Groups lists all group IDs in their deterministic order.
	Groups []string
	// GroupProperties maps group ID to that group's advertised metadata.
	GroupProperties map[string]*GroupInfo
}

202
dht/utils/staging.go Normal file
View File

@ -0,0 +1,202 @@
package utils
import (
"context"
"fmt"
"math"
"strconv"
"time"
"github.com/testground/sdk-go/runtime"
"github.com/testground/sdk-go/sync"
)
// Stager coordinates staged execution across test instances: Begin blocks
// until this instance's stage may start, End marks the stage finished, and
// Reset switches to a fresh stage namespace.
type Stager interface {
	Begin() error
	End() error
	Reset(name string)
}

// stager holds the state shared by the Stager implementations below.
type stager struct {
	ctx   context.Context
	seq   int    // this instance's sequence number
	total int    // total number of participating instances
	name  string // namespace prefix for the sync states of this stager
	stage int    // current stage counter, advanced by Begin
	ri    *RunInfo
	t     time.Time // start time of the current stage
}

// Reset restarts staging under a new name, beginning again from stage 0.
func (s *stager) Reset(name string) {
	s.name = name
	s.stage = 0
}
// NewBatchStager builds a BatchStager for the instance with sequence number
// seq out of total participants, scoping its sync states under name.
func NewBatchStager(ctx context.Context, seq int, total int, name string, ri *RunInfo) *BatchStager {
	inner := stager{
		ctx:   ctx,
		seq:   seq,
		total: total,
		name:  name,
		stage: 0,
		ri:    ri,
		t:     time.Now(),
	}
	return &BatchStager{inner}
}
// BatchStager lets all instances run each stage concurrently, synchronizing
// on a shared barrier only at the end of the stage.
type BatchStager struct{ stager }

// Begin advances to the next stage and records its start time. It never
// blocks: batch staging only synchronizes in End.
func (s *BatchStager) Begin() error {
	s.stage += 1
	s.t = time.Now()
	return nil
}
// End signals that this instance finished the current stage, then blocks
// until all s.total instances have done the same. It records three timing
// metrics per stage: the signal round-trip, the barrier wait, and the full
// stage duration since Begin.
func (s *BatchStager) End() error {
	// Signal that we're done
	stage := sync.State(s.name + strconv.Itoa(s.stage))

	t := time.Now()
	_, err := s.ri.Client.SignalEntry(s.ctx, stage)
	if err != nil {
		return err
	}
	s.ri.RunEnv.RecordMetric(&runtime.MetricDefinition{
		Name:           "signal " + string(stage),
		Unit:           "ns",
		ImprovementDir: -1,
	}, float64(time.Since(t).Nanoseconds()))

	t = time.Now()
	err = <-s.ri.Client.MustBarrier(s.ctx, stage, s.total).C
	s.ri.RunEnv.RecordMetric(&runtime.MetricDefinition{
		// BUGFIX: include the separator space, consistent with the
		// "signal " and "full " metric names above/below.
		Name:           "barrier " + string(stage),
		Unit:           "ns",
		ImprovementDir: -1,
	}, float64(time.Since(t).Nanoseconds()))
	s.ri.RunEnv.RecordMetric(&runtime.MetricDefinition{
		Name:           "full " + string(stage),
		Unit:           "ns",
		ImprovementDir: -1,
	}, float64(time.Since(s.t).Nanoseconds()))
	return err
}
// Reset restarts staging under a new name, beginning again from stage 0.
func (s *BatchStager) Reset(name string) { s.stager.Reset(name) }
// NewSinglePeerStager builds a SinglePeerStager for the instance with
// sequence number seq out of total participants, scoped under name.
func NewSinglePeerStager(ctx context.Context, seq int, total int, name string, ri *RunInfo) *SinglePeerStager {
	inner := stager{
		ctx:   ctx,
		seq:   seq,
		total: total,
		name:  name,
		stage: 0,
		ri:    ri,
		t:     time.Now(),
	}
	return &SinglePeerStager{BatchStager{inner}}
}

// SinglePeerStager serializes instances within a stage: each instance runs
// only after its predecessors (by sequence number) have entered.
type SinglePeerStager struct{ BatchStager }
// Begin advances the stage and then blocks until s.seq predecessors have
// entered the stage's state, serializing the instances.
func (s *SinglePeerStager) Begin() error {
	if err := s.BatchStager.Begin(); err != nil {
		return err
	}
	// Wait until it's our turn.
	// BUGFIX: string(s.stage) converts the int to a rune, not its decimal
	// representation, so the state name never matched the one built with
	// strconv.Itoa elsewhere (e.g. BatchStager.End).
	stage := sync.State(s.name + strconv.Itoa(s.stage))
	return <-s.ri.Client.MustBarrier(s.ctx, stage, s.seq).C
}
// End delegates to BatchStager.End: signal completion and wait for everyone.
func (s *SinglePeerStager) End() error {
	return s.BatchStager.End()
}

// Reset restarts staging under a new name, beginning again from stage 0.
func (s *SinglePeerStager) Reset(name string) { s.stager.Reset(name) }
// NewGradualStager builds a GradualStager for the instance with sequence
// number seq out of total participants, scoped under name. gradFn decides
// the admission schedule (see gradualFn).
func NewGradualStager(ctx context.Context, seq int, total int, name string, ri *RunInfo, gradFn gradualFn) *GradualStager {
	return &GradualStager{BatchStager{stager{
		ctx:   ctx,
		seq:   seq,
		total: total,
		name:  name,
		stage: 0,
		ri:    ri,
		t:     time.Now(),
	}}, gradFn}
}

// gradualFn maps an instance's sequence number to its turn number and the
// number of signals it must wait for before that turn may start.
type gradualFn func(seq int) (int, int)

// GradualStager admits instances into a stage in successive turns, as
// scheduled by its gradualFn.
type GradualStager struct {
	BatchStager
	gradualFn
}
// Begin advances the stage, then blocks until this instance's turn: it waits
// for waitFor entries on its own turn's state (signalled by the previous
// turn's members) before signalling entry into the next turn's state.
func (s *GradualStager) Begin() error {
	if err := s.BatchStager.Begin(); err != nil {
		return err
	}
	// Wait until it's our turn
	ourTurn, waitFor := s.gradualFn(s.seq)
	stageWait := sync.State(fmt.Sprintf("%s%d-%d", s.name, s.stage, ourTurn))
	stageNext := sync.State(fmt.Sprintf("%s%d-%d", s.name, s.stage, ourTurn+1))
	s.ri.RunEnv.RecordMessage("%d is waiting on %d from state %d", s.seq, waitFor, ourTurn)
	err := <-s.ri.Client.MustBarrier(s.ctx, stageWait, waitFor).C
	if err != nil {
		return err
	}
	s.ri.RunEnv.RecordMessage("%d is running", s.seq)
	// Unblock the next turn's members.
	_, err = s.ri.Client.SignalEntry(s.ctx, stageNext)
	return err
}
// End signals that this instance finished the stage, then waits until the
// other instances have signalled too before returning.
func (s *GradualStager) End() error {
	lastStage := sync.State(fmt.Sprintf("%s%d-end", s.name, s.stage))
	_, err := s.ri.Client.SignalEntry(s.ctx, lastStage)
	if err != nil {
		return err
	}
	// NOTE(review): waits for count-1 entries even though all count instances
	// signal the state — presumably intentional, but confirm it cannot
	// release an instance before the last signal lands.
	total := s.ri.RunEnv.TestInstanceCount - 1
	s.ri.RunEnv.RecordMessage("%d is done - waiting for %d", s.seq, total)
	err = <-s.ri.Client.MustBarrier(s.ctx, lastStage, total).C
	s.ri.RunEnv.RecordMessage("%d passed the barrier", s.seq)
	return err
}

// Reset restarts staging under a new name, beginning again from stage 0.
func (s *GradualStager) Reset(name string) { s.stager.Reset(name) }
// LinearGradualStaging returns a gradualFn admitting instances in fixed-size
// turns of slope members each: turn 0 starts immediately, and every later
// turn waits for all slope members of the previous turn to signal.
func LinearGradualStaging(slope int) gradualFn {
	return func(seq int) (int, int) {
		// Integer division is floor for non-negative seq; the previous
		// float64 round-trip (math.Floor) and the redundant re-declaration
		// of slope were unnecessary.
		turnNum := seq / slope
		waitFor := slope
		if turnNum == 0 {
			waitFor = 0
		}
		return turnNum, waitFor
	}
}
// ExponentialGradualStaging returns a gradualFn whose turns double in size:
// the first two instances each get their own turn, and every later turn k
// (k >= 2) waits for 2^(k-2) signals from the previous turn.
func ExponentialGradualStaging() gradualFn {
	return func(seq int) (int, int) {
		if seq == 0 {
			return 0, 0
		}
		if seq == 1 {
			return 1, 1
		}
		turnNum := int(math.Floor(math.Log2(float64(seq)))) + 1
		waitFor := int(math.Exp2(float64(turnNum - 2)))
		return turnNum, waitFor
	}
}
// NoStager is a Stager that performs no synchronization at all; every method
// returns immediately.
type NoStager struct{}

// Begin is a no-op.
func (s *NoStager) Begin() error { return nil }

// End is a no-op.
func (s *NoStager) End() error { return nil }

// Reset is a no-op.
func (s *NoStager) Reset(name string) {}