Add `testing/deployer` (née `consul-topology`) [NET-4610] (#17823)

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: Freddy <freddygv@users.noreply.github.com>
Nick Irvine 2023-07-17 15:15:22 -07:00 committed by GitHub
parent 07fce869af
commit 62005369b5
60 changed files with 7818 additions and 0 deletions


@@ -34,6 +34,7 @@ jobs:
- "envoyextensions"
- "troubleshoot"
- "test/integration/consul-container"
- "testing/deployer"
fail-fast: true
name: lint ${{ matrix.directory }}
steps:

4
testing/deployer/.gitignore vendored Normal file

@@ -0,0 +1,4 @@
/terraform
/workdir
/sample-cli
workdir

179
testing/deployer/README.md Normal file

@@ -0,0 +1,179 @@
[![GoDoc](https://pkg.go.dev/badge/github.com/hashicorp/consul/testing/deployer)](https://pkg.go.dev/github.com/hashicorp/consul/testing/deployer)
## Summary
This is a Go library used to launch one or more Consul clusters that can be
peered using the cluster peering feature. Under the covers, `terraform` is used
in conjunction with the
[`kreuzwerker/docker`](https://registry.terraform.io/providers/kreuzwerker/docker/latest)
provider to manage a fleet of local Docker containers and networks.
### Configuration
The complete topology of Consul clusters is defined using a `topology.Config`,
which lets you define a set of networks and reference those networks when
assigning nodes and services to clusters. Both Consul client agents and
`consul-dataplane` instances are supported.
Here is an example configuration with two peered clusters:
```
cfg := &topology.Config{
	Networks: []*topology.Network{
		{Name: "dc1"},
		{Name: "dc2"},
		{Name: "wan", Type: "wan"},
	},
	Clusters: []*topology.Cluster{
		{
			Name: "dc1",
			Nodes: []*topology.Node{
				{
					Kind: topology.NodeKindServer,
					Name: "dc1-server1",
					Addresses: []*topology.Address{
						{Network: "dc1"},
						{Network: "wan"},
					},
				},
				{
					Kind: topology.NodeKindClient,
					Name: "dc1-client1",
					Services: []*topology.Service{
						{
							ID: topology.ServiceID{Name: "mesh-gateway"},
							Port: 8443,
							EnvoyAdminPort: 19000,
							IsMeshGateway: true,
						},
					},
				},
				{
					Kind: topology.NodeKindClient,
					Name: "dc1-client2",
					Services: []*topology.Service{
						{
							ID: topology.ServiceID{Name: "ping"},
							Image: "rboyer/pingpong:latest",
							Port: 8080,
							EnvoyAdminPort: 19000,
							Command: []string{
								"-bind", "0.0.0.0:8080",
								"-dial", "127.0.0.1:9090",
								"-pong-chaos",
								"-dialfreq", "250ms",
								"-name", "ping",
							},
							Upstreams: []*topology.Upstream{{
								ID: topology.ServiceID{Name: "pong"},
								LocalPort: 9090,
								Peer: "peer-dc2-default",
							}},
						},
					},
				},
			},
			InitialConfigEntries: []api.ConfigEntry{
				&api.ExportedServicesConfigEntry{
					Name: "default",
					Services: []api.ExportedService{{
						Name: "ping",
						Consumers: []api.ServiceConsumer{{
							Peer: "peer-dc2-default",
						}},
					}},
				},
			},
		},
		{
			Name: "dc2",
			Nodes: []*topology.Node{
				{
					Kind: topology.NodeKindServer,
					Name: "dc2-server1",
					Addresses: []*topology.Address{
						{Network: "dc2"},
						{Network: "wan"},
					},
				},
				{
					Kind: topology.NodeKindClient,
					Name: "dc2-client1",
					Services: []*topology.Service{
						{
							ID: topology.ServiceID{Name: "mesh-gateway"},
							Port: 8443,
							EnvoyAdminPort: 19000,
							IsMeshGateway: true,
						},
					},
				},
				{
					Kind: topology.NodeKindDataplane,
					Name: "dc2-client2",
					Services: []*topology.Service{
						{
							ID: topology.ServiceID{Name: "pong"},
							Image: "rboyer/pingpong:latest",
							Port: 8080,
							EnvoyAdminPort: 19000,
							Command: []string{
								"-bind", "0.0.0.0:8080",
								"-dial", "127.0.0.1:9090",
								"-pong-chaos",
								"-dialfreq", "250ms",
								"-name", "pong",
							},
							Upstreams: []*topology.Upstream{{
								ID: topology.ServiceID{Name: "ping"},
								LocalPort: 9090,
								Peer: "peer-dc1-default",
							}},
						},
					},
				},
			},
			InitialConfigEntries: []api.ConfigEntry{
				&api.ExportedServicesConfigEntry{
					Name: "default",
					Services: []api.ExportedService{{
						Name: "pong",
						Consumers: []api.ServiceConsumer{{
							Peer: "peer-dc1-default",
						}},
					}},
				},
			},
		},
	},
	Peerings: []*topology.Peering{{
		Dialing: topology.PeerCluster{
			Name: "dc1",
		},
		Accepting: topology.PeerCluster{
			Name: "dc2",
		},
	}},
}
```
Once you have a topology configuration, you simply call the appropriate
`Launch` function to validate and boot the clusters.
You may also modify your original configuration (in some allowed ways) and call
`Relaunch` on an existing topology, which will differentially adjust the running
infrastructure. This can be useful for things like upgrading instances in place
or subtly reconfiguring them.
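A minimal sketch of that flow is below. The `sprawl.Launch` entry point and its
arguments are assumptions for illustration only (only `Relaunch` and the node
`Disabled` field appear in this change), so treat this as a sketch rather than
the definitive API:
```
// Sketch only: the Launch signature shown here is assumed, not documented
// in this change. sprawltest.Launch (next section) is the tested path.
sp, err := sprawl.Launch(logger, workdir, cfg)
if err != nil {
	return err
}

// Modify the original config in an allowed way (Node.Disabled is a field
// the deployer already honors) and differentially adjust the running infra.
cfg.Clusters[0].Nodes[2].Disabled = true
if err := sp.Relaunch(cfg); err != nil {
	return err
}
```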
### For Testing
It is meant to be consumed primarily by unit tests that need a complex,
reasonably realistic Consul setup. For that use case, use the `sprawl/sprawltest` wrapper:
```
func TestSomething(t *testing.T) {
	cfg := &topology.Config{...}
	sp := sprawltest.Launch(t, cfg)
	// do stuff with 'sp'
}
```
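The `// do stuff with 'sp'` step typically means obtaining a Consul API client
for one of the launched clusters and asserting against it. The accessor name
below is hypothetical and shown purely for illustration; the catalog call itself
is the standard `github.com/hashicorp/consul/api` client:
```
// Hypothetical accessor name, for illustration only; check the sprawl
// package for the actual way to obtain an *api.Client.
client, err := sp.APIClientForCluster("dc1", "")
if err != nil {
	t.Fatal(err)
}

// Standard Consul API call against the launched cluster.
nodes, _, err := client.Catalog().Nodes(nil)
if err != nil {
	t.Fatal(err)
}
t.Logf("dc1 has %d catalog nodes", len(nodes))
```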

9
testing/deployer/TODO.md Normal file

@@ -0,0 +1,9 @@
Missing things that should probably be added:
- consul-dataplane support for running mesh gateways
- consul-dataplane health check updates (automatic; manual)
- ServerExternalAddresses in a peering; possibly rig up a DNS name for this.
- after creating a token, verify it exists on all servers before proceeding (rather than sleep looping on not-founds)
- investigate strange gRPC bug that is currently papered over
- allow services to override their mesh gateway modes
- remove some of the debug prints of various things

44
testing/deployer/go.mod Normal file

@@ -0,0 +1,44 @@
module github.com/hashicorp/consul/testing/deployer
go 1.20
require (
github.com/google/go-cmp v0.5.9
github.com/hashicorp/consul/api v1.20.0
github.com/hashicorp/consul/sdk v0.13.1
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-hclog v1.5.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/hcl/v2 v2.16.2
github.com/mitchellh/copystructure v1.2.0
github.com/rboyer/safeio v0.2.2
github.com/stretchr/testify v1.8.2
golang.org/x/crypto v0.7.0
)
require (
github.com/agext/levenshtein v1.2.1 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

241
testing/deployer/go.sum Normal file

@@ -0,0 +1,241 @@
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc=
github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo=
github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY=
github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0=
github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rboyer/safeio v0.2.2 h1:XhtqyUTRleMYGyBt3ni4j2BtEh669U2ry2INnnd+B4k=
github.com/rboyer/safeio v0.2.2/go.mod h1:pSnr2LFXyn/c/fotxotyOdYy7pP/XSh6MpBmzXPjiNc=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY=
github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -0,0 +1,332 @@
package sprawl
import (
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
)
// TODO: fix this by checking that a token/policy works on ALL servers before
// returning from create.
func isACLNotFound(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), `ACL not found`)
}
func (s *Sprawl) bootstrapACLs(cluster string) error {
var (
client = s.clients[cluster]
logger = s.logger.With("cluster", cluster)
mgmtToken = s.secrets.ReadGeneric(cluster, secrets.BootstrapToken)
)
ac := client.ACL()
if mgmtToken != "" {
NOT_BOOTED:
ready, err := s.isACLBootstrapped(cluster, client)
if err != nil {
return fmt.Errorf("error checking if the acl system is bootstrapped: %w", err)
} else if !ready {
logger.Warn("ACL system is not ready yet")
time.Sleep(250 * time.Millisecond)
goto NOT_BOOTED
}
TRYAGAIN:
// check to see if it works
_, _, err = ac.TokenReadSelf(&api.QueryOptions{Token: mgmtToken})
if err != nil {
if isACLNotBootstrapped(err) {
logger.Warn("system is rebooting", "error", err)
time.Sleep(250 * time.Millisecond)
goto TRYAGAIN
}
return fmt.Errorf("management token no longer works: %w", err)
}
logger.Info("current management token", "token", mgmtToken)
return nil
}
TRYAGAIN2:
logger.Info("bootstrapping ACLs")
tok, _, err := ac.Bootstrap()
if err != nil {
if isACLNotBootstrapped(err) {
logger.Warn("system is rebooting", "error", err)
time.Sleep(250 * time.Millisecond)
goto TRYAGAIN2
}
return err
}
mgmtToken = tok.SecretID
s.secrets.SaveGeneric(cluster, secrets.BootstrapToken, mgmtToken)
logger.Info("current management token", "token", mgmtToken)
return nil
}
func isACLNotBootstrapped(err error) bool {
switch {
case strings.Contains(err.Error(), "ACL system must be bootstrapped before making any requests that require authorization"):
return true
case strings.Contains(err.Error(), "The ACL system is currently in legacy mode"):
return true
}
return false
}
func (s *Sprawl) isACLBootstrapped(cluster string, client *api.Client) (bool, error) {
policy, _, err := client.ACL().PolicyReadByName("global-management", &api.QueryOptions{
Token: s.secrets.ReadGeneric(cluster, secrets.BootstrapToken),
})
if err != nil {
if strings.Contains(err.Error(), "Unexpected response code: 403 (ACL not found)") {
return false, nil
} else if isACLNotBootstrapped(err) {
return false, nil
}
return false, err
}
return policy != nil, nil
}
func (s *Sprawl) createAnonymousToken(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
if err := s.createAnonymousPolicy(cluster); err != nil {
return err
}
token, err := CreateOrUpdateToken(client, anonymousToken())
if err != nil {
return err
}
logger.Info("created anonymous token",
"token", token.SecretID,
)
return nil
}
func (s *Sprawl) createAnonymousPolicy(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
op, err := CreateOrUpdatePolicy(client, anonymousPolicy(cluster.Enterprise))
if err != nil {
return err
}
logger.Info("created anonymous policy",
"policy-name", op.Name,
"policy-id", op.ID,
)
return nil
}
func (s *Sprawl) createAgentTokens(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
for _, node := range cluster.Nodes {
// NOTE: always create tokens even for disabled nodes.
if !node.IsAgent() {
continue
}
if tok := s.secrets.ReadAgentToken(cluster.Name, node.ID()); tok == "" {
token, err := CreateOrUpdateToken(client, tokenForNode(node, cluster.Enterprise))
if err != nil {
return err
}
logger.Info("created agent token",
"node", node.ID(),
"token", token.SecretID,
)
s.secrets.SaveAgentToken(cluster.Name, node.ID(), token.SecretID)
}
}
return nil
}
// Create a policy to allow super permissive catalog reads across namespace
// boundaries.
func (s *Sprawl) createCrossNamespaceCatalogReadPolicies(cluster *topology.Cluster, partition string) error {
if !cluster.Enterprise {
return nil
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
op, err := CreateOrUpdatePolicy(client, policyForCrossNamespaceRead(partition))
if err != nil {
return err
}
logger.Info("created cross-ns-catalog-read policy",
"policy-name", op.Name,
"policy-id", op.ID,
"partition", partition,
)
return nil
}
func (s *Sprawl) createAllServiceTokens() error {
for _, cluster := range s.topology.Clusters {
if err := s.createServiceTokens(cluster); err != nil {
return fmt.Errorf("createServiceTokens[%s]: %w", cluster.Name, err)
}
}
return nil
}
func (s *Sprawl) createServiceTokens(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
sids := make(map[topology.ServiceID]struct{})
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
continue
}
for _, svc := range node.Services {
sid := svc.ID
if _, done := sids[sid]; done {
continue
}
var overridePolicy *api.ACLPolicy
if svc.IsMeshGateway {
var err error
overridePolicy, err = CreateOrUpdatePolicy(client, policyForMeshGateway(svc, cluster.Enterprise))
if err != nil {
return fmt.Errorf("could not create policy: %w", err)
}
}
token, err := CreateOrUpdateToken(client, tokenForService(svc, overridePolicy, cluster.Enterprise))
if err != nil {
return fmt.Errorf("could not create token: %w", err)
}
logger.Info("created service token",
"service", svc.ID.Name,
"namespace", svc.ID.Namespace,
"partition", svc.ID.Partition,
"token", token.SecretID,
)
s.secrets.SaveServiceToken(cluster.Name, sid, token.SecretID)
sids[sid] = struct{}{}
}
}
return nil
}
func CreateOrUpdateToken(client *api.Client, t *api.ACLToken) (*api.ACLToken, error) {
ac := client.ACL()
currentToken, err := getTokenByDescription(client, t.Description, &api.QueryOptions{
Partition: t.Partition,
Namespace: t.Namespace,
})
if err != nil {
return nil, err
} else if currentToken != nil {
t.AccessorID = currentToken.AccessorID
t.SecretID = currentToken.SecretID
}
if t.AccessorID != "" {
t, _, err = ac.TokenUpdate(t, nil)
} else {
t, _, err = ac.TokenCreate(t, nil)
}
if err != nil {
return nil, err
}
return t, nil
}
func getTokenByDescription(client *api.Client, description string, opts *api.QueryOptions) (*api.ACLToken, error) {
ac := client.ACL()
tokens, _, err := ac.TokenList(opts)
if err != nil {
return nil, err
}
for _, tokenEntry := range tokens {
if tokenEntry.Description == description {
token, _, err := ac.TokenRead(tokenEntry.AccessorID, opts)
if err != nil {
return nil, err
}
return token, nil
}
}
return nil, nil
}
func CreateOrUpdatePolicy(client *api.Client, p *api.ACLPolicy) (*api.ACLPolicy, error) {
ac := client.ACL()
currentPolicy, _, err := ac.PolicyReadByName(p.Name, &api.QueryOptions{
Partition: p.Partition,
Namespace: p.Namespace,
})
// There is a quirk about Consul 1.14.x, where: if reading a policy yields
// an empty result, we return "ACL not found". It's safe to ignore this here,
// because if the Client's ACL token truly doesn't exist, then the create fails below.
if err != nil && !strings.Contains(err.Error(), "ACL not found") {
return nil, err
} else if currentPolicy != nil {
p.ID = currentPolicy.ID
}
if p.ID != "" {
p, _, err = ac.PolicyUpdate(p, nil)
} else {
p, _, err = ac.PolicyCreate(p, nil)
}
if err != nil {
return nil, err
}
return p, nil
}


@@ -0,0 +1,160 @@
package sprawl
import (
"fmt"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
)
func policyForCrossNamespaceRead(partition string) *api.ACLPolicy {
return &api.ACLPolicy{
Name: "cross-ns-catalog-read",
Description: "cross-ns-catalog-read",
Partition: partition,
Rules: fmt.Sprintf(`
partition %[1]q {
namespace_prefix "" {
node_prefix "" { policy = "read" }
service_prefix "" { policy = "read" }
}
}
`, partition),
}
}
const anonymousTokenAccessorID = "00000000-0000-0000-0000-000000000002"
func anonymousToken() *api.ACLToken {
return &api.ACLToken{
AccessorID: anonymousTokenAccessorID,
// SecretID: "anonymous",
Description: "anonymous",
Local: false,
Policies: []*api.ACLTokenPolicyLink{
{
Name: "anonymous",
},
},
}
}
func anonymousPolicy(enterprise bool) *api.ACLPolicy {
p := &api.ACLPolicy{
Name: "anonymous",
Description: "anonymous",
}
if enterprise {
p.Rules = `
partition_prefix "" {
namespace_prefix "" {
node_prefix "" { policy = "read" }
service_prefix "" { policy = "read" }
}
}
`
} else {
p.Rules = `
node_prefix "" { policy = "read" }
service_prefix "" { policy = "read" }
`
}
return p
}
func tokenForNode(node *topology.Node, enterprise bool) *api.ACLToken {
nid := node.ID()
tokenName := "agent--" + nid.ACLString()
token := &api.ACLToken{
Description: tokenName,
Local: false,
NodeIdentities: []*api.ACLNodeIdentity{{
NodeName: node.PodName(),
Datacenter: node.Datacenter,
}},
}
if enterprise {
token.Partition = node.Partition
token.Namespace = "default"
}
return token
}
func tokenForService(svc *topology.Service, overridePolicy *api.ACLPolicy, enterprise bool) *api.ACLToken {
token := &api.ACLToken{
Description: "service--" + svc.ID.ACLString(),
Local: false,
}
if overridePolicy != nil {
token.Policies = []*api.ACLTokenPolicyLink{{ID: overridePolicy.ID}}
} else {
token.ServiceIdentities = []*api.ACLServiceIdentity{{
ServiceName: svc.ID.Name,
}}
}
if enterprise {
token.Namespace = svc.ID.Namespace
token.Partition = svc.ID.Partition
}
return token
}
func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy {
policyName := "mesh-gateway--" + svc.ID.ACLString()
policy := &api.ACLPolicy{
Name: policyName,
Description: policyName,
}
if enterprise {
policy.Partition = svc.ID.Partition
policy.Namespace = "default"
}
if enterprise {
policy.Rules = `
namespace_prefix "" {
service "mesh-gateway" {
policy = "write"
}
service_prefix "" {
policy = "read"
}
node_prefix "" {
policy = "read"
}
}
agent_prefix "" {
policy = "read"
}
# for peering
mesh = "write"
peering = "read"
`
} else {
policy.Rules = `
service "mesh-gateway" {
policy = "write"
}
service_prefix "" {
policy = "read"
}
node_prefix "" {
policy = "read"
}
agent_prefix "" {
policy = "read"
}
# for peering
mesh = "write"
peering = "read"
`
}
return policy
}


@@ -0,0 +1,520 @@
package sprawl
import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/build"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
const (
sharedBootstrapToken = "root"
// sharedBootstrapToken = "ec59aa56-1996-4ff1-911a-f5d782552a13"
sharedAgentRecoveryToken = "22082b05-05c9-4a0a-b3da-b9685ac1d688"
)
func (s *Sprawl) launch() error {
return s.launchType(true)
}
func (s *Sprawl) relaunch() error {
return s.launchType(false)
}
func (s *Sprawl) launchType(firstTime bool) (launchErr error) {
if err := build.DockerImages(s.logger, s.runner, s.topology); err != nil {
return fmt.Errorf("build.DockerImages: %w", err)
}
if firstTime {
// Initialize secrets the easy way for now (same in all clusters).
gossipKey, err := newGossipKey()
if err != nil {
return fmt.Errorf("newGossipKey: %w", err)
}
for _, cluster := range s.topology.Clusters {
s.secrets.SaveGeneric(cluster.Name, secrets.BootstrapToken, sharedBootstrapToken)
s.secrets.SaveGeneric(cluster.Name, secrets.AgentRecovery, sharedAgentRecoveryToken)
s.secrets.SaveGeneric(cluster.Name, secrets.GossipKey, gossipKey)
// Give servers a copy of the bootstrap token for use as their agent tokens
// to avoid complicating the chicken/egg situation for startup.
for _, node := range cluster.Nodes {
if node.IsServer() { // include disabled
s.secrets.SaveAgentToken(cluster.Name, node.ID(), sharedBootstrapToken)
}
}
}
}
var cleanupFuncs []func()
defer func() {
for i := len(cleanupFuncs) - 1; i >= 0; i-- {
cleanupFuncs[i]()
}
}()
if firstTime {
var err error
s.generator, err = tfgen.NewGenerator(
s.logger.Named("tfgen"),
s.runner,
s.topology,
&s.secrets,
s.workdir,
s.license,
)
if err != nil {
return err
}
} else {
s.generator.SetTopology(s.topology)
}
cleanupFuncs = append(cleanupFuncs, func() {
// Log the error before the cleanup so you don't have to wait to see
// the cause.
if launchErr != nil {
s.logger.Error("fatal error during launch", "error", launchErr)
}
_ = s.generator.DestroyAllQuietly()
})
if firstTime {
// The networking phase is special. We have to pick a random subnet and
// hope. Once we have this established once it is immutable for future
// runs.
if err := s.initNetworkingAndVolumes(); err != nil {
return fmt.Errorf("initNetworkingAndVolumes: %w", err)
}
}
if err := s.assignIPAddresses(); err != nil {
return fmt.Errorf("assignIPAddresses: %w", err)
}
// The previous terraform run should have made the special volume for us.
if err := s.initTLS(context.TODO()); err != nil {
return fmt.Errorf("initTLS: %w", err)
}
if firstTime {
if err := s.createFirstTime(); err != nil {
return err
}
s.generator.MarkLaunched()
} else {
if err := s.updateExisting(); err != nil {
return err
}
}
if err := s.waitForPeeringEstablishment(); err != nil {
return fmt.Errorf("waitForPeeringEstablishment: %w", err)
}
cleanupFuncs = nil // reset
return nil
}
func (s *Sprawl) Stop() error {
var merr error
if s.generator != nil {
if err := s.generator.DestroyAllQuietly(); err != nil {
merr = multierror.Append(merr, err)
}
}
return merr
}
const dockerOutOfNetworksErrorMessage = `Unable to create network: Error response from daemon: Pool overlaps with other one on this address space`
var ErrDockerNetworkCollision = errors.New("could not create one or more docker networks for use due to subnet collision")
func (s *Sprawl) initNetworkingAndVolumes() error {
var lastErr error
for attempts := 0; attempts < 5; attempts++ {
err := s.generator.Generate(tfgen.StepNetworks)
if err != nil && strings.Contains(err.Error(), dockerOutOfNetworksErrorMessage) {
lastErr = ErrDockerNetworkCollision
s.logger.Warn(ErrDockerNetworkCollision.Error()+"; retrying", "attempt", attempts+1)
time.Sleep(1 * time.Second)
continue
} else if err != nil {
return fmt.Errorf("generator[networks]: %w", err)
}
return nil
}
return lastErr
}
func (s *Sprawl) assignIPAddresses() error {
// assign ips now that we have network ips known to us
for _, net := range s.topology.Networks {
if len(net.IPPool) == 0 {
return fmt.Errorf("network %q does not have any ip assignments", net.Name)
}
}
for _, cluster := range s.topology.Clusters {
for _, node := range cluster.Nodes {
for _, addr := range node.Addresses {
net, ok := s.topology.Networks[addr.Network]
if !ok {
return fmt.Errorf("unknown network %q", addr.Network)
}
addr.IPAddress = net.IPByIndex(node.Index)
}
}
}
return nil
}
func (s *Sprawl) initConsulServers() error {
if err := s.generator.Generate(tfgen.StepServers); err != nil {
return fmt.Errorf("generator[servers]: %w", err)
}
// s.logger.Info("ALL", "t", jd(s.topology)) // TODO
// Create token-less api clients first.
for _, cluster := range s.topology.Clusters {
node := cluster.FirstServer()
var err error
s.clients[cluster.Name], err = util.ProxyAPIClient(
node.LocalProxyPort(),
node.LocalAddress(),
8500,
"", /*no token yet*/
)
if err != nil {
return fmt.Errorf("error creating initial bootstrap client for cluster=%s: %w", cluster.Name, err)
}
}
if err := s.rejoinAllConsulServers(); err != nil {
return err
}
for _, cluster := range s.topology.Clusters {
err := s.bootstrapACLs(cluster.Name)
if err != nil {
return fmt.Errorf("bootstrap[%s]: %w", cluster.Name, err)
}
mgmtToken := s.secrets.ReadGeneric(cluster.Name, secrets.BootstrapToken)
// Reconfigure the clients to use a management token.
node := cluster.FirstServer()
s.clients[cluster.Name], err = util.ProxyAPIClient(
node.LocalProxyPort(),
node.LocalAddress(),
8500,
mgmtToken,
)
if err != nil {
return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err)
}
// For some reason the grpc resolver stuff for partitions takes some
// time to get ready.
s.waitForLocalWrites(cluster, mgmtToken)
// Create tenancies so that the ACL tokens and clients have somewhere to go.
if cluster.Enterprise {
if err := s.initTenancies(cluster); err != nil {
return fmt.Errorf("initTenancies[%s]: %w", cluster.Name, err)
}
}
if err := s.populateInitialConfigEntries(cluster); err != nil {
return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err)
}
if err := s.createAnonymousToken(cluster); err != nil {
return fmt.Errorf("createAnonymousToken[%s]: %w", cluster.Name, err)
}
// Create tokens for all of the agents to use for anti-entropy.
//
// NOTE: this will cause the servers to roll to pick up the change to
// the acl{tokens{agent=XXX}}} section.
if err := s.createAgentTokens(cluster); err != nil {
return fmt.Errorf("createAgentTokens[%s]: %w", cluster.Name, err)
}
}
return nil
}
func (s *Sprawl) createFirstTime() error {
if err := s.initConsulServers(); err != nil {
return fmt.Errorf("initConsulServers: %w", err)
}
if err := s.generator.Generate(tfgen.StepAgents); err != nil {
return fmt.Errorf("generator[agents]: %w", err)
}
for _, cluster := range s.topology.Clusters {
if err := s.waitForClientAntiEntropyOnce(cluster); err != nil {
return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
}
}
// Ideally we start services WITH a token initially, so we pre-create them
// before running terraform for them.
if err := s.createAllServiceTokens(); err != nil {
return fmt.Errorf("createAllServiceTokens: %w", err)
}
if err := s.registerAllServicesForDataplaneInstances(); err != nil {
return fmt.Errorf("registerAllServicesForDataplaneInstances: %w", err)
}
// We can do this ahead, because we've incrementally run terraform as
// we went.
if err := s.registerAllServicesToAgents(); err != nil {
return fmt.Errorf("registerAllServicesToAgents: %w", err)
}
// NOTE: start services WITH token initially
if err := s.generator.Generate(tfgen.StepServices); err != nil {
return fmt.Errorf("generator[services]: %w", err)
}
if err := s.initPeerings(); err != nil {
return fmt.Errorf("initPeerings: %w", err)
}
return nil
}
func (s *Sprawl) updateExisting() error {
if err := s.preRegenTasks(); err != nil {
return fmt.Errorf("preRegenTasks: %w", err)
}
// We save all of the terraform to the end. Some of the containers will
// be a little broken until we can do stuff like register services to
// new agents, which we cannot do until they come up.
if err := s.generator.Generate(tfgen.StepRelaunch); err != nil {
return fmt.Errorf("generator[relaunch]: %w", err)
}
if err := s.postRegenTasks(); err != nil {
return fmt.Errorf("postRegenTasks: %w", err)
}
// TODO: enforce that peering relationships cannot change
// TODO: include a fixup version of new peerings?
return nil
}
func (s *Sprawl) preRegenTasks() error {
for _, cluster := range s.topology.Clusters {
// Create tenancies so that the ACL tokens and clients have somewhere to go.
if cluster.Enterprise {
if err := s.initTenancies(cluster); err != nil {
return fmt.Errorf("initTenancies[%s]: %w", cluster.Name, err)
}
}
if err := s.populateInitialConfigEntries(cluster); err != nil {
return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err)
}
// Create tokens for all of the agents to use for anti-entropy.
if err := s.createAgentTokens(cluster); err != nil {
return fmt.Errorf("createAgentTokens[%s]: %w", cluster.Name, err)
}
}
// Ideally we start services WITH a token initially, so we pre-create them
// before running terraform for them.
if err := s.createAllServiceTokens(); err != nil {
return fmt.Errorf("createAllServiceTokens: %w", err)
}
if err := s.registerAllServicesForDataplaneInstances(); err != nil {
return fmt.Errorf("registerAllServicesForDataplaneInstances: %w", err)
}
return nil
}
func (s *Sprawl) postRegenTasks() error {
if err := s.rejoinAllConsulServers(); err != nil {
return err
}
for _, cluster := range s.topology.Clusters {
var err error
mgmtToken := s.secrets.ReadGeneric(cluster.Name, secrets.BootstrapToken)
// Reconfigure the clients to use a management token.
node := cluster.FirstServer()
s.clients[cluster.Name], err = util.ProxyAPIClient(
node.LocalProxyPort(),
node.LocalAddress(),
8500,
mgmtToken,
)
if err != nil {
return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err)
}
s.waitForLeader(cluster)
// For some reason the grpc resolver stuff for partitions takes some
// time to get ready.
s.waitForLocalWrites(cluster, mgmtToken)
}
for _, cluster := range s.topology.Clusters {
if err := s.waitForClientAntiEntropyOnce(cluster); err != nil {
return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
}
}
if err := s.registerAllServicesToAgents(); err != nil {
return fmt.Errorf("registerAllServicesToAgents: %w", err)
}
return nil
}
func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
tryKV := func() error {
_, err := client.KV().Put(&api.KVPair{
Key: "local-test",
Value: []byte("payload-for-local-test-in-" + cluster.Name),
}, nil)
return err
}
tryAP := func() error {
if !cluster.Enterprise {
return nil
}
_, _, err := client.Partitions().Create(context.Background(), &api.Partition{
Name: "placeholder",
}, &api.WriteOptions{Token: token})
return err
}
start := time.Now()
for attempts := 0; ; attempts++ {
if err := tryKV(); err != nil {
logger.Warn("local kv write failed; something is not ready yet", "error", err)
time.Sleep(500 * time.Millisecond)
continue
} else {
dur := time.Since(start)
logger.Info("local kv write success", "elapsed", dur, "retries", attempts)
}
break
}
if cluster.Enterprise {
start = time.Now()
for attempts := 0; ; attempts++ {
if err := tryAP(); err != nil {
logger.Warn("local partition write failed; something is not ready yet", "error", err)
time.Sleep(500 * time.Millisecond)
continue
} else {
dur := time.Since(start)
logger.Info("local partition write success", "elapsed", dur, "retries", attempts)
}
break
}
}
}
func (s *Sprawl) waitForClientAntiEntropyOnce(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
var (
queryOptionList = cluster.PartitionQueryOptionsList()
start = time.Now()
cc = client.Catalog()
)
for {
// Enumerate all of the nodes that are currently in the catalog. This
// will overmatch including things like fake nodes for agentless but
// that's ok.
current := make(map[topology.NodeID]*api.Node)
for _, queryOpts := range queryOptionList {
nodes, _, err := cc.Nodes(queryOpts)
if err != nil {
return err
}
for _, node := range nodes {
nid := topology.NewNodeID(node.Node, node.Partition)
current[nid] = node
}
}
// See if we have them all.
var stragglers []topology.NodeID
for _, node := range cluster.Nodes {
if !node.IsAgent() || node.Disabled {
continue
}
nid := node.CatalogID()
got, ok := current[nid]
if ok && len(got.TaggedAddresses) > 0 {
// this is a field that is not updated just due to serf reconcile
continue
}
stragglers = append(stragglers, nid)
}
if len(stragglers) == 0 {
dur := time.Since(start)
logger.Info("all nodes have posted node updates, so first anti-entropy has happened", "elapsed", dur)
return nil
}
logger.Info("not all client nodes have posted node updates yet", "nodes", stragglers)
time.Sleep(1 * time.Second)
}
}
func newGossipKey() (string, error) {
key := make([]byte, 16)
n, err := rand.Reader.Read(key)
if err != nil {
return "", fmt.Errorf("Error reading random data: %s", err)
}
if n != 16 {
return "", fmt.Errorf("Couldn't read enough entropy. Generate more entropy!")
}
return base64.StdEncoding.EncodeToString(key), nil
}


@@ -0,0 +1,425 @@
package sprawl
import (
"fmt"
"net/http"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
func (s *Sprawl) registerAllServicesToAgents() error {
for _, cluster := range s.topology.Clusters {
if err := s.registerServicesToAgents(cluster); err != nil {
return fmt.Errorf("registerServicesToAgents[%s]: %w", cluster.Name, err)
}
}
return nil
}
func (s *Sprawl) registerAllServicesForDataplaneInstances() error {
for _, cluster := range s.topology.Clusters {
if err := s.registerServicesForDataplaneInstances(cluster); err != nil {
return fmt.Errorf("registerServicesForDataplaneInstances[%s]: %w", cluster.Name, err)
}
}
return nil
}
func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error {
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
continue
}
if !node.IsAgent() {
continue
}
agentClient, err := util.ProxyAPIClient(
node.LocalProxyPort(),
node.LocalAddress(),
8500,
"", /*token will be in request*/
)
if err != nil {
return err
}
for _, svc := range node.Services {
if err := s.registerAgentService(agentClient, cluster, node, svc); err != nil {
return err
}
}
}
return nil
}
func (s *Sprawl) registerAgentService(
agentClient *api.Client,
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) error {
if !node.IsAgent() {
panic("called wrong method type")
}
if svc.IsMeshGateway {
return nil // handled at startup time for agent-full, but won't be for agent-less
}
var (
logger = s.logger.With("cluster", cluster.Name)
)
reg := &api.AgentServiceRegistration{
ID: svc.ID.Name,
Name: svc.ID.Name,
Port: svc.Port,
Meta: svc.Meta,
}
if cluster.Enterprise {
reg.Namespace = svc.ID.Namespace
reg.Partition = svc.ID.Partition
}
if !svc.DisableServiceMesh {
var upstreams []api.Upstream
for _, u := range svc.Upstreams {
uAPI := api.Upstream{
DestinationPeer: u.Peer,
DestinationName: u.ID.Name,
LocalBindAddress: u.LocalAddress,
LocalBindPort: u.LocalPort,
// Config map[string]interface{} `json:",omitempty" bexpr:"-"`
// MeshGateway MeshGatewayConfig `json:",omitempty"`
}
if cluster.Enterprise {
uAPI.DestinationNamespace = u.ID.Namespace
if u.Peer == "" {
uAPI.DestinationPartition = u.ID.Partition
}
}
upstreams = append(upstreams, uAPI)
}
reg.Connect = &api.AgentServiceConnect{
SidecarService: &api.AgentServiceRegistration{
Proxy: &api.AgentServiceConnectProxyConfig{
Upstreams: upstreams,
},
},
}
}
switch {
case svc.CheckTCP != "":
chk := &api.AgentServiceCheck{
Name: "up",
TCP: svc.CheckTCP,
Interval: "5s",
Timeout: "1s",
}
reg.Checks = append(reg.Checks, chk)
case svc.CheckHTTP != "":
chk := &api.AgentServiceCheck{
Name: "up",
HTTP: svc.CheckHTTP,
Method: "GET",
Interval: "5s",
Timeout: "1s",
}
reg.Checks = append(reg.Checks, chk)
}
// Switch token for every request.
hdr := make(http.Header)
hdr.Set("X-Consul-Token", s.secrets.ReadServiceToken(cluster.Name, svc.ID))
agentClient.SetHeaders(hdr)
RETRY:
if err := agentClient.Agent().ServiceRegister(reg); err != nil {
if isACLNotFound(err) {
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("failed to register service %q to node %q: %w", svc.ID, node.ID(), err)
}
logger.Info("registered service to client agent",
"service", svc.ID.Name,
"node", node.Name,
"namespace", svc.ID.Namespace,
"partition", svc.ID.Partition,
)
return nil
}
func (s *Sprawl) registerServicesForDataplaneInstances(cluster *topology.Cluster) error {
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
continue
}
if !node.IsDataplane() {
continue
}
if err := s.registerCatalogNode(cluster, node); err != nil {
return fmt.Errorf("error registering virtual node: %w", err)
}
for _, svc := range node.Services {
if err := s.registerCatalogService(cluster, node, svc); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !svc.DisableServiceMesh {
if err := s.registerCatalogSidecarService(cluster, node, svc); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
}
}
}
}
return nil
}
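// registerCatalogNode creates the synthetic catalog node that services on a
// dataplane (agentless) node are attached to, retrying while ACLs propagate.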
func (s *Sprawl) registerCatalogNode(
cluster *topology.Cluster,
node *topology.Node,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
reg := &api.CatalogRegistration{
Node: node.PodName(),
Address: node.LocalAddress(),
NodeMeta: map[string]string{
"dataplane-faux": "1",
},
}
if cluster.Enterprise {
reg.Partition = node.Partition
}
// register synthetic node
RETRY:
if _, err := client.Catalog().Register(reg, nil); err != nil {
if isACLNotFound(err) {
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error registering virtual node %s: %w", node.ID(), err)
}
logger.Info("virtual node created",
"node", node.ID(),
)
return nil
}
func (s *Sprawl) registerCatalogService(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
reg := serviceToCatalogRegistration(cluster, node, svc)
RETRY:
if _, err := client.Catalog().Register(reg, nil); err != nil {
if isACLNotFound(err) {
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
}
logger.Info("dataplane service created",
"service", svc.ID,
"node", node.ID(),
)
return nil
}
func (s *Sprawl) registerCatalogSidecarService(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
if svc.DisableServiceMesh {
panic("not valid")
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
pid, reg := serviceToSidecarCatalogRegistration(cluster, node, svc)
RETRY:
if _, err := client.Catalog().Register(reg, nil); err != nil {
if isACLNotFound(err) {
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
}
logger.Info("dataplane sidecar service created",
"service", pid,
"node", node.ID(),
)
return nil
}
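// serviceToCatalogRegistration converts a topology.Service on a dataplane node
// into a catalog registration payload, including tagged addresses, enterprise
// tenancy, and an optional synthetic health check.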
func serviceToCatalogRegistration(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) *api.CatalogRegistration {
reg := &api.CatalogRegistration{
Node: node.PodName(),
SkipNodeUpdate: true,
Service: &api.AgentService{
Kind: api.ServiceKindTypical,
ID: svc.ID.Name,
Service: svc.ID.Name,
Meta: svc.Meta,
Port: svc.Port,
Address: node.LocalAddress(),
},
}
if node.HasPublicAddress() {
reg.TaggedAddresses = map[string]string{
"lan": node.LocalAddress(),
"lan_ipv4": node.LocalAddress(),
"wan": node.PublicAddress(),
"wan_ipv4": node.PublicAddress(),
}
}
if cluster.Enterprise {
reg.Partition = svc.ID.Partition
reg.Service.Namespace = svc.ID.Namespace
reg.Service.Partition = svc.ID.Partition
}
if svc.HasCheck() {
chk := &api.HealthCheck{
Name: "external sync",
// Type: "external-sync",
Status: "passing", // TODO
ServiceID: svc.ID.Name,
ServiceName: svc.ID.Name,
Output: "",
}
if cluster.Enterprise {
chk.Namespace = svc.ID.Namespace
chk.Partition = svc.ID.Partition
}
switch {
case svc.CheckTCP != "":
chk.Definition.TCP = svc.CheckTCP
case svc.CheckHTTP != "":
chk.Definition.HTTP = svc.CheckHTTP
chk.Definition.Method = "GET"
}
reg.Checks = append(reg.Checks, chk)
}
return reg
}
func serviceToSidecarCatalogRegistration(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) (topology.ServiceID, *api.CatalogRegistration) {
pid := svc.ID
pid.Name += "-sidecar-proxy"
reg := &api.CatalogRegistration{
Node: node.PodName(),
SkipNodeUpdate: true,
Service: &api.AgentService{
Kind: api.ServiceKindConnectProxy,
ID: pid.Name,
Service: pid.Name,
Meta: svc.Meta,
Port: svc.EnvoyPublicListenerPort,
Address: node.LocalAddress(),
Proxy: &api.AgentServiceConnectProxyConfig{
DestinationServiceName: svc.ID.Name,
DestinationServiceID: svc.ID.Name,
LocalServicePort: svc.Port,
},
},
Checks: []*api.HealthCheck{{
Name: "external sync",
// Type: "external-sync",
Status: "passing", // TODO
ServiceID: pid.Name,
ServiceName: pid.Name,
Definition: api.HealthCheckDefinition{
TCP: fmt.Sprintf("%s:%d", node.LocalAddress(), svc.EnvoyPublicListenerPort),
},
Output: "",
}},
}
if node.HasPublicAddress() {
reg.TaggedAddresses = map[string]string{
"lan": node.LocalAddress(),
"lan_ipv4": node.LocalAddress(),
"wan": node.PublicAddress(),
"wan_ipv4": node.PublicAddress(),
}
}
if cluster.Enterprise {
reg.Partition = pid.Partition
reg.Service.Namespace = pid.Namespace
reg.Service.Partition = pid.Partition
reg.Checks[0].Namespace = pid.Namespace
reg.Checks[0].Partition = pid.Partition
}
for _, u := range svc.Upstreams {
pu := api.Upstream{
DestinationName: u.ID.Name,
DestinationPeer: u.Peer,
LocalBindAddress: u.LocalAddress,
LocalBindPort: u.LocalPort,
}
if cluster.Enterprise {
pu.DestinationNamespace = u.ID.Namespace
if u.Peer == "" {
pu.DestinationPartition = u.ID.Partition
}
}
reg.Service.Proxy.Upstreams = append(reg.Service.Proxy.Upstreams, pu)
}
return pid, reg
}

View File

@ -0,0 +1,58 @@
package sprawl
import (
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
)
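// populateInitialConfigEntries writes the cluster's InitialConfigEntries via
// the config entry API; the known intentions-migration error is logged and
// tolerated rather than treated as fatal.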
func (s *Sprawl) populateInitialConfigEntries(cluster *topology.Cluster) error {
if len(cluster.InitialConfigEntries) == 0 {
return nil
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
for _, ce := range cluster.InitialConfigEntries {
_, _, err := client.ConfigEntries().Set(ce, nil)
if err != nil {
if ce.GetKind() == api.ServiceIntentions && strings.Contains(err.Error(), intentionsMigrationError) {
logger.Warn("known error writing initial config entry; trying again",
"kind", ce.GetKind(),
"name", ce.GetName(),
"namespace", ce.GetNamespace(),
"partition", ce.GetPartition(),
"error", err,
)
time.Sleep(500 * time.Millisecond)
continue
}
return fmt.Errorf(
"error persisting config entry kind=%q name=%q namespace=%q partition=%q: %w",
ce.GetKind(),
ce.GetName(),
ce.GetNamespace(),
ce.GetPartition(),
err,
)
}
logger.Info("wrote initial config entry",
"kind", ce.GetKind(),
"name", ce.GetName(),
"namespace", ce.GetNamespace(),
"partition", ce.GetPartition(),
)
}
return nil
}
const intentionsMigrationError = `Intentions are read only while being upgraded to config entries`

View File

@ -0,0 +1,98 @@
package sprawl
import (
"errors"
"fmt"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
func getLeader(client *api.Client) (string, error) {
leaderAdd, err := client.Status().Leader()
if err != nil {
return "", fmt.Errorf("could not query leader: %w", err)
}
if leaderAdd == "" {
return "", errors.New("no leader available")
}
return leaderAdd, nil
}
func (s *Sprawl) waitForLeader(cluster *topology.Cluster) {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
for {
leader, err := client.Status().Leader()
if leader != "" && err == nil {
logger.Info("cluster has leader", "leader_addr", leader)
return
}
logger.Info("cluster has no leader yet", "error", err)
time.Sleep(500 * time.Millisecond)
}
}
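// rejoinAllConsulServers joins the servers of every cluster together via the
// first server's agent API and then waits for each cluster to elect a leader.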
func (s *Sprawl) rejoinAllConsulServers() error {
// Join the servers together.
for _, cluster := range s.topology.Clusters {
if err := s.rejoinServers(cluster); err != nil {
return fmt.Errorf("rejoinServers[%s]: %w", cluster.Name, err)
}
s.waitForLeader(cluster)
}
return nil
}
func (s *Sprawl) rejoinServers(cluster *topology.Cluster) error {
var (
// client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
servers := cluster.ServerNodes()
recoveryToken := s.secrets.ReadGeneric(cluster.Name, secrets.AgentRecovery)
node0, rest := servers[0], servers[1:]
client, err := util.ProxyNotPooledAPIClient(
node0.LocalProxyPort(),
node0.LocalAddress(),
8500,
recoveryToken,
)
if err != nil {
return fmt.Errorf("could not get client for %q: %w", node0.ID(), err)
}
logger.Info("joining servers together",
"from", node0.ID(),
"rest", nodeSliceToNodeIDSlice(rest),
)
for _, node := range rest {
for {
err = client.Agent().Join(node.LocalAddress(), false)
if err == nil {
break
}
logger.Warn("could not join", "from", node0.ID(), "to", node.ID(), "error", err)
time.Sleep(500 * time.Millisecond)
}
}
return nil
}
func nodeSliceToNodeIDSlice(nodes []*topology.Node) []topology.NodeID {
var out []topology.NodeID
for _, node := range nodes {
out = append(out, node.ID())
}
return out
}

View File

@ -0,0 +1,8 @@
package sprawl
import "encoding/json"
func jd(v any) string {
b, _ := json.MarshalIndent(v, "", " ")
return string(b)
}

View File

@ -0,0 +1,170 @@
package sprawl
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"text/tabwriter"
)
// PrintDetails will dump relevant addressing and naming data to the logger for
// human interaction purposes.
func (s *Sprawl) PrintDetails() error {
det := logDetails{
TopologyID: s.topology.ID,
}
for _, cluster := range s.topology.Clusters {
client := s.clients[cluster.Name]
cfg, err := client.Operator().RaftGetConfiguration(nil)
if err != nil {
return fmt.Errorf("could not get raft config for cluster %q: %w", cluster.Name, err)
}
var leaderNode string
for _, svr := range cfg.Servers {
if svr.Leader {
leaderNode = strings.TrimSuffix(svr.Node, "-pod")
}
}
cd := clusterDetails{
Name: cluster.Name,
Leader: leaderNode,
}
for _, node := range cluster.Nodes {
if node.Disabled {
continue
}
var addrs []string
for _, addr := range node.Addresses {
addrs = append(addrs, addr.Network+"="+addr.IPAddress)
}
sort.Strings(addrs)
if node.IsServer() {
cd.Apps = append(cd.Apps, appDetail{
Type: "server",
Container: node.DockerName(),
Addresses: addrs,
ExposedPort: node.ExposedPort(8500),
})
}
for _, svc := range node.Services {
if svc.IsMeshGateway {
cd.Apps = append(cd.Apps, appDetail{
Type: "mesh-gateway",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(svc.Port),
ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort),
Addresses: addrs,
Service: svc.ID.String(),
})
} else {
cd.Apps = append(cd.Apps, appDetail{
Type: "app",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(svc.Port),
ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort),
Addresses: addrs,
Service: svc.ID.String(),
})
}
}
}
det.Clusters = append(det.Clusters, cd)
}
var buf bytes.Buffer
w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
score := map[string]int{
"server": 0,
"mesh-gateway": 1,
"app": 2,
}
for _, cluster := range det.Clusters {
fmt.Fprintf(w, "CLUSTER\tTYPE\tCONTAINER\tNAME\tADDRS\tPORTS\t\n")
sort.Slice(cluster.Apps, func(i, j int) bool {
a := cluster.Apps[i]
b := cluster.Apps[j]
asc := score[a.Type]
bsc := score[b.Type]
if asc < bsc {
return true
} else if asc > bsc {
return false
}
if a.Container < b.Container {
return true
} else if a.Container > b.Container {
return false
}
if a.Service < b.Service {
return true
} else if a.Service > b.Service {
return false
}
return a.ExposedPort < b.ExposedPort
})
for _, d := range cluster.Apps {
if d.Type == "server" && d.Container == cluster.Leader {
d.Type = "leader"
}
portStr := "app=" + strconv.Itoa(d.ExposedPort)
if d.ExposedEnvoyAdminPort > 0 {
portStr += " envoy=" + strconv.Itoa(d.ExposedEnvoyAdminPort)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t\n",
cluster.Name,
d.Type,
d.Container,
d.Service,
strings.Join(d.Addresses, ", "),
portStr,
)
}
fmt.Fprintf(w, "\t\t\t\t\t\n")
}
w.Flush()
s.logger.Info("CURRENT SPRAWL DETAILS", "details", buf.String())
return nil
}
type logDetails struct {
TopologyID string
Clusters []clusterDetails
}
type clusterDetails struct {
Name string
Leader string
Apps []appDetail
}
type appDetail struct {
Type string // server|mesh-gateway|app
Container string
Addresses []string
ExposedPort int `json:",omitempty"`
ExposedEnvoyAdminPort int `json:",omitempty"`
// just services
Service string `json:",omitempty"`
}

View File

@ -0,0 +1,174 @@
package sprawl
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
)
func (s *Sprawl) ensureLicense() error {
if s.license != "" {
return nil
}
v, err := readLicense()
if err != nil {
return err
}
s.license = v
return nil
}
func readLicense() (string, error) {
if license := os.Getenv("CONSUL_LICENSE"); license != "" {
return license, nil
}
licensePath := os.Getenv("CONSUL_LICENSE_PATH")
if licensePath == "" {
return "", nil
}
licenseBytes, err := os.ReadFile(licensePath)
if err != nil {
return "", err
}
return strings.TrimSpace(string(licenseBytes)), nil
}
func (s *Sprawl) initTenancies(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
// TODO: change this to UPSERT
var (
partClient = client.Partitions()
nsClient = client.Namespaces()
partitionNameList []string
)
for _, ap := range cluster.Partitions {
if ap.Name != "default" {
old, _, err := partClient.Read(context.Background(), ap.Name, nil)
if err != nil {
return fmt.Errorf("error reading partition %q: %w", ap.Name, err)
}
if old == nil {
obj := &api.Partition{
Name: ap.Name,
}
_, _, err := partClient.Create(context.Background(), obj, nil)
if err != nil {
return fmt.Errorf("error creating partition %q: %w", ap.Name, err)
}
logger.Info("created partition", "partition", ap.Name)
}
partitionNameList = append(partitionNameList, ap.Name)
}
if err := s.createCrossNamespaceCatalogReadPolicies(cluster, ap.Name); err != nil {
return fmt.Errorf("createCrossNamespaceCatalogReadPolicies[%s]: %w", ap.Name, err)
}
for _, ns := range ap.Namespaces {
old, _, err := nsClient.Read(ns, &api.QueryOptions{Partition: ap.Name})
if err != nil {
return err
}
if old == nil {
obj := &api.Namespace{
Partition: ap.Name,
Name: ns,
ACLs: &api.NamespaceACLConfig{
PolicyDefaults: []api.ACLLink{
{Name: "cross-ns-catalog-read"},
},
},
}
if ns == "default" {
_, _, err := nsClient.Update(obj, nil)
if err != nil {
return err
}
logger.Info("updated namespace", "namespace", ns, "partition", ap.Name)
} else {
_, _, err := nsClient.Create(obj, nil)
if err != nil {
return err
}
logger.Info("created namespace", "namespace", ns, "partition", ap.Name)
}
}
}
}
if err := s.waitUntilPartitionedSerfIsReady(context.TODO(), cluster, partitionNameList); err != nil {
return fmt.Errorf("waitUntilPartitionedSerfIsReady: %w", err)
}
return nil
}
func (s *Sprawl) waitUntilPartitionedSerfIsReady(ctx context.Context, cluster *topology.Cluster, partitions []string) error {
var (
logger = s.logger.With("cluster", cluster.Name)
)
readyLogs := make(map[string]string)
for _, partition := range partitions {
readyLogs[partition] = `agent.server: Added serf partition to gossip network: partition=` + partition
}
start := time.Now()
logger.Info("waiting for partitioned serf to be ready on all servers", "partitions", partitions)
for _, node := range cluster.Nodes {
if !node.IsServer() || node.Disabled {
continue
}
var buf bytes.Buffer
for {
buf.Reset()
err := s.runner.DockerExec(ctx, []string{
"logs", node.DockerName(),
}, &buf, nil)
if err != nil {
return fmt.Errorf("could not fetch docker logs from node %q: %w", node.ID(), err)
}
var (
body = buf.String()
found []string
)
for partition, readyLog := range readyLogs {
if strings.Contains(body, readyLog) {
found = append(found, partition)
}
}
if len(found) == len(readyLogs) {
break
}
// poll docker logs again after a short delay
time.Sleep(500 * time.Millisecond)
}
}
logger.Info("partitioned serf is ready on all servers", "partitions", partitions, "elapsed", time.Since(start))
return nil
}

View File

@ -0,0 +1,11 @@
package sprawl
// Deprecated: remove
func TruncateSquidError(err error) error {
return err
}
// Deprecated: remove
func IsSquid503(err error) bool {
return false
}

View File

@ -0,0 +1,83 @@
package build
import (
"context"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/topology"
)
const dockerfileEnvoy = `
ARG CONSUL_IMAGE
ARG ENVOY_IMAGE
FROM ${CONSUL_IMAGE}
FROM ${ENVOY_IMAGE}
COPY --from=0 /bin/consul /bin/consul
`
// FROM hashicorp/consul-dataplane:latest
// COPY --from=busybox:uclibc /bin/sh /bin/sh
const dockerfileDataplane = `
ARG DATAPLANE_IMAGE
FROM busybox:latest
FROM ${DATAPLANE_IMAGE}
COPY --from=0 /bin/busybox /bin/busybox
USER 0:0
RUN ["busybox", "--install", "/bin", "-s"]
USER 100:0
ENTRYPOINT []
`
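// DockerImages builds the combined consul+envoy image and the patched
// consul-dataplane image required by each node in the topology, building each
// unique tag only once.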
func DockerImages(
logger hclog.Logger,
run *runner.Runner,
t *topology.Topology,
) error {
logw := logger.Named("docker").StandardWriter(&hclog.StandardLoggerOptions{ForceLevel: hclog.Info})
built := make(map[string]struct{})
for _, c := range t.Clusters {
for _, n := range c.Nodes {
joint := n.Images.EnvoyConsulImage()
if _, ok := built[joint]; joint != "" && !ok {
logger.Info("building image", "image", joint)
err := run.DockerExec(context.TODO(), []string{
"build",
"--build-arg",
"CONSUL_IMAGE=" + n.Images.Consul,
"--build-arg",
"ENVOY_IMAGE=" + n.Images.Envoy,
"-t", joint,
"-",
}, logw, strings.NewReader(dockerfileEnvoy))
if err != nil {
return err
}
built[joint] = struct{}{}
}
cdp := n.Images.LocalDataplaneImage()
if _, ok := built[cdp]; cdp != "" && !ok {
logger.Info("building image", "image", cdp)
err := run.DockerExec(context.TODO(), []string{
"build",
"--build-arg",
"DATAPLANE_IMAGE=" + n.Images.Dataplane,
"-t", cdp,
"-",
}, logw, strings.NewReader(dockerfileDataplane))
if err != nil {
return err
}
built[cdp] = struct{}{}
}
}
}
return nil
}

View File

@ -0,0 +1,120 @@
package runner
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"github.com/hashicorp/go-hclog"
)
type Runner struct {
logger hclog.Logger
tfBin string
dockerBin string
}
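// Load resolves the external binaries the deployer shells out to (docker and
// terraform) and returns a Runner bound to their paths.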
func Load(logger hclog.Logger) (*Runner, error) {
r := &Runner{
logger: logger,
}
type item struct {
name string
dest *string
warn string // optional
}
lookup := []item{
{"docker", &r.dockerBin, ""},
{"terraform", &r.tfBin, ""},
}
var (
bins []string
err error
)
for _, i := range lookup {
*i.dest, err = exec.LookPath(i.name)
if err != nil {
if errors.Is(err, exec.ErrNotFound) {
if i.warn != "" {
return nil, fmt.Errorf("Could not find %q on path (%s): %w", i.name, i.warn, err)
} else {
return nil, fmt.Errorf("Could not find %q on path: %w", i.name, err)
}
}
return nil, fmt.Errorf("Unexpected failure looking for %q on path: %w", i.name, err)
}
bins = append(bins, *i.dest)
}
r.logger.Trace("using binaries", "paths", bins)
return r, nil
}
func (r *Runner) DockerExec(ctx context.Context, args []string, stdout io.Writer, stdin io.Reader) error {
return cmdExec(ctx, "docker", r.dockerBin, args, stdout, nil, stdin, "")
}
func (r *Runner) DockerExecWithStderr(ctx context.Context, args []string, stdout, stderr io.Writer, stdin io.Reader) error {
return cmdExec(ctx, "docker", r.dockerBin, args, stdout, stderr, stdin, "")
}
func (r *Runner) TerraformExec(ctx context.Context, args []string, stdout io.Writer, workdir string) error {
return cmdExec(ctx, "terraform", r.tfBin, args, stdout, nil, nil, workdir)
}
func cmdExec(ctx context.Context, name, binary string, args []string, stdout, stderr io.Writer, stdin io.Reader, dir string) error {
if binary == "" {
panic("binary named " + name + " was not detected")
}
var errWriter bytes.Buffer
if stdout == nil {
stdout = os.Stdout // TODO: wrap logs
}
cmd := exec.CommandContext(ctx, binary, args...)
if dir != "" {
cmd.Dir = dir
}
cmd.Stdout = stdout
cmd.Stderr = &errWriter
if stderr != nil {
cmd.Stderr = io.MultiWriter(stderr, cmd.Stderr)
}
cmd.Stdin = stdin
if err := cmd.Run(); err != nil {
return &ExecError{
BinaryName: name,
Err: err,
ErrorOutput: errWriter.String(),
}
}
return nil
}
type ExecError struct {
BinaryName string
ErrorOutput string
Err error
}
func (e *ExecError) Unwrap() error {
return e.Err
}
func (e *ExecError) Error() string {
return fmt.Sprintf(
"could not invoke %q: %v : %s",
e.BinaryName,
e.Err,
e.ErrorOutput,
)
}

View File

@ -0,0 +1,70 @@
package secrets
import (
"net/url"
"strings"
"github.com/hashicorp/consul/testing/deployer/topology"
)
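// Store is an in-memory store for the secrets generated during a run (gossip
// keys, ACL tokens), keyed by cluster plus an escaped identifier.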
type Store struct {
m map[string]string
}
const (
GossipKey = "gossip"
BootstrapToken = "bootstrap-token"
AgentRecovery = "agent-recovery"
)
func (s *Store) SaveGeneric(cluster, name, value string) {
s.save(encode(cluster, "generic", name), value)
}
func (s *Store) ReadGeneric(cluster, name string) string {
return s.read(encode(cluster, "generic", name))
}
func (s *Store) SaveAgentToken(cluster string, nid topology.NodeID, value string) {
s.save(encode(cluster, "agent", nid.String()), value)
}
func (s *Store) ReadAgentToken(cluster string, nid topology.NodeID) string {
return s.read(encode(cluster, "agent", nid.String()))
}
func (s *Store) SaveServiceToken(cluster string, sid topology.ServiceID, value string) {
s.save(encode(cluster, "service", sid.String()), value)
}
func (s *Store) ReadServiceToken(cluster string, sid topology.ServiceID) string {
return s.read(encode(cluster, "service", sid.String()))
}
func (s *Store) save(key, value string) {
if s.m == nil {
s.m = make(map[string]string)
}
s.m[key] = value
}
func (s *Store) read(key string) string {
if s.m == nil {
return ""
}
v, ok := s.m[key]
if !ok {
return ""
}
return v
}
func encode(parts ...string) string {
var out []string
for _, p := range parts {
out = append(out, url.QueryEscape(p))
}
return strings.Join(out, "/")
}

View File

@ -0,0 +1,215 @@
package tfgen
import (
"fmt"
"strings"
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
)
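// generateAgentHCL renders the agent configuration HCL for a server or client
// agent node, covering gossip encryption, TLS, ACLs, ports, and retry_join.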
func (g *Generator) generateAgentHCL(node *topology.Node) (string, error) {
if !node.IsAgent() {
return "", fmt.Errorf("not an agent")
}
cluster, ok := g.topology.Clusters[node.Cluster]
if !ok {
return "", fmt.Errorf("no such cluster: %s", node.Cluster)
}
var b HCLBuilder
b.add("server", node.IsServer())
b.add("bind_addr", "0.0.0.0")
b.add("client_addr", "0.0.0.0")
b.add("advertise_addr", `{{ GetInterfaceIP "eth0" }}`)
b.add("datacenter", node.Datacenter)
b.add("disable_update_check", true)
b.add("log_level", "trace")
b.add("enable_debug", true)
b.add("use_streaming_backend", true)
// speed up leaves
b.addBlock("performance", func() {
b.add("leave_drain_time", "50ms")
})
b.add("primary_datacenter", node.Datacenter)
// Using retry_join here is bad because changing server membership will
// destroy and recreate all of the servers
// if !node.IsServer() {
b.addSlice("retry_join", []string{"server." + node.Cluster + "-consulcluster.lan"})
b.add("retry_interval", "1s")
// }
if node.IsServer() {
b.addBlock("peering", func() {
b.add("enabled", true)
})
}
b.addBlock("ui_config", func() {
b.add("enabled", true)
})
b.addBlock("telemetry", func() {
b.add("disable_hostname", true)
b.add("prometheus_retention_time", "168h")
})
b.add("encrypt", g.sec.ReadGeneric(node.Cluster, secrets.GossipKey))
{
var (
root = "/consul/config/certs"
caFile = root + "/consul-agent-ca.pem"
certFile = root + "/" + node.TLSCertPrefix + ".pem"
certKey = root + "/" + node.TLSCertPrefix + "-key.pem"
)
b.addBlock("tls", func() {
b.addBlock("internal_rpc", func() {
b.add("ca_file", caFile)
b.add("cert_file", certFile)
b.add("key_file", certKey)
b.add("verify_incoming", true)
b.add("verify_server_hostname", true)
b.add("verify_outgoing", true)
})
// if cfg.EncryptionTLSAPI {
// b.addBlock("https", func() {
// b.add("ca_file", caFile)
// b.add("cert_file", certFile)
// b.add("key_file", certKey)
// // b.add("verify_incoming", true)
// })
// }
if node.IsServer() {
b.addBlock("grpc", func() {
b.add("ca_file", caFile)
b.add("cert_file", certFile)
b.add("key_file", certKey)
// b.add("verify_incoming", true)
})
}
})
}
b.addBlock("ports", func() {
if node.IsServer() {
b.add("grpc_tls", 8503)
b.add("grpc", -1)
} else {
b.add("grpc", 8502)
b.add("grpc_tls", -1)
}
b.add("http", 8500)
b.add("dns", 8600)
})
b.addSlice("recursors", []string{"8.8.8.8"})
b.addBlock("acl", func() {
b.add("enabled", true)
b.add("default_policy", "deny")
b.add("down_policy", "extend-cache")
b.add("enable_token_persistence", true)
b.addBlock("tokens", func() {
if node.IsServer() {
b.add("initial_management", g.sec.ReadGeneric(node.Cluster, secrets.BootstrapToken))
}
b.add("agent_recovery", g.sec.ReadGeneric(node.Cluster, secrets.AgentRecovery))
b.add("agent", g.sec.ReadAgentToken(node.Cluster, node.ID()))
})
})
if node.IsServer() {
b.add("bootstrap_expect", len(cluster.ServerNodes()))
// b.add("translate_wan_addrs", true)
b.addBlock("rpc", func() {
b.add("enable_streaming", true)
})
if node.HasPublicAddress() {
b.add("advertise_addr_wan", `{{ GetInterfaceIP "eth1" }}`) // note: can't use 'node.PublicAddress()' b/c we don't know that yet
}
// Exercise config entry bootstrap
// b.addBlock("config_entries", func() {
// b.addBlock("bootstrap", func() {
// b.add("kind", "service-defaults")
// b.add("name", "placeholder")
// b.add("protocol", "grpc")
// })
// b.addBlock("bootstrap", func() {
// b.add("kind", "service-intentions")
// b.add("name", "placeholder")
// b.addBlock("sources", func() {
// b.add("name", "placeholder-client")
// b.add("action", "allow")
// })
// })
// })
b.addBlock("connect", func() {
b.add("enabled", true)
})
} else {
if cluster.Enterprise {
b.add("partition", node.Partition)
}
}
return b.String(), nil
}
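// HCLBuilder accumulates configuration lines and formats the result as HCL.
// A minimal illustrative use:
//
//	var b HCLBuilder
//	b.add("datacenter", "dc1")
//	b.addBlock("ports", func() { b.add("http", 8500) })
//	cfg := b.String()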
type HCLBuilder struct {
parts []string
}
func (b *HCLBuilder) format(s string, a ...any) {
if len(a) == 0 {
b.parts = append(b.parts, s)
} else {
b.parts = append(b.parts, fmt.Sprintf(s, a...))
}
}
func (b *HCLBuilder) add(k string, v any) {
switch x := v.(type) {
case string:
if x != "" {
b.format("%s = %q", k, x)
}
case int:
b.format("%s = %d", k, x)
case bool:
b.format("%s = %v", k, x)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
func (b *HCLBuilder) addBlock(block string, fn func()) {
b.format(block + "{")
fn()
b.format("}")
}
func (b *HCLBuilder) addSlice(name string, vals []string) {
b.format(name + " = [")
for _, v := range vals {
b.format("%q,", v)
}
b.format("]")
}
func (b *HCLBuilder) String() string {
joined := strings.Join(b.parts, "\n")
// Ensure it looks tidy
return string(hclwrite.Format([]byte(joined)))
}

View File

@ -0,0 +1,45 @@
package tfgen
import (
"fmt"
)
// digestOutputs takes the data extracted from terraform output variables and
// updates various fields on the topology.Topology with that data.
func (g *Generator) digestOutputs(out *Outputs) error {
for clusterName, nodeMap := range out.Nodes {
cluster, ok := g.topology.Clusters[clusterName]
if !ok {
return fmt.Errorf("found output cluster that does not exist: %s", clusterName)
}
for nid, nodeOut := range nodeMap {
node := cluster.NodeByID(nid)
if node == nil {
return fmt.Errorf("found output node that does not exist in cluster %q: %s", nid, clusterName)
}
if node.DigestExposedPorts(nodeOut.Ports) {
g.logger.Info("discovered exposed port mappings",
"cluster", clusterName,
"node", nid.String(),
"ports", nodeOut.Ports,
)
}
}
}
for netName, proxyPort := range out.ProxyPorts {
changed, err := g.topology.DigestExposedProxyPort(netName, proxyPort)
if err != nil {
return err
}
if changed {
g.logger.Info("discovered exposed forward proxy port",
"network", netName,
"port", proxyPort,
)
}
}
return nil
}

View File

@ -0,0 +1,180 @@
package tfgen
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
func (g *Generator) getCoreDNSContainer(
net *topology.Network,
ipAddress string,
hashes []string,
) Resource {
var env []string
for i, hv := range hashes {
env = append(env, fmt.Sprintf("HASH_FILE_%d_VALUE=%s", i, hv))
}
coredns := struct {
Name string
DockerNetworkName string
IPAddress string
HashValues string
Env []string
}{
Name: net.Name,
DockerNetworkName: net.DockerName,
IPAddress: ipAddress,
Env: env,
}
return Eval(tfCorednsT, &coredns)
}
func (g *Generator) writeCoreDNSFiles(net *topology.Network, dnsIPAddress string) (bool, []string, error) {
if net.IsPublic() {
return false, nil, fmt.Errorf("coredns only runs on local networks")
}
rootdir := filepath.Join(g.workdir, "terraform", "coredns-config-"+net.Name)
if err := os.MkdirAll(rootdir, 0755); err != nil {
return false, nil, err
}
for _, cluster := range g.topology.Clusters {
if cluster.NetworkName != net.Name {
continue
}
var addrs []string
for _, node := range cluster.SortedNodes() {
if node.Kind != topology.NodeKindServer || node.Disabled {
continue
}
addr := node.AddressByNetwork(net.Name)
if addr.IPAddress != "" {
addrs = append(addrs, addr.IPAddress)
}
}
var (
clusterDNSName = cluster.Name + "-consulcluster.lan"
)
corefilePath := filepath.Join(rootdir, "Corefile")
zonefilePath := filepath.Join(rootdir, "servers")
_, err := UpdateFileIfDifferent(
g.logger,
generateCoreDNSConfigFile(
clusterDNSName,
addrs,
),
corefilePath,
0644,
)
if err != nil {
return false, nil, fmt.Errorf("error writing %q: %w", corefilePath, err)
}
corefileHash, err := util.HashFile(corefilePath)
if err != nil {
return false, nil, fmt.Errorf("error hashing %q: %w", corefilePath, err)
}
_, err = UpdateFileIfDifferent(
g.logger,
generateCoreDNSZoneFile(
dnsIPAddress,
clusterDNSName,
addrs,
),
zonefilePath,
0644,
)
if err != nil {
return false, nil, fmt.Errorf("error writing %q: %w", zonefilePath, err)
}
zonefileHash, err := util.HashFile(zonefilePath)
if err != nil {
return false, nil, fmt.Errorf("error hashing %q: %w", zonefilePath, err)
}
return true, []string{corefileHash, zonefileHash}, nil
}
return false, nil, nil
}
func generateCoreDNSConfigFile(
clusterDNSName string,
addrs []string,
) []byte {
serverPart := ""
if len(addrs) > 0 {
var servers []string
for _, addr := range addrs {
servers = append(servers, addr+":8600")
}
serverPart = fmt.Sprintf(`
consul:53 {
forward . %s
log
errors
whoami
}
`, strings.Join(servers, " "))
}
return []byte(fmt.Sprintf(`
%[1]s:53 {
file /config/servers %[1]s
log
errors
whoami
}
%[2]s
.:53 {
forward . 8.8.8.8:53
log
errors
whoami
}
`, clusterDNSName, serverPart))
}
func generateCoreDNSZoneFile(
dnsIPAddress string,
clusterDNSName string,
addrs []string,
) []byte {
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf(`
$TTL 60
$ORIGIN %[1]s.
@ IN SOA ns.%[1]s. webmaster.%[1]s. (
2017042745 ; serial
7200 ; refresh (2 hours)
3600 ; retry (1 hour)
1209600 ; expire (2 weeks)
3600 ; minimum (1 hour)
)
@ IN NS ns.%[1]s. ; Name server
ns IN A %[2]s ; self
`, clusterDNSName, dnsIPAddress))
for _, addr := range addrs {
buf.WriteString(fmt.Sprintf(`
server IN A %s ; Consul server
`, addr))
}
return buf.Bytes()
}
var tfCorednsT = template.Must(template.ParseFS(content, "templates/container-coredns.tf.tmpl"))

View File

@ -0,0 +1,39 @@
package tfgen
import (
"fmt"
"regexp"
)
var invalidResourceName = regexp.MustCompile(`[^a-z0-9-]+`)
func DockerImageResourceName(image string) string {
return invalidResourceName.ReplaceAllLiteralString(image, "-")
}
func DockerNetwork(name, subnet string) Resource {
return Text(fmt.Sprintf(`
resource "docker_network" %[1]q {
name = %[1]q
attachable = true
ipam_config {
subnet = %[2]q
}
}
`, name, subnet))
}
func DockerVolume(name string) Resource {
return Text(fmt.Sprintf(`
resource "docker_volume" %[1]q {
name = %[1]q
}`, name))
}
func DockerImage(name, image string) Resource {
return Text(fmt.Sprintf(`
resource "docker_image" %[1]q {
name = %[2]q
keep_locally = true
}`, name, image))
}

View File

@ -0,0 +1,15 @@
package tfgen
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestDockerImageResourceName(t *testing.T) {
fn := DockerImageResourceName
assert.Equal(t, "", fn(""))
assert.Equal(t, "abcdefghijklmnopqrstuvwxyz0123456789-", fn("abcdefghijklmnopqrstuvwxyz0123456789-"))
assert.Equal(t, "hashicorp-consul-1-15-0", fn("hashicorp/consul:1.15.0"))
}

View File

@ -0,0 +1,475 @@
package tfgen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
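// Generator renders the topology into Terraform configuration under
// <workdir>/terraform and uses the runner to apply or destroy it.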
type Generator struct {
logger hclog.Logger
runner *runner.Runner
topology *topology.Topology
sec *secrets.Store
workdir string
license string
tfLogger io.Writer
// set during network phase
remainingSubnets map[string]struct{}
launched bool
}
func NewGenerator(
logger hclog.Logger,
runner *runner.Runner,
topo *topology.Topology,
sec *secrets.Store,
workdir string,
license string,
) (*Generator, error) {
if logger == nil {
panic("logger is required")
}
if runner == nil {
panic("runner is required")
}
if topo == nil {
panic("topology is required")
}
if sec == nil {
panic("secrets store is required")
}
if workdir == "" {
panic("workdir is required")
}
g := &Generator{
logger: logger,
runner: runner,
sec: sec,
workdir: workdir,
license: license,
tfLogger: logger.Named("terraform").StandardWriter(&hclog.StandardLoggerOptions{
ForceLevel: hclog.Info,
}),
}
g.SetTopology(topo)
_ = g.terraformDestroy(context.Background(), true) // cleanup prior run
return g, nil
}
func (g *Generator) MarkLaunched() {
g.launched = true
}
func (g *Generator) SetTopology(topo *topology.Topology) {
if topo == nil {
panic("topology is required")
}
g.topology = topo
}
type Step int
const (
StepAll Step = 0
StepNetworks Step = 1
StepServers Step = 2
StepAgents Step = 3
StepServices Step = 4
// StepPeering Step = XXX5
StepRelaunch Step = 5
)
func (s Step) String() string {
switch s {
case StepAll:
return "all"
case StepNetworks:
return "networks"
case StepServers:
return "servers"
case StepAgents:
return "agents"
case StepServices:
return "services"
case StepRelaunch:
return "relaunch"
// case StepPeering:
// return "peering"
default:
return "UNKNOWN--" + strconv.Itoa(int(s))
}
}
func (s Step) StartServers() bool { return s >= StepServers }
func (s Step) StartAgents() bool { return s >= StepAgents }
func (s Step) StartServices() bool { return s >= StepServices }
// func (s Step) InitiatePeering() bool { return s >= StepPeering }
func (g *Generator) Regenerate() error {
return g.Generate(StepRelaunch)
}
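// Generate writes out the Terraform resources implied by the topology for the
// given step (networks, volumes, images, containers), applies them, and then
// digests the exposed ports from the Terraform outputs back into the topology.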
func (g *Generator) Generate(step Step) error {
if g.launched && step != StepRelaunch {
return fmt.Errorf("cannot use step %q after successful launch; see Regenerate()", step)
}
g.logger.Info("generating and creating resources", "step", step.String())
var (
networks []Resource
volumes []Resource
images []Resource
containers []Resource
imageNames = make(map[string]string)
)
addVolume := func(name string) {
volumes = append(volumes, DockerVolume(name))
}
addImage := func(name, image string) {
if image == "" {
return
}
if _, ok := imageNames[image]; ok {
return
}
if name == "" {
name = DockerImageResourceName(image)
}
imageNames[image] = name
g.logger.Info("registering image", "resource", name, "image", image)
images = append(images, DockerImage(name, image))
}
if g.remainingSubnets == nil {
g.remainingSubnets = util.GetPossibleDockerNetworkSubnets()
}
if len(g.remainingSubnets) == 0 {
return fmt.Errorf("exhausted all docker networks")
}
addImage("nginx", "nginx:latest")
addImage("coredns", "coredns/coredns:latest")
for _, net := range g.topology.SortedNetworks() {
if net.Subnet == "" {
// Because this harness runs on a linux or macos host, we can't
// directly invoke the moby libnetwork calls to check for free
// subnets as it would have to cross into the docker desktop vm on
// mac.
//
// Instead rely on map iteration order being random to avoid
// collisions, but detect the terraform failure and retry until
// success.
var ipnet string
for ipnet = range g.remainingSubnets {
}
if ipnet == "" {
return fmt.Errorf("could not get a free docker network")
}
delete(g.remainingSubnets, ipnet)
if _, err := net.SetSubnet(ipnet); err != nil {
return fmt.Errorf("assigned subnet is invalid %q: %w", ipnet, err)
}
}
networks = append(networks, DockerNetwork(net.DockerName, net.Subnet))
var (
// We always ask for a /24, so just blindly pick x.x.x.252 as our
// proxy address. There's an offset of 2 in the list of available
// addresses here because we removed x.x.x.0 and x.x.x.1 from the
// pool.
proxyIPAddress = net.IPByIndex(250)
// Grab x.x.x.253 for the dns server
dnsIPAddress = net.IPByIndex(251)
)
{
// wrote, hashes, err := g.write
}
{ // nginx forward proxy
_, hash, err := g.writeNginxConfig(net)
if err != nil {
return fmt.Errorf("writeNginxConfig[%s]: %w", net.Name, err)
}
containers = append(containers, g.getForwardProxyContainer(net, proxyIPAddress, hash))
}
net.ProxyAddress = proxyIPAddress
net.DNSAddress = ""
if net.IsLocal() {
wrote, hashes, err := g.writeCoreDNSFiles(net, dnsIPAddress)
if err != nil {
return fmt.Errorf("writeCoreDNSFiles[%s]: %w", net.Name, err)
}
if wrote {
net.DNSAddress = dnsIPAddress
containers = append(containers, g.getCoreDNSContainer(net, dnsIPAddress, hashes))
}
}
}
for _, c := range g.topology.SortedClusters() {
if c.TLSVolumeName == "" {
c.TLSVolumeName = c.Name + "-tls-material-" + g.topology.ID
}
addVolume(c.TLSVolumeName)
}
addImage("pause", "registry.k8s.io/pause:3.3")
if step.StartServers() {
for _, c := range g.topology.SortedClusters() {
for _, node := range c.SortedNodes() {
if node.Disabled {
continue
}
addImage("", node.Images.Consul)
addImage("", node.Images.EnvoyConsulImage())
addImage("", node.Images.LocalDataplaneImage())
if node.IsAgent() {
addVolume(node.DockerName())
}
for _, svc := range node.Services {
addImage("", svc.Image)
}
myContainers, err := g.generateNodeContainers(step, c, node)
if err != nil {
return err
}
containers = append(containers, myContainers...)
}
}
}
tfpath := func(p string) string {
return filepath.Join(g.workdir, "terraform", p)
}
if _, err := WriteHCLResourceFile(g.logger, []Resource{Text(terraformPrelude)}, tfpath("init.tf"), 0644); err != nil {
return err
}
if netResult, err := WriteHCLResourceFile(g.logger, networks, tfpath("networks.tf"), 0644); err != nil {
return err
} else if netResult == UpdateResultModified {
if step != StepNetworks {
return fmt.Errorf("cannot change networking details after they are established")
}
}
if _, err := WriteHCLResourceFile(g.logger, volumes, tfpath("volumes.tf"), 0644); err != nil {
return err
}
if _, err := WriteHCLResourceFile(g.logger, images, tfpath("images.tf"), 0644); err != nil {
return err
}
if _, err := WriteHCLResourceFile(g.logger, containers, tfpath("containers.tf"), 0644); err != nil {
return err
}
if err := g.terraformApply(context.TODO()); err != nil {
return err
}
out, err := g.terraformOutputs(context.TODO())
if err != nil {
return err
}
return g.digestOutputs(out)
}
func (g *Generator) DestroyAll() error {
return g.terraformDestroy(context.TODO(), false)
}
func (g *Generator) DestroyAllQuietly() error {
return g.terraformDestroy(context.TODO(), true)
}
func (g *Generator) terraformApply(ctx context.Context) error {
tfdir := filepath.Join(g.workdir, "terraform")
if _, err := os.Stat(filepath.Join(tfdir, ".terraform")); err != nil {
if !os.IsNotExist(err) {
return err
}
// On the fly init
g.logger.Info("Running 'terraform init'...")
if err := g.runner.TerraformExec(ctx, []string{"init", "-input=false"}, g.tfLogger, tfdir); err != nil {
return err
}
}
g.logger.Info("Running 'terraform apply'...")
return g.runner.TerraformExec(ctx, []string{"apply", "-input=false", "-auto-approve"}, g.tfLogger, tfdir)
}
func (g *Generator) terraformDestroy(ctx context.Context, quiet bool) error {
g.logger.Info("Running 'terraform destroy'...")
var out io.Writer
if quiet {
out = io.Discard
} else {
out = g.tfLogger
}
tfdir := filepath.Join(g.workdir, "terraform")
return g.runner.TerraformExec(ctx, []string{
"destroy", "-input=false", "-auto-approve", "-refresh=false",
}, out, tfdir)
}
func (g *Generator) terraformOutputs(ctx context.Context) (*Outputs, error) {
tfdir := filepath.Join(g.workdir, "terraform")
var buf bytes.Buffer
err := g.runner.TerraformExec(ctx, []string{
"output", "-json",
}, &buf, tfdir)
if err != nil {
return nil, err
}
type outputVar struct {
// may be map[string]any
Value any `json:"value"`
}
raw := make(map[string]*outputVar)
dec := json.NewDecoder(&buf)
if err := dec.Decode(&raw); err != nil {
return nil, err
}
out := &Outputs{}
for key, rv := range raw {
switch {
case strings.HasPrefix(key, "ports_"):
cluster, nid, ok := extractNodeOutputKey("ports_", key)
if !ok {
return nil, fmt.Errorf("unexpected output var: %s", key)
}
ports := make(map[int]int)
for k, v := range rv.Value.(map[string]any) {
ki, err := strconv.Atoi(k)
if err != nil {
return nil, fmt.Errorf("unexpected port value %q: %w", k, err)
}
ports[ki] = int(v.(float64))
}
out.SetNodePorts(cluster, nid, ports)
case strings.HasPrefix(key, "forwardproxyport_"):
netname := strings.TrimPrefix(key, "forwardproxyport_")
found := rv.Value.(map[string]any)
if len(found) != 1 {
return nil, fmt.Errorf("found unexpected ports: %v", found)
}
got, ok := found[strconv.Itoa(proxyInternalPort)]
if !ok {
return nil, fmt.Errorf("found unexpected ports: %v", found)
}
out.SetProxyPort(netname, int(got.(float64)))
}
}
return out, nil
}
func extractNodeOutputKey(prefix, key string) (string, topology.NodeID, bool) {
clusterNode := strings.TrimPrefix(key, prefix)
cluster, nodeid, ok := strings.Cut(clusterNode, "_")
if !ok {
return "", topology.NodeID{}, false
}
partition, node, ok := strings.Cut(nodeid, "_")
if !ok {
return "", topology.NodeID{}, false
}
nid := topology.NewNodeID(node, partition)
return cluster, nid, true
}
type Outputs struct {
ProxyPorts map[string]int // net -> exposed port
Nodes map[string]map[topology.NodeID]*NodeOutput // clusterID -> node -> stuff
}
func (o *Outputs) SetNodePorts(cluster string, nid topology.NodeID, ports map[int]int) {
nodeOut := o.getNode(cluster, nid)
nodeOut.Ports = ports
}
func (o *Outputs) SetProxyPort(net string, port int) {
if o.ProxyPorts == nil {
o.ProxyPorts = make(map[string]int)
}
o.ProxyPorts[net] = port
}
func (o *Outputs) getNode(cluster string, nid topology.NodeID) *NodeOutput {
if o.Nodes == nil {
o.Nodes = make(map[string]map[topology.NodeID]*NodeOutput)
}
cnodes, ok := o.Nodes[cluster]
if !ok {
cnodes = make(map[topology.NodeID]*NodeOutput)
o.Nodes[cluster] = cnodes
}
nodeOut, ok := cnodes[nid]
if !ok {
nodeOut = &NodeOutput{}
cnodes[nid] = nodeOut
}
return nodeOut
}
type NodeOutput struct {
Ports map[int]int `json:",omitempty"`
}

View File

@ -0,0 +1,70 @@
package tfgen
import (
"bytes"
"os"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl/v2/hclwrite"
"github.com/rboyer/safeio"
)
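// WriteHCLResourceFile renders the given resources, formats the combined body
// as HCL, and writes it to path only when the contents differ from what is
// already on disk.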
func WriteHCLResourceFile(
logger hclog.Logger,
res []Resource,
path string,
perm os.FileMode,
) (UpdateResult, error) {
var text []string
for _, r := range res {
val, err := r.Render()
if err != nil {
return UpdateResultNone, err
}
text = append(text, strings.TrimSpace(val))
}
body := strings.Join(text, "\n\n")
// Ensure it looks tidy
out := hclwrite.Format(bytes.TrimSpace([]byte(body)))
return UpdateFileIfDifferent(logger, out, path, perm)
}
type UpdateResult int
const (
UpdateResultNone UpdateResult = iota
UpdateResultCreated
UpdateResultModified
)
func UpdateFileIfDifferent(
logger hclog.Logger,
body []byte,
path string,
perm os.FileMode,
) (UpdateResult, error) {
prev, err := os.ReadFile(path)
result := UpdateResultNone
if err != nil {
if !os.IsNotExist(err) {
return result, err
}
logger.Info("writing new file", "path", path)
result = UpdateResultCreated
} else {
// loaded
if bytes.Equal(body, prev) {
return result, nil
}
logger.Info("file has changed", "path", path)
result = UpdateResultModified
}
_, err = safeio.WriteToFile(bytes.NewReader(body), path, perm)
return result, err
}

View File

@ -0,0 +1,249 @@
package tfgen
import (
"fmt"
"sort"
"strconv"
"text/template"
"github.com/hashicorp/consul/testing/deployer/topology"
)
type terraformPod struct {
PodName string
Node *topology.Node
Ports []int
Labels map[string]string
TLSVolumeName string
DNSAddress string
DockerNetworkName string
}
type terraformConsulAgent struct {
terraformPod
ImageResource string
HCL string
EnterpriseLicense string
Env []string
}
type terraformMeshGatewayService struct {
terraformPod
EnvoyImageResource string
Service *topology.Service
Command []string
}
type terraformService struct {
terraformPod
AppImageResource string
EnvoyImageResource string // agentful
DataplaneImageResource string // agentless
Service *topology.Service
Env []string
Command []string
EnvoyCommand []string // agentful
}
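// generateNodeContainers emits the docker_container resources for one node:
// an optional consul agent, per-service app/sidecar/dataplane or mesh gateway
// containers (gated by step), and the pause pod that owns the network
// namespace.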
func (g *Generator) generateNodeContainers(
step Step,
cluster *topology.Cluster,
node *topology.Node,
) ([]Resource, error) {
if node.Disabled {
return nil, fmt.Errorf("cannot generate containers for a disabled node")
}
pod := terraformPod{
PodName: node.PodName(),
Node: node,
Labels: map[string]string{
"consulcluster-topology-id": g.topology.ID,
"consulcluster-cluster-name": node.Cluster,
},
TLSVolumeName: cluster.TLSVolumeName,
DNSAddress: "8.8.8.8",
}
cluster, ok := g.topology.Clusters[node.Cluster]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", node.Cluster)
}
net, ok := g.topology.Networks[cluster.NetworkName]
if !ok {
return nil, fmt.Errorf("no local network: %s", cluster.NetworkName)
}
if net.DNSAddress != "" {
pod.DNSAddress = net.DNSAddress
}
pod.DockerNetworkName = net.DockerName
var (
containers []Resource
)
if node.IsAgent() {
agentHCL, err := g.generateAgentHCL(node)
if err != nil {
return nil, err
}
agent := terraformConsulAgent{
terraformPod: pod,
ImageResource: DockerImageResourceName(node.Images.Consul),
HCL: agentHCL,
EnterpriseLicense: g.license,
Env: node.AgentEnv,
}
switch {
case node.IsServer() && step.StartServers(),
!node.IsServer() && step.StartAgents():
containers = append(containers, Eval(tfConsulT, &agent))
}
}
for _, svc := range node.SortedServices() {
if svc.IsMeshGateway {
if node.Kind == topology.NodeKindDataplane {
panic("NOT READY YET")
}
gw := terraformMeshGatewayService{
terraformPod: pod,
EnvoyImageResource: DockerImageResourceName(node.Images.EnvoyConsulImage()),
Service: svc,
Command: []string{
"consul", "connect", "envoy",
"-register",
"-mesh-gateway",
},
}
if token := g.sec.ReadServiceToken(node.Cluster, svc.ID); token != "" {
gw.Command = append(gw.Command, "-token", token)
}
if cluster.Enterprise {
gw.Command = append(gw.Command,
"-partition",
svc.ID.Partition,
)
}
gw.Command = append(gw.Command,
"-address",
`{{ GetInterfaceIP \"eth0\" }}:`+strconv.Itoa(svc.Port),
"-wan-address",
`{{ GetInterfaceIP \"eth1\" }}:`+strconv.Itoa(svc.Port),
)
gw.Command = append(gw.Command,
"-grpc-addr", "http://127.0.0.1:8502",
"-admin-bind",
// for demo purposes
"0.0.0.0:"+strconv.Itoa(svc.EnvoyAdminPort),
"--",
"-l",
"trace",
)
if step.StartServices() {
containers = append(containers, Eval(tfMeshGatewayT, &gw))
}
} else {
tfsvc := terraformService{
terraformPod: pod,
AppImageResource: DockerImageResourceName(svc.Image),
Service: svc,
Command: svc.Command,
}
tfsvc.Env = append(tfsvc.Env, svc.Env...)
if step.StartServices() {
containers = append(containers, Eval(tfAppT, &tfsvc))
}
setenv := func(k, v string) {
tfsvc.Env = append(tfsvc.Env, k+"="+v)
}
if !svc.DisableServiceMesh {
if node.IsDataplane() {
tfsvc.DataplaneImageResource = DockerImageResourceName(node.Images.LocalDataplaneImage())
tfsvc.EnvoyImageResource = ""
tfsvc.EnvoyCommand = nil
// --- REQUIRED ---
setenv("DP_CONSUL_ADDRESSES", "server."+node.Cluster+"-consulcluster.lan")
setenv("DP_SERVICE_NODE_NAME", node.PodName())
setenv("DP_PROXY_SERVICE_ID", svc.ID.Name+"-sidecar-proxy")
} else {
tfsvc.DataplaneImageResource = ""
tfsvc.EnvoyImageResource = DockerImageResourceName(node.Images.EnvoyConsulImage())
tfsvc.EnvoyCommand = []string{
"consul", "connect", "envoy",
"-sidecar-for", svc.ID.Name,
}
}
if cluster.Enterprise {
if node.IsDataplane() {
setenv("DP_SERVICE_NAMESPACE", svc.ID.Namespace)
setenv("DP_SERVICE_PARTITION", svc.ID.Partition)
} else {
tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand,
"-partition",
svc.ID.Partition,
"-namespace",
svc.ID.Namespace,
)
}
}
if token := g.sec.ReadServiceToken(node.Cluster, svc.ID); token != "" {
if node.IsDataplane() {
setenv("DP_CREDENTIAL_TYPE", "static")
setenv("DP_CREDENTIAL_STATIC_TOKEN", token)
} else {
tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand, "-token", token)
}
}
if node.IsDataplane() {
setenv("DP_ENVOY_ADMIN_BIND_ADDRESS", "0.0.0.0") // for demo purposes
setenv("DP_ENVOY_ADMIN_BIND_PORT", "19000")
setenv("DP_LOG_LEVEL", "trace")
setenv("DP_CA_CERTS", "/consul/config/certs/consul-agent-ca.pem")
setenv("DP_CONSUL_GRPC_PORT", "8503")
setenv("DP_TLS_SERVER_NAME", "server."+node.Datacenter+".consul")
} else {
tfsvc.EnvoyCommand = append(tfsvc.EnvoyCommand,
"-grpc-addr", "http://127.0.0.1:8502",
"-admin-bind",
// for demo purposes
"0.0.0.0:"+strconv.Itoa(svc.EnvoyAdminPort),
"--",
"-l",
"trace",
)
}
if step.StartServices() {
sort.Strings(tfsvc.Env)
if node.IsDataplane() {
containers = append(containers, Eval(tfAppDataplaneT, &tfsvc))
} else {
containers = append(containers, Eval(tfAppSidecarT, &tfsvc))
}
}
}
}
}
// Wait until the very end to render the pod so we know all of the ports.
pod.Ports = node.SortedPorts()
// pod placeholder container
containers = append(containers, Eval(tfPauseT, &pod))
return containers, nil
}
var tfPauseT = template.Must(template.ParseFS(content, "templates/container-pause.tf.tmpl"))
var tfConsulT = template.Must(template.ParseFS(content, "templates/container-consul.tf.tmpl"))
var tfMeshGatewayT = template.Must(template.ParseFS(content, "templates/container-mgw.tf.tmpl"))
var tfAppT = template.Must(template.ParseFS(content, "templates/container-app.tf.tmpl"))
var tfAppSidecarT = template.Must(template.ParseFS(content, "templates/container-app-sidecar.tf.tmpl"))
var tfAppDataplaneT = template.Must(template.ParseFS(content, "templates/container-app-dataplane.tf.tmpl"))

View File

@ -0,0 +1,16 @@
package tfgen
const terraformPrelude = `provider "docker" {
host = "unix:///var/run/docker.sock"
}
terraform {
required_providers {
docker = {
source = "kreuzwerker/docker"
version = "~> 2.0"
}
}
required_version = ">= 0.13"
}
`

View File

@ -0,0 +1,87 @@
package tfgen
import (
"fmt"
"os"
"path/filepath"
"text/template"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
const proxyInternalPort = 80
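// writeNginxConfig renders the per-network nginx forward proxy config and
// returns a hash of its contents; the hash is injected into the container's
// environment so that config changes force the proxy to be recreated.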
func (g *Generator) writeNginxConfig(net *topology.Network) (bool, string, error) {
rootdir := filepath.Join(g.workdir, "terraform", "nginx-config-"+net.Name)
if err := os.MkdirAll(rootdir, 0755); err != nil {
return false, "", err
}
configFile := filepath.Join(rootdir, "nginx.conf")
body := fmt.Sprintf(`
server {
listen %d;
location / {
resolver 8.8.8.8;
##############
# Relevant config knobs are here: https://nginx.org/en/docs/http/ngx_http_proxy_module.html
##############
proxy_pass http://$http_host$uri$is_args$args;
proxy_cache off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_connect_timeout 5s;
proxy_read_timeout 5s;
proxy_send_timeout 5s;
proxy_request_buffering off;
proxy_buffering off;
}
}
`, proxyInternalPort)
_, err := UpdateFileIfDifferent(
g.logger,
[]byte(body),
configFile,
0644,
)
if err != nil {
return false, "", fmt.Errorf("error writing %q: %w", configFile, err)
}
hash, err := util.HashFile(configFile)
if err != nil {
return false, "", fmt.Errorf("error hashing %q: %w", configFile, err)
}
return true, hash, err
}
func (g *Generator) getForwardProxyContainer(
net *topology.Network,
ipAddress string,
hash string,
) Resource {
env := []string{"HASH_FILE_VALUE=" + hash}
proxy := struct {
Name string
DockerNetworkName string
InternalPort int
IPAddress string
Env []string
}{
Name: net.Name,
DockerNetworkName: net.DockerName,
InternalPort: proxyInternalPort,
IPAddress: ipAddress,
Env: env,
}
return Eval(tfForwardProxyT, &proxy)
}
var tfForwardProxyT = template.Must(template.ParseFS(content, "templates/container-proxy.tf.tmpl"))

View File

@ -0,0 +1,95 @@
package tfgen
import (
"bytes"
"text/template"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl/v2/hclwrite"
)
type FileResource struct {
name string
res Resource
}
func (r *FileResource) Name() string { return r.name }
func (r *FileResource) Commit(logger hclog.Logger) error {
val, err := r.res.Render()
if err != nil {
return err
}
_, err = UpdateFileIfDifferent(logger, []byte(val), r.name, 0644)
return err
}
func File(name string, res Resource) *FileResource {
return &FileResource{name: name, res: res}
}
func Text(s string) Resource {
return &textResource{text: s}
}
func Embed(name string) Resource {
return &embedResource{name: name}
}
func Eval(t *template.Template, data any) Resource {
return &evalResource{template: t, data: data, hcl: false}
}
func HCL(t *template.Template, data any) Resource {
return &evalResource{template: t, data: data, hcl: true}
}
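// Resource is any fragment of Terraform configuration that can render itself
// to text.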
type Resource interface {
Render() (string, error)
}
type embedResource struct {
name string
}
func (r *embedResource) Render() (string, error) {
val, err := content.ReadFile(r.name)
if err != nil {
return "", err
}
return string(val), nil
}
type textResource struct {
text string
}
func (r *textResource) Render() (string, error) {
return r.text, nil
}
type evalResource struct {
template *template.Template
data any
hcl bool
}
func (r *evalResource) Render() (string, error) {
out, err := StringTemplate(r.template, r.data)
if err != nil {
return "", err
}
if r.hcl {
return string(hclwrite.Format([]byte(out))), nil
}
return out, nil
}
func StringTemplate(t *template.Template, data any) (string, error) {
var res bytes.Buffer
if err := t.Execute(&res, data); err != nil {
return "", err
}
return res.String(), nil
}

View File

@ -0,0 +1,29 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.DataplaneImageResource}}.latest
restart = "on-failure"
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
volumes {
volume_name = "{{.TLSVolumeName}}"
container_path = "/consul/config/certs"
read_only = true
}
env = [
{{- range .Env }}
"{{.}}",
{{- end}}
]
command = [
"/usr/local/bin/consul-dataplane",
]
}

View File

@ -0,0 +1,31 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.EnvoyImageResource}}.latest
restart = "on-failure"
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
volumes {
volume_name = "{{.TLSVolumeName}}"
container_path = "/consul/config/certs"
read_only = true
}
env = [
{{- range .Env }}
"{{.}}",
{{- end}}
]
command = [
{{- range .EnvoyCommand }}
"{{.}}",
{{- end }}
]
}

View File

@ -0,0 +1,25 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.AppImageResource}}.latest
restart = "on-failure"
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
env = [
{{- range .Env }}
"{{.}}",
{{- end}}
]
command = [
{{- range .Command }}
"{{.}}",
{{- end }}
]
}


@ -0,0 +1,40 @@
resource "docker_container" "{{.Node.DockerName}}" {
name = "{{.Node.DockerName}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
restart = "always"
env = [
"CONSUL_UID=0",
"CONSUL_GID=0",
"CONSUL_LICENSE={{.EnterpriseLicense}}",
{{- range .Env }}
"{{.}}",
{{- end}}
]
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
command = [
"agent",
"-hcl",
<<-EOT
{{ .HCL }}
EOT
]
volumes {
volume_name = "{{.Node.DockerName}}"
container_path = "/consul/data"
}
volumes {
volume_name = "{{.TLSVolumeName}}"
container_path = "/consul/config/certs"
}
}


@ -0,0 +1,28 @@
resource "docker_container" "{{.DockerNetworkName}}-coredns" {
name = "{{.DockerNetworkName}}-coredns"
image = docker_image.coredns.latest
restart = "always"
dns = ["8.8.8.8"]
networks_advanced {
name = docker_network.{{.DockerNetworkName}}.name
ipv4_address = "{{.IPAddress}}"
}
env = [
{{- range .Env }}
"{{.}}",
{{- end}}
]
volumes {
host_path = abspath("coredns-config-{{.Name}}")
container_path = "/config"
read_only = true
}
command = [
"-conf",
"/config/Corefile",
]
}


@ -0,0 +1,25 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.EnvoyImageResource}}.latest
restart = "on-failure"
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
volumes {
volume_name = "{{.TLSVolumeName}}"
container_path = "/consul/config/certs"
read_only = true
}
command = [
{{- range .Command }}
"{{.}}",
{{- end }}
]
}


@ -0,0 +1,38 @@
resource "docker_container" "{{.PodName}}" {
name = "{{.PodName}}"
image = docker_image.pause.latest
hostname = "{{.PodName}}"
restart = "always"
dns = ["{{.DNSAddress}}"]
{{- range $k, $v := .Labels }}
labels {
label = "{{ $k }}"
value = "{{ $v }}"
}
{{- end }}
depends_on = [
docker_container.{{.DockerNetworkName}}-coredns,
docker_container.{{.DockerNetworkName}}-forwardproxy,
]
{{- range .Ports }}
ports {
internal = {{.}}
}
{{- end }}
{{- range .Node.Addresses }}
networks_advanced {
name = docker_network.{{.DockerNetworkName}}.name
ipv4_address = "{{.IPAddress}}"
}
{{- end }}
}
output "ports_{{.Node.Cluster}}_{{.Node.Partition}}_{{.Node.Name}}" {
value = {
for port in docker_container.{{.PodName}}.ports : port.internal => port.external
}
}


@ -0,0 +1,33 @@
resource "docker_container" "{{.DockerNetworkName}}-forwardproxy" {
name = "{{.DockerNetworkName}}-forwardproxy"
image = docker_image.nginx.latest
restart = "always"
dns = ["8.8.8.8"]
ports {
internal = {{.InternalPort}}
}
networks_advanced {
name = docker_network.{{.DockerNetworkName}}.name
ipv4_address = "{{.IPAddress}}"
}
env = [
{{- range .Env }}
"{{.}}",
{{- end}}
]
volumes {
host_path = abspath("nginx-config-{{.Name}}/nginx.conf")
container_path = "/etc/nginx/conf.d/default.conf"
read_only = true
}
}
output "forwardproxyport_{{.Name}}" {
value = {
for port in docker_container.{{.DockerNetworkName}}-forwardproxy.ports : port.internal => port.external
}
}


@ -0,0 +1,15 @@
package tfgen
import (
"embed"
)
//go:embed templates/container-app-dataplane.tf.tmpl
//go:embed templates/container-app-sidecar.tf.tmpl
//go:embed templates/container-app.tf.tmpl
//go:embed templates/container-consul.tf.tmpl
//go:embed templates/container-mgw.tf.tmpl
//go:embed templates/container-pause.tf.tmpl
//go:embed templates/container-proxy.tf.tmpl
//go:embed templates/container-coredns.tf.tmpl
var content embed.FS


@ -0,0 +1,165 @@
package sprawl
import (
"context"
"fmt"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/testing/deployer/topology"
)
// TODO: this is definitely a grpc resolver/balancer issue to look into
const grpcWeirdError = `transport: Error while dialing failed to find Consul server for global address`
func isWeirdGRPCError(err error) bool {
if err == nil {
return false
}
return strings.Contains(err.Error(), grpcWeirdError)
}
func (s *Sprawl) initPeerings() error {
// TODO: wait until services are healthy? wait until mesh gateways work?
// if err := s.generator.Generate(tfgen.StepPeering); err != nil {
// return fmt.Errorf("generator[peering]: %w", err)
// }
var (
logger = s.logger.Named("peering")
_ = logger
)
for _, peering := range s.topology.Peerings {
dialingCluster, ok := s.topology.Clusters[peering.Dialing.Name]
if !ok {
return fmt.Errorf("peering references dialing cluster that does not exist: %s", peering.String())
}
acceptingCluster, ok := s.topology.Clusters[peering.Accepting.Name]
if !ok {
return fmt.Errorf("peering references accepting cluster that does not exist: %s", peering.String())
}
var (
dialingClient = s.clients[dialingCluster.Name]
acceptingClient = s.clients[acceptingCluster.Name]
)
// TODO: allow for use of ServerExternalAddresses
req1 := api.PeeringGenerateTokenRequest{
PeerName: peering.Accepting.PeerName,
}
if acceptingCluster.Enterprise {
req1.Partition = peering.Accepting.Partition
}
GENTOKEN:
resp, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req1, nil)
if err != nil {
if isWeirdGRPCError(err) {
time.Sleep(50 * time.Millisecond)
goto GENTOKEN
}
return fmt.Errorf("error generating peering token for %q: %w", peering.String(), err)
}
peeringToken := resp.PeeringToken
logger.Info("generated peering token", "peering", peering.String())
req2 := api.PeeringEstablishRequest{
PeerName: peering.Dialing.PeerName,
PeeringToken: peeringToken,
}
if dialingCluster.Enterprise {
req2.Partition = peering.Dialing.Partition
}
logger.Info("establishing peering with token", "peering", peering.String())
ESTABLISH:
_, _, err = dialingClient.Peerings().Establish(context.Background(), req2, nil)
if err != nil {
if isWeirdGRPCError(err) {
time.Sleep(50 * time.Millisecond)
goto ESTABLISH
}
return fmt.Errorf("error establishing peering with token for %q: %w", peering.String(), err)
}
logger.Info("peering established", "peering", peering.String())
}
return nil
}
func (s *Sprawl) waitForPeeringEstablishment() error {
var (
logger = s.logger.Named("peering")
)
for _, peering := range s.topology.Peerings {
dialingCluster, ok := s.topology.Clusters[peering.Dialing.Name]
if !ok {
return fmt.Errorf("peering references dialing cluster that does not exist: %s", peering.String())
}
acceptingCluster, ok := s.topology.Clusters[peering.Accepting.Name]
if !ok {
return fmt.Errorf("peering references accepting cluster that does not exist: %s", peering.String())
}
var (
dialingClient = s.clients[dialingCluster.Name]
acceptingClient = s.clients[acceptingCluster.Name]
dialingLogger = logger.With(
"cluster", dialingCluster.Name,
"peering", peering.String(),
)
acceptingLogger = logger.With(
"cluster", acceptingCluster.Name,
"peering", peering.String(),
)
)
s.checkPeeringDirection(dialingLogger, dialingClient, peering.Dialing, dialingCluster.Enterprise)
s.checkPeeringDirection(acceptingLogger, acceptingClient, peering.Accepting, acceptingCluster.Enterprise)
}
return nil
}
func (s *Sprawl) checkPeeringDirection(logger hclog.Logger, client *api.Client, pc topology.PeerCluster, enterprise bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
opts := &api.QueryOptions{}
if enterprise {
opts.Partition = pc.Partition
}
res, _, err := client.Peerings().Read(ctx, pc.PeerName, opts)
if isWeirdGRPCError(err) {
time.Sleep(50 * time.Millisecond)
continue
}
if err != nil {
logger.Info("error looking up peering", "error", err)
time.Sleep(100 * time.Millisecond)
continue
}
if res == nil {
logger.Info("peering not found")
time.Sleep(100 * time.Millisecond)
continue
}
if res.State == api.PeeringStateActive {
logger.Info("peering is active")
return
}
logger.Info("peering not active yet", "state", res.State)
time.Sleep(500 * time.Millisecond)
}
}
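
For reference, a sketch (not from this commit) of the topology input these helpers consume. The cluster names are illustrative; the defaulting of the blank `PeerName` fields is performed by the topology compiler elsewhere in this change.

```
package main

import "github.com/hashicorp/consul/testing/deployer/topology"

// With PeerName left blank, compilation fills it in as
// "peer-<remote cluster>-<remote partition>": the dialing (dc1) side gets
// "peer-dc2-default" and the accepting (dc2) side gets "peer-dc1-default",
// which are the names initPeerings passes to GenerateToken and Establish.
var examplePeerings = []*topology.Peering{{
	Dialing:   topology.PeerCluster{Name: "dc1"},
	Accepting: topology.PeerCluster{Name: "dc2"},
}}

func main() { _ = examplePeerings }
```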


@ -0,0 +1,464 @@
package sprawl
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/copystructure"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
// TODO: manage workdir externally without chdir
// Sprawl is the definition of a complete running Consul deployment topology.
type Sprawl struct {
logger hclog.Logger
runner *runner.Runner
license string
secrets secrets.Store
workdir string
// set during Run
config *topology.Config
topology *topology.Topology
generator *tfgen.Generator
clients map[string]*api.Client // one per cluster
}
// Topology allows access to the topology that defines the resources. Do not
// write to any of these fields.
func (s *Sprawl) Topology() *topology.Topology {
return s.topology
}
func (s *Sprawl) Config() *topology.Config {
c2, err := copyConfig(s.config)
if err != nil {
panic(err)
}
return c2
}
func (s *Sprawl) HTTPClientForCluster(clusterName string) (*http.Client, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", clusterName)
}
// grab the local network for the cluster
network, ok := s.topology.Networks[cluster.NetworkName]
if !ok {
return nil, fmt.Errorf("no such network: %s", cluster.NetworkName)
}
transport, err := util.ProxyHTTPTransport(network.ProxyPort)
if err != nil {
return nil, err
}
return &http.Client{Transport: transport}, nil
}
// APIClientForNode gets a pooled api.Client connected to the agent running on
// the provided node.
//
// Passing an empty token will use the bootstrap token. To use the anonymous
// token instead, pass "-".
func (s *Sprawl) APIClientForNode(clusterName string, nid topology.NodeID, token string) (*api.Client, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", clusterName)
}
nid.Normalize()
node := cluster.NodeByID(nid)
if !node.IsAgent() {
return nil, fmt.Errorf("node is not an agent")
}
switch token {
case "":
token = s.secrets.ReadGeneric(clusterName, secrets.BootstrapToken)
case "-":
token = ""
}
return util.ProxyAPIClient(
node.LocalProxyPort(),
node.LocalAddress(),
8500,
token,
)
}
func copyConfig(cfg *topology.Config) (*topology.Config, error) {
dup, err := copystructure.Copy(cfg)
if err != nil {
return nil, err
}
return dup.(*topology.Config), nil
}
// Launch will create the topology defined by the provided configuration and
// bring up all of the relevant clusters. Once created the Stop method must be
// called to destroy everything.
func Launch(
logger hclog.Logger,
workdir string,
cfg *topology.Config,
) (*Sprawl, error) {
if logger == nil {
panic("logger is required")
}
if workdir == "" {
panic("workdir is required")
}
if err := os.MkdirAll(filepath.Join(workdir, "terraform"), 0755); err != nil {
return nil, err
}
runner, err := runner.Load(logger)
if err != nil {
return nil, err
}
// Copy this to avoid leakage.
cfg, err = copyConfig(cfg)
if err != nil {
return nil, err
}
s := &Sprawl{
logger: logger,
runner: runner,
workdir: workdir,
clients: make(map[string]*api.Client),
}
if err := s.ensureLicense(); err != nil {
return nil, err
}
// Copy this AGAIN, BEFORE compiling so we capture the original definition, without denorms.
s.config, err = copyConfig(cfg)
if err != nil {
return nil, err
}
s.topology, err = topology.Compile(logger.Named("compile"), cfg)
if err != nil {
return nil, fmt.Errorf("topology.Compile: %w", err)
}
s.logger.Info("compiled topology", "ct", jd(s.topology)) // TODO
start := time.Now()
if err := s.launch(); err != nil {
return nil, err
}
s.logger.Info("topology is ready for use", "elapsed", time.Since(start))
if err := s.PrintDetails(); err != nil {
return nil, fmt.Errorf("error gathering diagnostic details: %w", err)
}
return s, nil
}
func (s *Sprawl) Relaunch(
cfg *topology.Config,
) error {
// Copy this BEFORE compiling so we capture the original definition, without denorms.
var err error
s.config, err = copyConfig(cfg)
if err != nil {
return err
}
newTopology, err := topology.Recompile(s.logger.Named("recompile"), cfg, s.topology)
if err != nil {
return fmt.Errorf("topology.Compile: %w", err)
}
s.topology = newTopology
s.logger.Info("compiled replacement topology", "ct", jd(s.topology)) // TODO
start := time.Now()
if err := s.relaunch(); err != nil {
return err
}
s.logger.Info("topology is ready for use", "elapsed", time.Since(start))
if err := s.PrintDetails(); err != nil {
return fmt.Errorf("error gathering diagnostic details: %w", err)
}
return nil
}
// Leader returns the cluster leader agent, or an error if no leader is
// available.
func (s *Sprawl) Leader(clusterName string) (*topology.Node, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", clusterName)
}
var (
client = s.clients[cluster.Name]
// logger = s.logger.With("cluster", cluster.Name)
)
leaderAddr, err := getLeader(client)
if err != nil {
return nil, err
}
for _, node := range cluster.Nodes {
if !node.IsServer() || node.Disabled {
continue
}
if strings.HasPrefix(leaderAddr, node.LocalAddress()+":") {
return node, nil
}
}
return nil, fmt.Errorf("leader not found")
}
// Followers returns the cluster's follower (non-leader) servers.
func (s *Sprawl) Followers(clusterName string) ([]*topology.Node, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", clusterName)
}
leaderNode, err := s.Leader(clusterName)
if err != nil {
return nil, fmt.Errorf("could not determine leader: %w", err)
}
var followers []*topology.Node
for _, node := range cluster.Nodes {
if !node.IsServer() || node.Disabled {
continue
}
if node.ID() != leaderNode.ID() {
followers = append(followers, node)
}
}
return followers, nil
}
func (s *Sprawl) DisabledServers(clusterName string) ([]*topology.Node, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
return nil, fmt.Errorf("no such cluster: %s", clusterName)
}
var servers []*topology.Node
for _, node := range cluster.Nodes {
if !node.IsServer() || !node.Disabled {
continue
}
servers = append(servers, node)
}
return servers, nil
}
func (s *Sprawl) StopContainer(ctx context.Context, containerName string) error {
return s.runner.DockerExec(ctx, []string{"stop", containerName}, nil, nil)
}
func (s *Sprawl) SnapshotEnvoy(ctx context.Context) error {
snapDir := filepath.Join(s.workdir, "envoy-snapshots")
if err := os.MkdirAll(snapDir, 0755); err != nil {
return fmt.Errorf("could not create envoy snapshot output dir %s: %w", snapDir, err)
}
targets := map[string]string{
"config_dump.json": "config_dump",
"clusters.json": "clusters?format=json",
"stats.txt": "stats",
"stats_prometheus.txt": "stats/prometheus",
}
var merr error
for _, c := range s.topology.Clusters {
client, err := s.HTTPClientForCluster(c.Name)
if err != nil {
return fmt.Errorf("could not get http client for cluster %q: %w", c.Name, err)
}
for _, n := range c.Nodes {
if n.Disabled {
continue
}
for _, s := range n.Services {
if s.Disabled || s.EnvoyAdminPort <= 0 {
continue
}
prefix := fmt.Sprintf("http://%s:%d", n.LocalAddress(), s.EnvoyAdminPort)
for fn, target := range targets {
u := prefix + "/" + target
body, err := scrapeURL(client, u)
if err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not scrape %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
))
continue
}
outFn := filepath.Join(snapDir, n.DockerName()+"--"+s.ID.TFString()+"."+fn)
if err := os.WriteFile(outFn+".tmp", body, 0644); err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
))
continue
}
if err := os.Rename(outFn+".tmp", outFn); err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
))
continue
}
}
}
}
}
return merr
}
func scrapeURL(client *http.Client, url string) ([]byte, error) {
res, err := client.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return nil, err
}
return body, nil
}
func (s *Sprawl) CaptureLogs(ctx context.Context) error {
logDir := filepath.Join(s.workdir, "logs")
if err := os.MkdirAll(logDir, 0755); err != nil {
return fmt.Errorf("could not create log output dir %s: %w", logDir, err)
}
containers, err := s.listContainers(ctx)
if err != nil {
return err
}
s.logger.Info("Capturing logs")
var merr error
for _, container := range containers {
if err := s.dumpContainerLogs(ctx, container, logDir); err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not dump logs for container %s: %w", container, err))
}
}
return merr
}
// listContainers returns the known containers from the terraform state file.
func (s *Sprawl) listContainers(ctx context.Context) ([]string, error) {
tfdir := filepath.Join(s.workdir, "terraform")
var buf bytes.Buffer
if err := s.runner.TerraformExec(ctx, []string{"state", "list"}, &buf, tfdir); err != nil {
return nil, fmt.Errorf("error listing containers in terraform state file: %w", err)
}
var (
scan = bufio.NewScanner(&buf)
containers []string
)
for scan.Scan() {
line := strings.TrimSpace(scan.Text())
name := strings.TrimPrefix(line, "docker_container.")
if name != line {
containers = append(containers, name)
continue
}
}
if err := scan.Err(); err != nil {
return nil, err
}
return containers, nil
}
func (s *Sprawl) dumpContainerLogs(ctx context.Context, containerName, outputRoot string) error {
path := filepath.Join(outputRoot, containerName+".log")
f, err := os.Create(path + ".tmp")
if err != nil {
return err
}
keep := false
defer func() {
_ = f.Close()
if !keep {
_ = os.Remove(path + ".tmp")
_ = os.Remove(path)
}
}()
err = s.runner.DockerExecWithStderr(
ctx,
[]string{"logs", containerName},
f,
f,
nil,
)
if err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
if err := os.Rename(path+".tmp", path); err != nil {
return err
}
keep = true
return nil
}
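
A hedged usage sketch (not part of this commit) tying the pieces above together: launch a topology, then reach one of its servers through the proxied API client. The workdir path, cluster name, and node name are invented for illustration.

```
package main

import (
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/testing/deployer/sprawl"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

func run(cfg *topology.Config) error {
	sp, err := sprawl.Launch(hclog.Default(), "/tmp/sprawl-example", cfg)
	if err != nil {
		return err
	}
	defer sp.Stop() // ignore shutdown error in this sketch

	// An empty token means "use the bootstrap token"; pass "-" to be anonymous.
	client, err := sp.APIClientForNode("dc1", topology.NewNodeID("dc1-server1", "default"), "")
	if err != nil {
		return err
	}
	_, err = client.Agent().Self()
	return err
}

func main() {}
```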


@ -0,0 +1,202 @@
package sprawltest
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"sync"
"testing"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/topology"
)
// TODO(rb): move comments to doc.go
var (
// set SPRAWL_WORKDIR_ROOT in the environment to have the test output
// collected under that directory. By default it uses a directory called
// "workdir" in each package.
workdirRoot string
// set SPRAWL_KEEP_WORKDIR=1 in the environment to keep the workdir output
// intact. Files are all destroyed by default.
keepWorkdirOnFail bool
// set SPRAWL_KEEP_RUNNING=1 in the environment to keep the workdir output
// intact and also refrain from tearing anything down. Things are all
// destroyed by default.
//
// SPRAWL_KEEP_RUNNING=1 implies SPRAWL_KEEP_WORKDIR=1
keepRunningOnFail bool
// set SPRAWL_SKIP_OLD_CLEANUP=1 in the environment to prevent the library
// from tearing down and removing anything found in the working directory at
// init time. The default behavior is to perform this cleanup.
skipOldCleanup bool
)
var cleanupPriorRunOnce sync.Once
func init() {
if root := os.Getenv("SPRAWL_WORKDIR_ROOT"); root != "" {
fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_WORKDIR_ROOT set; using %q as output root\n", root)
workdirRoot = root
} else {
workdirRoot = "workdir"
}
if os.Getenv("SPRAWL_KEEP_WORKDIR") == "1" {
keepWorkdirOnFail = true
fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_KEEP_WORKDIR set; not destroying workdir on failure\n")
}
if os.Getenv("SPRAWL_KEEP_RUNNING") == "1" {
keepRunningOnFail = true
keepWorkdirOnFail = true
fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_KEEP_RUNNING set; not tearing down resources on failure\n")
}
if os.Getenv("SPRAWL_SKIP_OLD_CLEANUP") == "1" {
skipOldCleanup = true
fmt.Fprintf(os.Stdout, "INFO: sprawltest: SPRAWL_SKIP_OLD_CLEANUP set; not cleaning up anything found in %q\n", workdirRoot)
}
if !skipOldCleanup {
cleanupPriorRunOnce.Do(func() {
fmt.Fprintf(os.Stdout, "INFO: sprawltest: triggering cleanup of any prior test runs\n")
CleanupWorkingDirectories()
})
}
}
// Launch will create the topology defined by the provided configuration and
// bring up all of the relevant clusters.
//
// - Logs will be routed to (*testing.T).Logf.
//
// - By default everything will be stopped and removed via
// (*testing.T).Cleanup. For failed tests, this can be skipped by setting
// the environment variable SPRAWL_KEEP_RUNNING=1.
func Launch(t *testing.T, cfg *topology.Config) *sprawl.Sprawl {
SkipIfTerraformNotPresent(t)
sp, err := sprawl.Launch(
testutil.Logger(t),
initWorkingDirectory(t),
cfg,
)
require.NoError(t, err)
stopOnCleanup(t, sp)
return sp
}
func initWorkingDirectory(t *testing.T) string {
// TODO(rb): figure out how to get the calling package which we can put in
// the middle here, which is likely 2 call frames away so maybe
// runtime.Callers can help
scratchDir := filepath.Join(workdirRoot, t.Name())
_ = os.RemoveAll(scratchDir) // cleanup prior runs
if err := os.MkdirAll(scratchDir, 0755); err != nil {
t.Fatalf("error: %v", err)
}
t.Cleanup(func() {
if t.Failed() && keepWorkdirOnFail {
t.Logf("test failed; leaving sprawl terraform definitions in: %s", scratchDir)
} else {
_ = os.RemoveAll(scratchDir)
}
})
return scratchDir
}
func stopOnCleanup(t *testing.T, sp *sprawl.Sprawl) {
t.Cleanup(func() {
if t.Failed() && keepWorkdirOnFail {
// It's only worth it to capture the logs if we aren't going to
// immediately discard them.
if err := sp.CaptureLogs(context.Background()); err != nil {
t.Logf("log capture encountered failures: %v", err)
}
if err := sp.SnapshotEnvoy(context.Background()); err != nil {
t.Logf("envoy snapshot capture encountered failures: %v", err)
}
}
if t.Failed() && keepRunningOnFail {
t.Log("test failed; leaving sprawl running")
} else {
//nolint:errcheck
sp.Stop()
}
})
}
// CleanupWorkingDirectories is meant to run in an init() once at the start of
// any tests.
func CleanupWorkingDirectories() {
fi, err := os.ReadDir(workdirRoot)
if os.IsNotExist(err) {
return
} else if err != nil {
fmt.Fprintf(os.Stderr, "WARN: sprawltest: unable to scan 'workdir' for prior runs to cleanup\n")
return
} else if len(fi) == 0 {
fmt.Fprintf(os.Stdout, "INFO: sprawltest: no prior tests to clean up\n")
return
}
r, err := runner.Load(hclog.NewNullLogger())
if err != nil {
fmt.Fprintf(os.Stderr, "WARN: sprawltest: unable to look for 'terraform' and 'docker' binaries\n")
return
}
ctx := context.Background()
for _, d := range fi {
if !d.IsDir() {
continue
}
path := filepath.Join(workdirRoot, d.Name(), "terraform")
fmt.Fprintf(os.Stdout, "INFO: sprawltest: cleaning up failed prior run in: %s\n", path)
err := r.TerraformExec(ctx, []string{
"init", "-input=false",
}, io.Discard, path)
err2 := r.TerraformExec(ctx, []string{
"destroy", "-input=false", "-auto-approve", "-refresh=false",
}, io.Discard, path)
if err2 != nil {
err = multierror.Append(err, err2)
}
if err != nil {
fmt.Fprintf(os.Stderr, "WARN: sprawltest: could not clean up failed prior run in: %s: %v\n", path, err)
} else {
_ = os.RemoveAll(path)
}
}
}
func SkipIfTerraformNotPresent(t *testing.T) {
const terraformBinaryName = "terraform"
path, err := exec.LookPath(terraformBinaryName)
if err != nil || path == "" {
t.Skipf("%q not found on $PATH - download and install to run this test", terraformBinaryName)
}
}


@ -0,0 +1,180 @@
package sprawltest_test
import (
"strconv"
"testing"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
)
func TestSprawl(t *testing.T) {
serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
serversDC2 := newTopologyServerSet("dc2-server", 3, []string{"dc2", "wan"}, nil)
cfg := &topology.Config{
Networks: []*topology.Network{
{Name: "dc1"},
{Name: "dc2"},
{Name: "wan", Type: "wan"},
},
Clusters: []*topology.Cluster{
{
Name: "dc1",
Nodes: topology.MergeSlices(serversDC1, []*topology.Node{
{
Kind: topology.NodeKindClient,
Name: "dc1-client1",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
},
},
},
{
Kind: topology.NodeKindClient,
Name: "dc1-client2",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "ping"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
Command: []string{
"-bind", "0.0.0.0:8080",
"-dial", "127.0.0.1:9090",
"-pong-chaos",
"-dialfreq", "250ms",
"-name", "ping",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "pong"},
LocalPort: 9090,
Peer: "peer-dc2-default",
}},
},
},
},
}),
InitialConfigEntries: []api.ConfigEntry{
&api.ExportedServicesConfigEntry{
Name: "default",
Services: []api.ExportedService{{
Name: "ping",
Consumers: []api.ServiceConsumer{{
Peer: "peer-dc2-default",
}},
}},
},
},
},
{
Name: "dc2",
Nodes: topology.MergeSlices(serversDC2, []*topology.Node{
{
Kind: topology.NodeKindClient,
Name: "dc2-client1",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
},
},
},
{
Kind: topology.NodeKindDataplane,
Name: "dc2-client2",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
Command: []string{
"-bind", "0.0.0.0:8080",
"-dial", "127.0.0.1:9090",
"-pong-chaos",
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
LocalPort: 9090,
Peer: "peer-dc1-default",
}},
},
},
},
}),
InitialConfigEntries: []api.ConfigEntry{
&api.ExportedServicesConfigEntry{
Name: "default",
Services: []api.ExportedService{{
Name: "ping",
Consumers: []api.ServiceConsumer{{
Peer: "peer-dc2-default",
}},
}},
},
},
},
},
Peerings: []*topology.Peering{{
Dialing: topology.PeerCluster{
Name: "dc1",
},
Accepting: topology.PeerCluster{
Name: "dc2",
},
}},
}
sp := sprawltest.Launch(t, cfg)
for _, cluster := range sp.Topology().Clusters {
leader, err := sp.Leader(cluster.Name)
require.NoError(t, err)
t.Logf("%s: leader = %s", cluster.Name, leader.ID())
followers, err := sp.Followers(cluster.Name)
require.NoError(t, err)
for _, f := range followers {
t.Logf("%s: follower = %s", cluster.Name, f.ID())
}
}
}
func newTopologyServerSet(
namePrefix string,
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
node := &topology.Node{
Kind: topology.NodeKindServer,
Name: name,
}
for _, net := range networks {
node.Addresses = append(node.Addresses, &topology.Address{Network: net})
}
if mutateFn != nil {
mutateFn(i, node)
}
out = append(out, node)
}
return out
}
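
The `newTopologyServerSet` helper above accepts an optional mutate callback; below is a small sketch (assuming the same test package, with an invented dc3 cluster) of using it to adjust individual servers.

```
// Hypothetical, within the same test package: build a three-server set and
// tweak the last node via the mutate callback (i runs from 1 to num).
var serversDC3 = newTopologyServerSet("dc3-server", 3, []string{"dc3", "wan"},
	func(i int, node *topology.Node) {
		if i == 3 {
			// Mark the last server disabled so the deployer skips it for now.
			node.Disabled = true
		}
	})
```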


@ -0,0 +1,114 @@
package sprawl
import (
"bytes"
"context"
"fmt"
"io"
"github.com/hashicorp/consul/testing/deployer/topology"
)
const (
consulUID = "100"
consulGID = "1000"
consulUserArg = consulUID + ":" + consulGID
)
func tlsPrefixFromNode(node *topology.Node) string {
switch node.Kind {
case topology.NodeKindServer:
return node.Partition + "." + node.Name + ".server"
case topology.NodeKindClient:
return node.Partition + "." + node.Name + ".client"
default:
return ""
}
}
func tlsCertCreateCommand(node *topology.Node) string {
if node.IsServer() {
return fmt.Sprintf(`consul tls cert create -server -dc=%s -node=%s`, node.Datacenter, node.PodName())
} else {
return fmt.Sprintf(`consul tls cert create -client -dc=%s`, node.Datacenter)
}
}
func (s *Sprawl) initTLS(ctx context.Context) error {
for _, cluster := range s.topology.Clusters {
var buf bytes.Buffer
// Create the CA if not already done, and proceed to do all of the
// consul CLI calls inside of a throwaway temp directory.
buf.WriteString(`
if [[ ! -f consul-agent-ca-key.pem || ! -f consul-agent-ca.pem ]]; then
consul tls ca create
fi
rm -rf tmp
mkdir -p tmp
cp -a consul-agent-ca-key.pem consul-agent-ca.pem tmp
cd tmp
`)
for _, node := range cluster.Nodes {
if !node.IsAgent() || node.Disabled {
continue
}
node.TLSCertPrefix = tlsPrefixFromNode(node)
if node.TLSCertPrefix == "" {
continue
}
expectPrefix := cluster.Datacenter + "-" + string(node.Kind) + "-consul-0"
// Conditionally generate these in isolation and rename them to
// not rely upon the numerical indexing.
buf.WriteString(fmt.Sprintf(`
if [[ ! -f %[1]s || ! -f %[2]s ]]; then
rm -f %[3]s %[4]s
%[5]s
mv -f %[3]s %[1]s
mv -f %[4]s %[2]s
fi
`,
"../"+node.TLSCertPrefix+"-key.pem", "../"+node.TLSCertPrefix+".pem",
expectPrefix+"-key.pem", expectPrefix+".pem",
tlsCertCreateCommand(node),
))
}
err := s.runner.DockerExec(ctx, []string{
"run",
"--rm",
"-i",
"--net=none",
"-v", cluster.TLSVolumeName + ":/data",
"busybox:latest",
"sh", "-c",
// Need this so the permissions stick; docker seems to treat unused volumes differently.
`touch /data/VOLUME_PLACEHOLDER && chown -R ` + consulUserArg + ` /data`,
}, io.Discard, nil)
if err != nil {
return fmt.Errorf("could not initialize docker volume for cert data %q: %w", cluster.TLSVolumeName, err)
}
err = s.runner.DockerExec(ctx, []string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", cluster.TLSVolumeName + ":/data",
"-w", "/data",
"--entrypoint", "",
cluster.Images.Consul,
"/bin/sh", "-ec", buf.String(),
}, io.Discard, nil)
if err != nil {
return fmt.Errorf("could not create all necessary TLS certificates in docker volume: %v", err)
}
}
return nil
}


@ -0,0 +1,671 @@
package topology
import (
crand "crypto/rand"
"encoding/hex"
"errors"
"fmt"
"reflect"
"regexp"
"sort"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/go-hclog"
)
const DockerPrefix = "consulcluster"
func Compile(logger hclog.Logger, raw *Config) (*Topology, error) {
return compile(logger, raw, nil)
}
func Recompile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error) {
if prev == nil {
return nil, errors.New("missing previous topology")
}
return compile(logger, raw, prev)
}
func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error) {
var id string
if prev == nil {
var err error
id, err = newTopologyID()
if err != nil {
return nil, err
}
} else {
id = prev.ID
}
images := DefaultImages().OverrideWith(raw.Images)
if images.Consul != "" {
return nil, fmt.Errorf("topology.images.consul cannot be set at this level")
}
if len(raw.Networks) == 0 {
return nil, fmt.Errorf("topology.networks is empty")
}
networks := make(map[string]*Network)
for _, net := range raw.Networks {
if net.DockerName != "" {
return nil, fmt.Errorf("network %q should not specify DockerName", net.Name)
}
if !IsValidLabel(net.Name) {
return nil, fmt.Errorf("network name is not valid: %s", net.Name)
}
if _, exists := networks[net.Name]; exists {
return nil, fmt.Errorf("cannot have two networks with the same name %q", net.Name)
}
switch net.Type {
case "":
net.Type = "lan"
case "wan", "lan":
default:
return nil, fmt.Errorf("network %q has unknown type %q", net.Name, net.Type)
}
networks[net.Name] = net
net.DockerName = DockerPrefix + "-" + net.Name + "-" + id
}
if len(raw.Clusters) == 0 {
return nil, fmt.Errorf("topology.clusters is empty")
}
var (
clusters = make(map[string]*Cluster)
nextIndex int // use a global index so any shared networks work properly with assignments
)
foundPeerNames := make(map[string]map[string]struct{})
for _, c := range raw.Clusters {
if c.Name == "" {
return nil, fmt.Errorf("cluster has no name")
}
foundPeerNames[c.Name] = make(map[string]struct{})
if !IsValidLabel(c.Name) {
return nil, fmt.Errorf("cluster name is not valid: %s", c.Name)
}
if _, exists := clusters[c.Name]; exists {
return nil, fmt.Errorf("cannot have two clusters with the same name %q; use unique names and override the Datacenter field if that's what you want", c.Name)
}
if c.Datacenter == "" {
c.Datacenter = c.Name
} else {
if !IsValidLabel(c.Datacenter) {
return nil, fmt.Errorf("datacenter name is not valid: %s", c.Datacenter)
}
}
clusters[c.Name] = c
if c.NetworkName == "" {
c.NetworkName = c.Name
}
c.Images = images.OverrideWith(c.Images).ChooseConsul(c.Enterprise)
if _, ok := networks[c.NetworkName]; !ok {
return nil, fmt.Errorf("cluster %q uses network name %q that does not exist", c.Name, c.NetworkName)
}
if len(c.Nodes) == 0 {
return nil, fmt.Errorf("cluster %q has no nodes", c.Name)
}
if c.TLSVolumeName != "" {
return nil, fmt.Errorf("user cannot specify the TLSVolumeName field")
}
tenancies := make(map[string]map[string]struct{})
addTenancy := func(partition, namespace string) {
partition = PartitionOrDefault(partition)
namespace = NamespaceOrDefault(namespace)
m, ok := tenancies[partition]
if !ok {
m = make(map[string]struct{})
tenancies[partition] = m
}
m[namespace] = struct{}{}
}
for _, ap := range c.Partitions {
addTenancy(ap.Name, "default")
for _, ns := range ap.Namespaces {
addTenancy(ap.Name, ns)
}
}
for _, ce := range c.InitialConfigEntries {
addTenancy(ce.GetPartition(), ce.GetNamespace())
}
seenNodes := make(map[NodeID]struct{})
for _, n := range c.Nodes {
if n.Name == "" {
return nil, fmt.Errorf("cluster %q node has no name", c.Name)
}
if !IsValidLabel(n.Name) {
return nil, fmt.Errorf("node name is not valid: %s", n.Name)
}
switch n.Kind {
case NodeKindServer, NodeKindClient, NodeKindDataplane:
default:
return nil, fmt.Errorf("cluster %q node %q has invalid kind: %s", c.Name, n.Name, n.Kind)
}
n.Partition = PartitionOrDefault(n.Partition)
if !IsValidLabel(n.Partition) {
return nil, fmt.Errorf("node partition is not valid: %s", n.Partition)
}
addTenancy(n.Partition, "default")
if _, exists := seenNodes[n.ID()]; exists {
return nil, fmt.Errorf("cannot have two nodes in the same cluster %q with the same name %q", c.Name, n.ID())
}
seenNodes[n.ID()] = struct{}{}
if len(n.usedPorts) != 0 {
return nil, fmt.Errorf("user cannot specify the usedPorts field")
}
n.usedPorts = make(map[int]int)
exposePort := func(v int) bool {
if _, ok := n.usedPorts[v]; ok {
return false
}
n.usedPorts[v] = 0
return true
}
if n.IsAgent() {
// TODO: the ux here is awful; we should be able to examine the topology to guess properly
exposePort(8500)
if n.IsServer() {
exposePort(8503)
} else {
exposePort(8502)
}
}
if n.Index != 0 {
return nil, fmt.Errorf("user cannot specify the node index")
}
n.Index = nextIndex
nextIndex++
n.Images = c.Images.OverrideWith(n.Images).ChooseNode(n.Kind)
n.Cluster = c.Name
n.Datacenter = c.Datacenter
n.dockerName = DockerPrefix + "-" + n.Name + "-" + id
if len(n.Addresses) == 0 {
n.Addresses = append(n.Addresses, &Address{Network: c.NetworkName})
}
var (
numPublic int
numLocal int
)
for _, addr := range n.Addresses {
if addr.Network == "" {
return nil, fmt.Errorf("cluster %q node %q has invalid address", c.Name, n.Name)
}
if addr.Type != "" {
return nil, fmt.Errorf("user cannot specify the address type directly")
}
net, ok := networks[addr.Network]
if !ok {
return nil, fmt.Errorf("cluster %q node %q uses network name %q that does not exist", c.Name, n.Name, addr.Network)
}
if net.IsPublic() {
numPublic++
} else if net.IsLocal() {
numLocal++
}
addr.Type = net.Type
addr.DockerNetworkName = net.DockerName
}
if numLocal == 0 {
return nil, fmt.Errorf("cluster %q node %q has no local addresses", c.Name, n.Name)
}
if numPublic > 1 {
return nil, fmt.Errorf("cluster %q node %q has more than one public address", c.Name, n.Name)
}
seenServices := make(map[ServiceID]struct{})
for _, svc := range n.Services {
if n.IsAgent() {
// Default to that of the enclosing node.
svc.ID.Partition = n.Partition
}
svc.ID.Normalize()
// Denormalize
svc.Node = n
if !IsValidLabel(svc.ID.Partition) {
return nil, fmt.Errorf("service partition is not valid: %s", svc.ID.Partition)
}
if !IsValidLabel(svc.ID.Namespace) {
return nil, fmt.Errorf("service namespace is not valid: %s", svc.ID.Namespace)
}
if !IsValidLabel(svc.ID.Name) {
return nil, fmt.Errorf("service name is not valid: %s", svc.ID.Name)
}
addTenancy(svc.ID.Partition, svc.ID.Namespace)
if _, exists := seenServices[svc.ID]; exists {
return nil, fmt.Errorf("cannot have two services on the same node %q in the same cluster %q with the same name %q", n.ID(), c.Name, svc.ID)
}
seenServices[svc.ID] = struct{}{}
if !svc.DisableServiceMesh && n.IsDataplane() {
if svc.EnvoyPublicListenerPort <= 0 {
if _, ok := n.usedPorts[20000]; !ok {
// For convenience the FIRST service on a node can get 20000 for free.
svc.EnvoyPublicListenerPort = 20000
} else {
return nil, fmt.Errorf("envoy public listener port is required")
}
}
}
// add all of the service ports
for _, port := range svc.ports() {
if ok := exposePort(port); !ok {
return nil, fmt.Errorf("port used more than once on cluster %q node %q: %d", c.Name, n.ID(), port)
}
}
// TODO(rb): re-expose?
// switch svc.Protocol {
// case "":
// svc.Protocol = "tcp"
// fallthrough
// case "tcp":
// if svc.CheckHTTP != "" {
// return nil, fmt.Errorf("cannot set CheckHTTP for tcp service")
// }
// case "http":
// if svc.CheckTCP != "" {
// return nil, fmt.Errorf("cannot set CheckTCP for tcp service")
// }
// default:
// return nil, fmt.Errorf("service has invalid protocol: %s", svc.Protocol)
// }
for _, u := range svc.Upstreams {
// Default to that of the enclosing service.
if u.Peer == "" {
if u.ID.Partition == "" {
u.ID.Partition = svc.ID.Partition
}
if u.ID.Namespace == "" {
u.ID.Namespace = svc.ID.Namespace
}
} else {
if u.ID.Partition != "" {
u.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests
}
u.ID.Namespace = NamespaceOrDefault(u.ID.Namespace)
foundPeerNames[c.Name][u.Peer] = struct{}{}
}
if u.ID.Name == "" {
return nil, fmt.Errorf("upstream service name is required")
}
addTenancy(u.ID.Partition, u.ID.Namespace)
}
if err := svc.Validate(); err != nil {
return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, svc.ID.String(), err)
}
}
}
// Explode this into the explicit list based on stray references made.
c.Partitions = nil
for ap, nsMap := range tenancies {
p := &Partition{
Name: ap,
}
for ns := range nsMap {
p.Namespaces = append(p.Namespaces, ns)
}
sort.Strings(p.Namespaces)
c.Partitions = append(c.Partitions, p)
}
sort.Slice(c.Partitions, func(i, j int) bool {
return c.Partitions[i].Name < c.Partitions[j].Name
})
if !c.Enterprise {
expect := []*Partition{{Name: "default", Namespaces: []string{"default"}}}
if !reflect.DeepEqual(c.Partitions, expect) {
return nil, fmt.Errorf("cluster %q references non-default partitions or namespaces but is OSS", c.Name)
}
}
}
clusteredPeerings := make(map[string]map[string]*PeerCluster) // local-cluster -> local-peer -> info
addPeerMapEntry := func(pc PeerCluster) {
pm, ok := clusteredPeerings[pc.Name]
if !ok {
pm = make(map[string]*PeerCluster)
clusteredPeerings[pc.Name] = pm
}
pm[pc.PeerName] = &pc
}
for _, p := range raw.Peerings {
dialingCluster, ok := clusters[p.Dialing.Name]
if !ok {
return nil, fmt.Errorf("peering references a dialing cluster that does not exist: %s", p.Dialing.Name)
}
acceptingCluster, ok := clusters[p.Accepting.Name]
if !ok {
return nil, fmt.Errorf("peering references an accepting cluster that does not exist: %s", p.Accepting.Name)
}
if p.Dialing.Name == p.Accepting.Name {
return nil, fmt.Errorf("self peerings are not allowed: %s", p.Dialing.Name)
}
p.Dialing.Partition = PartitionOrDefault(p.Dialing.Partition)
p.Accepting.Partition = PartitionOrDefault(p.Accepting.Partition)
if dialingCluster.Enterprise {
if !dialingCluster.hasPartition(p.Dialing.Partition) {
return nil, fmt.Errorf("dialing side of peering cannot reference a partition that does not exist: %s", p.Dialing.Partition)
}
} else {
if p.Dialing.Partition != "default" {
return nil, fmt.Errorf("dialing side of peering cannot reference a partition when OSS")
}
}
if acceptingCluster.Enterprise {
if !acceptingCluster.hasPartition(p.Accepting.Partition) {
return nil, fmt.Errorf("accepting side of peering cannot reference a partition that does not exist: %s", p.Accepting.Partition)
}
} else {
if p.Accepting.Partition != "default" {
return nil, fmt.Errorf("accepting side of peering cannot reference a partition when OSS")
}
}
if p.Dialing.PeerName == "" {
p.Dialing.PeerName = "peer-" + p.Accepting.Name + "-" + p.Accepting.Partition
}
if p.Accepting.PeerName == "" {
p.Accepting.PeerName = "peer-" + p.Dialing.Name + "-" + p.Dialing.Partition
}
{ // Ensure the link fields do not have recursive links.
p.Dialing.Link = nil
p.Accepting.Link = nil
// Copy the un-linked data before setting the link
pa := p.Accepting
pd := p.Dialing
p.Accepting.Link = &pd
p.Dialing.Link = &pa
}
addPeerMapEntry(p.Accepting)
addPeerMapEntry(p.Dialing)
delete(foundPeerNames[p.Accepting.Name], p.Accepting.PeerName)
delete(foundPeerNames[p.Dialing.Name], p.Dialing.PeerName)
}
for cluster, peers := range foundPeerNames {
if len(peers) > 0 {
var pretty []string
for name := range peers {
pretty = append(pretty, name)
}
sort.Strings(pretty)
return nil, fmt.Errorf("cluster[%s] found topology references to peerings that do not exist: %v", cluster, pretty)
}
}
// after we decoded the peering stuff, we can fill in some computed data in the upstreams
for _, c := range clusters {
c.Peerings = clusteredPeerings[c.Name]
for _, n := range c.Nodes {
for _, svc := range n.Services {
for _, u := range svc.Upstreams {
if u.Peer == "" {
u.Cluster = c.Name
u.Peering = nil
continue
}
remotePeer, ok := c.Peerings[u.Peer]
if !ok {
return nil, fmt.Errorf("not possible")
}
u.Cluster = remotePeer.Link.Name
u.Peering = remotePeer.Link
// this helps in generating fortio assertions; otherwise field is ignored
u.ID.Partition = remotePeer.Link.Partition
}
}
}
}
t := &Topology{
ID: id,
Networks: networks,
Clusters: clusters,
Images: images,
Peerings: raw.Peerings,
}
if prev != nil {
// networks cannot change
if !sameKeys(prev.Networks, t.Networks) {
return nil, fmt.Errorf("cannot create or destroy networks")
}
for _, newNetwork := range t.Networks {
oldNetwork := prev.Networks[newNetwork.Name]
// Carryover
newNetwork.inheritFromExisting(oldNetwork)
if err := isSame(oldNetwork, newNetwork); err != nil {
return nil, fmt.Errorf("networks cannot change: %w", err)
}
}
// cannot add or remove an entire cluster
if !sameKeys(prev.Clusters, t.Clusters) {
return nil, fmt.Errorf("cannot create or destroy clusters")
}
for _, newCluster := range t.Clusters {
oldCluster := prev.Clusters[newCluster.Name]
// Carryover
newCluster.inheritFromExisting(oldCluster)
if newCluster.Name != oldCluster.Name ||
newCluster.NetworkName != oldCluster.NetworkName ||
newCluster.Datacenter != oldCluster.Datacenter ||
newCluster.Enterprise != oldCluster.Enterprise {
return nil, fmt.Errorf("cannot edit some cluster fields for %q", newCluster.Name)
}
// WARN on presence of some things.
if len(newCluster.InitialConfigEntries) > 0 {
logger.Warn("initial config entries were provided, but are skipped on recompile")
}
// Check NODES
if err := inheritAndValidateNodes(oldCluster.Nodes, newCluster.Nodes); err != nil {
return nil, fmt.Errorf("some immutable aspects of nodes were changed in cluster %q: %w", newCluster.Name, err)
}
}
}
return t, nil
}
const permutedWarning = "use the disabled node kind if you want to ignore a node"
func inheritAndValidateNodes(
prev, curr []*Node,
) error {
nodeMap := mapifyNodes(curr)
for prevIdx, node := range prev {
currNode, ok := nodeMap[node.ID()]
if !ok {
return fmt.Errorf("node %q has vanished; "+permutedWarning, node.ID())
}
// Ensure it hasn't been permuted.
if currNode.Pos != prevIdx {
return fmt.Errorf(
"node %q has been shuffled %d -> %d; "+permutedWarning,
node.ID(),
prevIdx,
currNode.Pos,
)
}
if currNode.Node.Kind != node.Kind ||
currNode.Node.Partition != node.Partition ||
currNode.Node.Name != node.Name ||
currNode.Node.Index != node.Index ||
len(currNode.Node.Addresses) != len(node.Addresses) ||
!sameKeys(currNode.Node.usedPorts, node.usedPorts) {
return fmt.Errorf("cannot edit some node fields for %q", node.ID())
}
currNode.Node.inheritFromExisting(node)
for i := 0; i < len(currNode.Node.Addresses); i++ {
prevAddr := node.Addresses[i]
currAddr := currNode.Node.Addresses[i]
if prevAddr.Network != currAddr.Network {
return fmt.Errorf("addresses were shuffled for node %q", node.ID())
}
if prevAddr.Type != currAddr.Type {
return fmt.Errorf("cannot edit some address fields for %q", node.ID())
}
currAddr.inheritFromExisting(prevAddr)
}
svcMap := mapifyServices(currNode.Node.Services)
for _, svc := range node.Services {
currSvc, ok := svcMap[svc.ID]
if !ok {
continue // service has vanished, this is ok
}
// don't care about index permutation
if currSvc.ID != svc.ID ||
currSvc.Port != svc.Port ||
currSvc.EnvoyAdminPort != svc.EnvoyAdminPort ||
currSvc.EnvoyPublicListenerPort != svc.EnvoyPublicListenerPort ||
isSame(currSvc.Command, svc.Command) != nil ||
isSame(currSvc.Env, svc.Env) != nil {
return fmt.Errorf("cannot edit some address fields for %q", svc.ID)
}
currSvc.inheritFromExisting(svc)
}
}
return nil
}
func newTopologyID() (string, error) {
const n = 16
id := make([]byte, n)
if _, err := crand.Read(id[:]); err != nil {
return "", err
}
return hex.EncodeToString(id)[:n], nil
}
// matches valid DNS labels according to RFC 1123, should be at most 63
// characters according to the RFC
var validLabel = regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$`)
// IsValidLabel returns true if the string given is a valid DNS label (RFC 1123).
// Note: the only difference between RFC 1035 and RFC 1123 labels is that in
// RFC 1123 labels can begin with a number.
func IsValidLabel(name string) bool {
return validLabel.MatchString(name)
}
// ValidateLabel is similar to IsValidLabel except it returns an error
// instead of false when name is not a valid DNS label. The error will contain
// reference to what constitutes a valid DNS label.
func ValidateLabel(name string) error {
if !IsValidLabel(name) {
return errors.New("a valid DNS label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character")
}
return nil
}
func isSame(x, y any) error {
diff := cmp.Diff(x, y)
if diff != "" {
return fmt.Errorf("values are not equal\n--- expected\n+++ actual\n%v", diff)
}
return nil
}
func sameKeys[K comparable, V any](x, y map[K]V) bool {
if len(x) != len(y) {
return false
}
for kx := range x {
if _, ok := y[kx]; !ok {
return false
}
}
return true
}
func mapifyNodes(nodes []*Node) map[NodeID]nodeWithPosition {
m := make(map[NodeID]nodeWithPosition)
for i, node := range nodes {
m[node.ID()] = nodeWithPosition{
Pos: i,
Node: node,
}
}
return m
}
type nodeWithPosition struct {
Pos int
Node *Node
}
func mapifyServices(services []*Service) map[ServiceID]*Service {
m := make(map[ServiceID]*Service)
for _, svc := range services {
m[svc.ID] = svc
}
return m
}
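
A brief sketch (not from this commit) of driving the compiler directly. `Recompile` additionally enforces the immutability rules above: networks and clusters cannot be added or removed, and nodes should be marked disabled rather than deleted.

```
package main

import (
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

func compileExample(cfg *topology.Config) (*topology.Topology, error) {
	logger := hclog.Default()

	// Compile validates the config and denormalizes it into a Topology keyed
	// by cluster/network name (docker names, peer names, image selection).
	topo, err := topology.Compile(logger, cfg)
	if err != nil {
		return nil, err
	}
	for _, c := range topo.SortedClusters() {
		logger.Info("compiled cluster", "name", c.Name, "nodes", len(c.Nodes))
	}
	return topo, nil
}

func main() {}
```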


@ -0,0 +1,3 @@
package topology
const DefaultDataplaneImage = "hashicorp/consul-dataplane:1.1.0"


@ -0,0 +1,4 @@
package topology
const DefaultConsulImage = "hashicorp/consul:1.15.2"
const DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:1.15.2-ent"


@ -0,0 +1,3 @@
package topology
const DefaultEnvoyImage = "envoyproxy/envoy:v1.25.1"


@ -0,0 +1,142 @@
package topology
import (
"fmt"
"github.com/hashicorp/consul/api"
)
type NodeServiceID struct {
Node string
Service string `json:",omitempty"`
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
}
func NewNodeServiceID(node, service, namespace, partition string) NodeServiceID {
id := NodeServiceID{
Node: node,
Service: service,
Namespace: namespace,
Partition: partition,
}
id.Normalize()
return id
}
func (id NodeServiceID) NodeID() NodeID {
return NewNodeID(id.Node, id.Partition)
}
func (id NodeServiceID) ServiceID() ServiceID {
return NewServiceID(id.Service, id.Namespace, id.Partition)
}
func (id *NodeServiceID) Normalize() {
id.Namespace = NamespaceOrDefault(id.Namespace)
id.Partition = PartitionOrDefault(id.Partition)
}
func (id NodeServiceID) String() string {
return fmt.Sprintf("%s/%s/%s/%s", id.Partition, id.Node, id.Namespace, id.Service)
}
type NodeID struct {
Name string `json:",omitempty"`
Partition string `json:",omitempty"`
}
func NewNodeID(name, partition string) NodeID {
id := NodeID{
Name: name,
Partition: partition,
}
id.Normalize()
return id
}
func (id *NodeID) Normalize() {
id.Partition = PartitionOrDefault(id.Partition)
}
func (id NodeID) String() string {
return fmt.Sprintf("%s/%s", id.Partition, id.Name)
}
func (id NodeID) ACLString() string {
return fmt.Sprintf("%s--%s", id.Partition, id.Name)
}
func (id NodeID) TFString() string {
return id.ACLString()
}
type ServiceID struct {
Name string `json:",omitempty"`
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
}
func NewServiceID(name, namespace, partition string) ServiceID {
id := ServiceID{
Name: name,
Namespace: namespace,
Partition: partition,
}
id.Normalize()
return id
}
func (id ServiceID) Less(other ServiceID) bool {
if id.Partition != other.Partition {
return id.Partition < other.Partition
}
if id.Namespace != other.Namespace {
return id.Namespace < other.Namespace
}
return id.Name < other.Name
}
func (id *ServiceID) Normalize() {
id.Namespace = NamespaceOrDefault(id.Namespace)
id.Partition = PartitionOrDefault(id.Partition)
}
func (id ServiceID) String() string {
return fmt.Sprintf("%s/%s/%s", id.Partition, id.Namespace, id.Name)
}
func (id ServiceID) ACLString() string {
return fmt.Sprintf("%s--%s--%s", id.Partition, id.Namespace, id.Name)
}
func (id ServiceID) TFString() string {
return id.ACLString()
}
func PartitionOrDefault(name string) string {
if name == "" {
return "default"
}
return name
}
func NamespaceOrDefault(name string) string {
if name == "" {
return "default"
}
return name
}
func DefaultToEmpty(name string) string {
if name == "default" {
return ""
}
return name
}
// PartitionQueryOptions returns an *api.QueryOptions with the given partition
// field set only if the partition is non-default. This helps when writing
// tests for joint use in OSS and ENT.
func PartitionQueryOptions(partition string) *api.QueryOptions {
return &api.QueryOptions{
Partition: DefaultToEmpty(partition),
}
}
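
A short illustration (not part of the commit) of the normalization and string forms above:

```
package main

import (
	"fmt"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

func main() {
	// Empty namespace/partition fields normalize to "default".
	sid := topology.NewServiceID("ping", "", "")
	fmt.Println(sid.String())   // default/default/ping
	fmt.Println(sid.TFString()) // default--default--ping (same as ACLString)

	nid := topology.NewNodeID("dc1-server1", "")
	fmt.Println(nid.String()) // default/dc1-server1
}
```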


@ -0,0 +1,123 @@
package topology
import (
"strings"
)
type Images struct {
Consul string `json:",omitempty"`
ConsulOSS string `json:",omitempty"`
ConsulEnterprise string `json:",omitempty"`
Envoy string
Dataplane string
}
func (i Images) LocalDataplaneImage() string {
if i.Dataplane == "" {
return ""
}
img, tag, ok := strings.Cut(i.Dataplane, ":")
if !ok {
tag = "latest"
}
repo, name, ok := strings.Cut(img, "/")
if ok {
name = repo + "-" + name
}
// ex: local/hashicorp-consul-dataplane:1.1.0
return "local/" + name + ":" + tag
}
func (i Images) EnvoyConsulImage() string {
if i.Consul == "" || i.Envoy == "" {
return ""
}
img1, tag1, ok1 := strings.Cut(i.Consul, ":")
img2, tag2, ok2 := strings.Cut(i.Envoy, ":")
if !ok1 {
tag1 = "latest"
}
if !ok2 {
tag2 = "latest"
}
repo1, name1, ok1 := strings.Cut(img1, "/")
repo2, name2, ok2 := strings.Cut(img2, "/")
if ok1 {
name1 = repo1 + "-" + name1
} else {
name1 = repo1
}
if ok2 {
name2 = repo2 + "-" + name2
} else {
name2 = repo2
}
// ex: local/hashicorp-consul-and-envoyproxy-envoy:1.15.0-with-v1.26.2
return "local/" + name1 + "-and-" + name2 + ":" + tag1 + "-with-" + tag2
}
func (i Images) ChooseNode(kind NodeKind) Images {
switch kind {
case NodeKindServer:
i.Envoy = ""
i.Dataplane = ""
case NodeKindClient:
i.Dataplane = ""
case NodeKindDataplane:
i.Envoy = ""
default:
// do nothing
}
return i
}
func (i Images) ChooseConsul(enterprise bool) Images {
if enterprise {
i.Consul = i.ConsulEnterprise
} else {
i.Consul = i.ConsulOSS
}
i.ConsulEnterprise = ""
i.ConsulOSS = ""
return i
}
func (i Images) OverrideWith(i2 Images) Images {
if i2.Consul != "" {
i.Consul = i2.Consul
}
if i2.ConsulOSS != "" {
i.ConsulOSS = i2.ConsulOSS
}
if i2.ConsulEnterprise != "" {
i.ConsulEnterprise = i2.ConsulEnterprise
}
if i2.Envoy != "" {
i.Envoy = i2.Envoy
}
if i2.Dataplane != "" {
i.Dataplane = i2.Dataplane
}
return i
}
// DefaultImages controls which specific docker images are used as default
// values for topology components that do not specify values.
//
// These can be bulk-updated using the make target 'make update-defaults'
func DefaultImages() Images {
return Images{
Consul: "",
ConsulOSS: DefaultConsulImage,
ConsulEnterprise: DefaultConsulEnterpriseImage,
Envoy: DefaultEnvoyImage,
Dataplane: DefaultDataplaneImage,
}
}
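
For reference, a sketch (not from the commit) of the resolution order the topology compiler applies using these helpers; the Envoy override value is illustrative.

```
package main

import "github.com/hashicorp/consul/testing/deployer/topology"

func resolveClientImages() topology.Images {
	imgs := topology.DefaultImages().
		OverrideWith(topology.Images{Envoy: "envoyproxy/envoy:v1.26.2"}).
		ChooseConsul(false) // collapse ConsulOSS/ConsulEnterprise into Consul

	// Per-node selection: a client node runs Consul and Envoy but has no use
	// for the dataplane image, so ChooseNode blanks that field.
	return imgs.ChooseNode(topology.NodeKindClient)
}

func main() { _ = resolveClientImages() }
```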


@ -0,0 +1,98 @@
package topology
import (
"strconv"
"testing"
"github.com/stretchr/testify/require"
)
func TestImages_EnvoyConsulImage(t *testing.T) {
type testcase struct {
consul, envoy string
expect string
}
run := func(t *testing.T, tc testcase) {
i := Images{Consul: tc.consul, Envoy: tc.envoy}
j := i.EnvoyConsulImage()
require.Equal(t, tc.expect, j)
}
cases := []testcase{
{
consul: "",
envoy: "",
expect: "",
},
{
consul: "consul",
envoy: "",
expect: "",
},
{
consul: "",
envoy: "envoy",
expect: "",
},
{
consul: "consul",
envoy: "envoy",
expect: "local/consul-and-envoy:latest-with-latest",
},
// repos
{
consul: "hashicorp/consul",
envoy: "envoy",
expect: "local/hashicorp-consul-and-envoy:latest-with-latest",
},
{
consul: "consul",
envoy: "envoyproxy/envoy",
expect: "local/consul-and-envoyproxy-envoy:latest-with-latest",
},
{
consul: "hashicorp/consul",
envoy: "envoyproxy/envoy",
expect: "local/hashicorp-consul-and-envoyproxy-envoy:latest-with-latest",
},
// tags
{
consul: "consul:1.15.0",
envoy: "envoy",
expect: "local/consul-and-envoy:1.15.0-with-latest",
},
{
consul: "consul",
envoy: "envoy:v1.26.1",
expect: "local/consul-and-envoy:latest-with-v1.26.1",
},
{
consul: "consul:1.15.0",
envoy: "envoy:v1.26.1",
expect: "local/consul-and-envoy:1.15.0-with-v1.26.1",
},
// repos+tags
{
consul: "hashicorp/consul:1.15.0",
envoy: "envoy:v1.26.1",
expect: "local/hashicorp-consul-and-envoy:1.15.0-with-v1.26.1",
},
{
consul: "consul:1.15.0",
envoy: "envoyproxy/envoy:v1.26.1",
expect: "local/consul-and-envoyproxy-envoy:1.15.0-with-v1.26.1",
},
{
consul: "hashicorp/consul:1.15.0",
envoy: "envoyproxy/envoy:v1.26.1",
expect: "local/hashicorp-consul-and-envoyproxy-envoy:1.15.0-with-v1.26.1",
},
}
for i, tc := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
run(t, tc)
})
}
}


@ -0,0 +1,787 @@
package topology
import (
"errors"
"fmt"
"net"
"net/netip"
"reflect"
"sort"
"github.com/hashicorp/consul/api"
)
type Topology struct {
ID string
// Images controls which specific docker images are used when running this
// topology. Non-empty fields here override non-empty fields inherited from
// the general default values from DefaultImages().
Images Images
// Networks is the list of networks to create for this set of clusters.
Networks map[string]*Network
// Clusters defines the list of Consul clusters that should be created, and
// their associated workloads.
Clusters map[string]*Cluster
// Peerings defines the list of pairwise peerings that should be established
// between clusters.
Peerings []*Peering `json:",omitempty"`
}
func (t *Topology) DigestExposedProxyPort(netName string, proxyPort int) (bool, error) {
net, ok := t.Networks[netName]
if !ok {
return false, fmt.Errorf("found output network that does not exist: %s", netName)
}
if net.ProxyPort == proxyPort {
return false, nil
}
net.ProxyPort = proxyPort
// Denormalize for UX.
for _, cluster := range t.Clusters {
for _, node := range cluster.Nodes {
for _, addr := range node.Addresses {
if addr.Network == netName {
addr.ProxyPort = proxyPort
}
}
}
}
return true, nil
}
func (t *Topology) SortedNetworks() []*Network {
var out []*Network
for _, n := range t.Networks {
out = append(out, n)
}
sort.Slice(out, func(i, j int) bool {
return out[i].Name < out[j].Name
})
return out
}
func (t *Topology) SortedClusters() []*Cluster {
var out []*Cluster
for _, c := range t.Clusters {
out = append(out, c)
}
sort.Slice(out, func(i, j int) bool {
return out[i].Name < out[j].Name
})
return out
}
type Config struct {
// Images controls which specific docker images are used when running this
// configuration. Non-empty fields here override non-empty fields inherited
// from the general default values from DefaultImages().
Images Images
// Networks is the list of networks to create for this set of clusters.
Networks []*Network
// Clusters defines the list of Consul clusters that should be created, and
// their associated workloads.
Clusters []*Cluster
// Peerings defines the list of pairwise peerings that should be established
// between clusters.
Peerings []*Peering
}
func (c *Config) Cluster(name string) *Cluster {
for _, cluster := range c.Clusters {
if cluster.Name == name {
return cluster
}
}
return nil
}
type Network struct {
Type string // lan/wan ; empty means lan
Name string // logical name
// computed at topology compile
DockerName string
// generated during network-and-tls
Subnet string
IPPool []string `json:"-"`
// generated during network-and-tls
ProxyAddress string `json:",omitempty"`
DNSAddress string `json:",omitempty"`
// filled in from terraform outputs after network-and-tls
ProxyPort int `json:",omitempty"`
}
func (n *Network) IsLocal() bool {
return n.Type == "" || n.Type == "lan"
}
func (n *Network) IsPublic() bool {
return n.Type == "wan"
}
func (n *Network) inheritFromExisting(existing *Network) {
n.Subnet = existing.Subnet
n.IPPool = existing.IPPool
n.ProxyAddress = existing.ProxyAddress
n.DNSAddress = existing.DNSAddress
n.ProxyPort = existing.ProxyPort
}
func (n *Network) IPByIndex(index int) string {
if index >= len(n.IPPool) {
panic(fmt.Sprintf(
"not enough ips on this network to assign index %d: %d",
len(n.IPPool), index,
))
}
return n.IPPool[index]
}
func (n *Network) SetSubnet(subnet string) (bool, error) {
if n.Subnet == subnet {
return false, nil
}
p, err := netip.ParsePrefix(subnet)
if err != nil {
return false, err
}
if !p.IsValid() {
return false, errors.New("not valid")
}
p = p.Masked()
var ipPool []string
addr := p.Addr()
for {
if !p.Contains(addr) {
break
}
ipPool = append(ipPool, addr.String())
addr = addr.Next()
}
ipPool = ipPool[2:] // skip the x.x.x.{0,1}
n.Subnet = subnet
n.IPPool = ipPool
return true, nil
}
// Cluster represents a single standalone install of Consul. This is the unit
// of what is peered when using cluster peering. Older consul installs would
// call this a datacenter.
type Cluster struct {
Name string
NetworkName string // empty assumes same as Name
// Images controls which specific docker images are used when running this
// cluster. Non-empty fields here override non-empty fields inherited from
// the enclosing Topology.
Images Images
// Enterprise marks this cluster as desiring to run Consul Enterprise
// components.
Enterprise bool `json:",omitempty"`
// Nodes is the definition of the nodes (agent-less and agent-ful).
Nodes []*Node
// Partitions is a list of tenancy configurations that should be created
// after the servers come up but before the clients and the rest of the
// topology starts.
//
// Enterprise Only.
Partitions []*Partition `json:",omitempty"`
// Datacenter defaults to "Name" if left unspecified. It lets you possibly
// create multiple peer clusters with identical datacenter names.
Datacenter string
// InitialConfigEntries is a convenience mechanism to have some config
// entries created after the servers start up but before the rest of the
// topology comes up.
InitialConfigEntries []api.ConfigEntry `json:",omitempty"`
// TLSVolumeName is the docker volume name containing the various certs
// generated by 'consul tls cert create'
//
// This is generated during the networking phase and is not user specified.
TLSVolumeName string `json:",omitempty"`
// Peerings is a map of peering names to information about that peering in this cluster
//
// Denormalized during compile.
Peerings map[string]*PeerCluster `json:",omitempty"`
}
func (c *Cluster) inheritFromExisting(existing *Cluster) {
c.TLSVolumeName = existing.TLSVolumeName
}
type Partition struct {
Name string
Namespaces []string
}
func (c *Cluster) hasPartition(p string) bool {
for _, partition := range c.Partitions {
if partition.Name == p {
return true
}
}
return false
}
func (c *Cluster) PartitionQueryOptionsList() []*api.QueryOptions {
if !c.Enterprise {
return []*api.QueryOptions{{}}
}
var out []*api.QueryOptions
for _, p := range c.Partitions {
out = append(out, &api.QueryOptions{Partition: p.Name})
}
return out
}
func (c *Cluster) ServerNodes() []*Node {
var out []*Node
for _, node := range c.SortedNodes() {
if node.Kind != NodeKindServer || node.Disabled {
continue
}
out = append(out, node)
}
return out
}
func (c *Cluster) ServerByAddr(addr string) *Node {
expect, _, err := net.SplitHostPort(addr)
if err != nil {
return nil
}
for _, node := range c.Nodes {
if node.Kind != NodeKindServer || node.Disabled {
continue
}
if node.LocalAddress() == expect {
return node
}
}
return nil
}
func (c *Cluster) FirstServer() *Node {
for _, node := range c.Nodes {
if node.IsServer() && !node.Disabled && node.ExposedPort(8500) > 0 {
return node
}
}
return nil
}
func (c *Cluster) FirstClient() *Node {
for _, node := range c.Nodes {
if node.Kind != NodeKindClient || node.Disabled {
continue
}
return node
}
return nil
}
func (c *Cluster) ActiveNodes() []*Node {
var out []*Node
for _, node := range c.Nodes {
if !node.Disabled {
out = append(out, node)
}
}
return out
}
func (c *Cluster) SortedNodes() []*Node {
var out []*Node
out = append(out, c.Nodes...)
kindOrder := map[NodeKind]int{
NodeKindServer: 1,
NodeKindClient: 2,
NodeKindDataplane: 2,
}
sort.Slice(out, func(i, j int) bool {
ni, nj := out[i], out[j]
// servers before clients/dataplanes
ki, kj := kindOrder[ni.Kind], kindOrder[nj.Kind]
if ki < kj {
return true
} else if ki > kj {
return false
}
// lex sort by partition
if ni.Partition < nj.Partition {
return true
} else if ni.Partition > nj.Partition {
return false
}
// lex sort by name
return ni.Name < nj.Name
})
return out
}
func (c *Cluster) FindService(id NodeServiceID) *Service {
id.Normalize()
nid := id.NodeID()
sid := id.ServiceID()
return c.ServiceByID(nid, sid)
}
func (c *Cluster) ServiceByID(nid NodeID, sid ServiceID) *Service {
return c.NodeByID(nid).ServiceByID(sid)
}
func (c *Cluster) ServicesByID(sid ServiceID) []*Service {
sid.Normalize()
var out []*Service
for _, n := range c.Nodes {
for _, svc := range n.Services {
if svc.ID == sid {
out = append(out, svc)
}
}
}
return out
}
func (c *Cluster) NodeByID(nid NodeID) *Node {
nid.Normalize()
for _, n := range c.Nodes {
if n.ID() == nid {
return n
}
}
panic("node not found: " + nid.String())
}
type Address struct {
Network string
// denormalized at topology compile
Type string
// denormalized at topology compile
DockerNetworkName string
// generated after network-and-tls
IPAddress string
// denormalized from terraform outputs stored in the Network
ProxyPort int `json:",omitempty"`
}
func (a *Address) inheritFromExisting(existing *Address) {
a.IPAddress = existing.IPAddress
a.ProxyPort = existing.ProxyPort
}
func (a Address) IsLocal() bool {
return a.Type == "" || a.Type == "lan"
}
func (a Address) IsPublic() bool {
return a.Type == "wan"
}
type NodeKind string
const (
NodeKindUnknown NodeKind = ""
NodeKindServer NodeKind = "server"
NodeKindClient NodeKind = "client"
NodeKindDataplane NodeKind = "dataplane"
)
// TODO: rename pod
type Node struct {
Kind NodeKind
Partition string // will not be empty
Name string // logical name
// Images controls which specific docker images are used when running this
// node. Non-empty fields here override non-empty fields inherited from
// the enclosing Cluster.
Images Images
// AgentEnv contains optional environment variables to attach to Consul agents.
AgentEnv []string
Disabled bool `json:",omitempty"`
Addresses []*Address
Services []*Service
// denormalized at topology compile
Cluster string
Datacenter string
// computed at topology compile
Index int
// generated during network-and-tls
TLSCertPrefix string `json:",omitempty"`
// dockerName is computed at topology compile
dockerName string
// usedPorts has keys that are computed at topology compile (internal
// ports) and values initialized to zero until terraform creates the pods
// and extracts the exposed port values from output variables.
usedPorts map[int]int // keys are from compile / values are from terraform output vars
}
func (n *Node) DockerName() string {
return n.dockerName
}
func (n *Node) ExposedPort(internalPort int) int {
return n.usedPorts[internalPort]
}
func (n *Node) SortedPorts() []int {
var out []int
for internalPort := range n.usedPorts {
out = append(out, internalPort)
}
sort.Ints(out)
return out
}
func (n *Node) inheritFromExisting(existing *Node) {
n.TLSCertPrefix = existing.TLSCertPrefix
merged := existing.usedPorts
for k, vNew := range n.usedPorts {
if _, present := merged[k]; !present {
merged[k] = vNew
}
}
n.usedPorts = merged
}
func (n *Node) String() string {
return n.ID().String()
}
func (n *Node) ID() NodeID {
return NewNodeID(n.Name, n.Partition)
}
func (n *Node) CatalogID() NodeID {
return NewNodeID(n.PodName(), n.Partition)
}
func (n *Node) PodName() string {
return n.dockerName + "-pod"
}
func (n *Node) AddressByNetwork(name string) *Address {
for _, a := range n.Addresses {
if a.Network == name {
return a
}
}
return nil
}
func (n *Node) LocalAddress() string {
for _, a := range n.Addresses {
if a.IsLocal() {
if a.IPAddress == "" {
panic("node has no assigned local address")
}
return a.IPAddress
}
}
panic("node has no local network")
}
func (n *Node) HasPublicAddress() bool {
for _, a := range n.Addresses {
if a.IsPublic() {
return true
}
}
return false
}
func (n *Node) LocalProxyPort() int {
for _, a := range n.Addresses {
if a.IsLocal() {
if a.ProxyPort > 0 {
return a.ProxyPort
}
panic("node has no assigned local address")
}
}
panic("node has no local network")
}
func (n *Node) PublicAddress() string {
for _, a := range n.Addresses {
if a.IsPublic() {
if a.IPAddress == "" {
panic("node has no assigned public address")
}
return a.IPAddress
}
}
panic("node has no public network")
}
func (n *Node) PublicProxyPort() int {
for _, a := range n.Addresses {
if a.IsPublic() {
if a.ProxyPort > 0 {
return a.ProxyPort
}
panic("node has no assigned public address")
}
}
panic("node has no public network")
}
func (n *Node) IsServer() bool {
return n.Kind == NodeKindServer
}
func (n *Node) IsAgent() bool {
return n.Kind == NodeKindServer || n.Kind == NodeKindClient
}
func (n *Node) RunsWorkloads() bool {
return n.IsAgent() || n.IsDataplane()
}
func (n *Node) IsDataplane() bool {
return n.Kind == NodeKindDataplane
}
func (n *Node) SortedServices() []*Service {
var out []*Service
out = append(out, n.Services...)
sort.Slice(out, func(i, j int) bool {
mi := out[i].IsMeshGateway
mj := out[j].IsMeshGateway
if mi && !mi {
return false
} else if !mi && mj {
return true
}
return out[i].ID.Less(out[j].ID)
})
return out
}
// DigestExposedPorts returns true if the node's exposed port map was changed.
func (n *Node) DigestExposedPorts(ports map[int]int) bool {
if reflect.DeepEqual(n.usedPorts, ports) {
return false
}
for internalPort := range n.usedPorts {
if v, ok := ports[internalPort]; ok {
n.usedPorts[internalPort] = v
} else {
panic(fmt.Sprintf(
"cluster %q node %q port %d not found in exposed list",
n.Cluster,
n.ID(),
internalPort,
))
}
}
for _, svc := range n.Services {
svc.DigestExposedPorts(ports)
}
return true
}
func (n *Node) ServiceByID(sid ServiceID) *Service {
sid.Normalize()
for _, svc := range n.Services {
if svc.ID == sid {
return svc
}
}
panic("service not found: " + sid.String())
}
type ServiceAndNode struct {
Service *Service
Node *Node
}
type Service struct {
ID ServiceID
Image string
Port int
ExposedPort int `json:",omitempty"`
Disabled bool `json:",omitempty"` // TODO
// TODO: expose extra port here?
Meta map[string]string `json:",omitempty"`
// TODO(rb): re-expose this perhaps? Protocol string `json:",omitempty"` // tcp|http (empty == tcp)
CheckHTTP string `json:",omitempty"` // url; will do a GET
CheckTCP string `json:",omitempty"` // addr; will do a socket open/close
EnvoyAdminPort int
ExposedEnvoyAdminPort int `json:",omitempty"`
EnvoyPublicListenerPort int `json:",omitempty"` // agentless
Command []string `json:",omitempty"` // optional
Env []string `json:",omitempty"` // optional
DisableServiceMesh bool `json:",omitempty"`
IsMeshGateway bool `json:",omitempty"`
Upstreams []*Upstream
// denormalized at topology compile
Node *Node `json:"-"`
}
func (s *Service) inheritFromExisting(existing *Service) {
s.ExposedPort = existing.ExposedPort
s.ExposedEnvoyAdminPort = existing.ExposedEnvoyAdminPort
}
func (s *Service) ports() []int {
var out []int
if s.Port > 0 {
out = append(out, s.Port)
}
if s.EnvoyAdminPort > 0 {
out = append(out, s.EnvoyAdminPort)
}
if s.EnvoyPublicListenerPort > 0 {
out = append(out, s.EnvoyPublicListenerPort)
}
for _, u := range s.Upstreams {
if u.LocalPort > 0 {
out = append(out, u.LocalPort)
}
}
return out
}
func (s *Service) HasCheck() bool {
return s.CheckTCP != "" || s.CheckHTTP != ""
}
func (s *Service) DigestExposedPorts(ports map[int]int) {
s.ExposedPort = ports[s.Port]
if s.EnvoyAdminPort > 0 {
s.ExposedEnvoyAdminPort = ports[s.EnvoyAdminPort]
} else {
s.ExposedEnvoyAdminPort = 0
}
}
func (s *Service) Validate() error {
if s.ID.Name == "" {
return fmt.Errorf("service name is required")
}
if s.Image == "" && !s.IsMeshGateway {
return fmt.Errorf("service image is required")
}
if s.Port <= 0 {
return fmt.Errorf("service has invalid port")
}
if s.DisableServiceMesh && s.IsMeshGateway {
return fmt.Errorf("cannot disable service mesh and still run a mesh gateway")
}
if s.DisableServiceMesh && len(s.Upstreams) > 0 {
return fmt.Errorf("cannot disable service mesh and configure upstreams")
}
if s.DisableServiceMesh {
if s.EnvoyAdminPort != 0 {
return fmt.Errorf("cannot use envoy admin port without a service mesh")
}
} else {
if s.EnvoyAdminPort <= 0 {
return fmt.Errorf("envoy admin port is required")
}
}
for _, u := range s.Upstreams {
if u.ID.Name == "" {
return fmt.Errorf("upstream service name is required")
}
if u.LocalPort <= 0 {
return fmt.Errorf("upstream local port is required")
}
if u.LocalAddress != "" {
ip := net.ParseIP(u.LocalAddress)
if ip == nil {
return fmt.Errorf("upstream local address is invalid: %s", u.LocalAddress)
}
}
}
return nil
}
type Upstream struct {
ID ServiceID
LocalAddress string `json:",omitempty"` // defaults to 127.0.0.1
LocalPort int
Peer string `json:",omitempty"`
// TODO: what about mesh gateway mode overrides?
// computed at topology compile
Cluster string `json:",omitempty"`
Peering *PeerCluster `json:",omitempty"` // this will have Link!=nil
}
type Peering struct {
Dialing PeerCluster
Accepting PeerCluster
}
type PeerCluster struct {
Name string
Partition string
PeerName string // name to call it on this side; defaults if not specified
// computed at topology compile (pointer so it can be empty in json)
Link *PeerCluster `json:",omitempty"`
}
func (c PeerCluster) String() string {
return c.Name + ":" + c.Partition
}
func (p *Peering) String() string {
return "(" + p.Dialing.String() + ")->(" + p.Accepting.String() + ")"
}
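
As a quick illustration of how `SetSubnet` seeds the pool that `IPByIndex` later hands out, here is a minimal sketch; it assumes the `github.com/hashicorp/consul/testing/deployer/topology` import path introduced by this change.

```
package main

import (
	"fmt"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

func main() {
	n := &topology.Network{Name: "dc1"}
	if _, err := n.SetSubnet("10.0.3.0/24"); err != nil {
		panic(err)
	}
	// SetSubnet enumerates 10.0.3.0-10.0.3.255 and drops the first two
	// addresses, so index 0 maps to 10.0.3.2.
	fmt.Println(n.IPByIndex(0))
}
```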

View File

@ -0,0 +1,17 @@
package topology
func MergeSlices[V any](x, y []V) []V {
switch {
case len(x) == 0 && len(y) == 0:
return nil
case len(x) == 0:
return y
case len(y) == 0:
return x
}
out := make([]V, 0, len(x)+len(y))
out = append(out, x...)
out = append(out, y...)
return out
}
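
For example (a minimal sketch, shown as a fragment):

```
a := []string{"server1"}
b := []string{"client1", "client2"}
merged := MergeSlices(a, b)            // []string{"server1", "client1", "client2"}
empty := MergeSlices[string](nil, nil) // nil, not an empty non-nil slice
```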

View File

@ -0,0 +1,11 @@
package topology
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestMergeSlices(t *testing.T) {
require.Nil(t, MergeSlices[int](nil, nil))
}

View File

@ -0,0 +1,63 @@
package util
import (
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-cleanhttp"
)
func ProxyNotPooledAPIClient(proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) {
return proxyAPIClient(cleanhttp.DefaultTransport(), proxyPort, containerIP, containerPort, token)
}
func ProxyAPIClient(proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) {
return proxyAPIClient(cleanhttp.DefaultPooledTransport(), proxyPort, containerIP, containerPort, token)
}
func proxyAPIClient(baseTransport *http.Transport, proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) {
if proxyPort <= 0 {
return nil, fmt.Errorf("cannot use an http proxy on port %d", proxyPort)
}
if containerIP == "" {
return nil, fmt.Errorf("container IP is required")
}
if containerPort <= 0 {
return nil, fmt.Errorf("cannot dial api client on port %d", containerPort)
}
proxyURL, err := url.Parse("http://127.0.0.1:" + strconv.Itoa(proxyPort))
if err != nil {
return nil, err
}
cfg := api.DefaultConfig()
cfg.Transport = baseTransport
cfg.Transport.Proxy = http.ProxyURL(proxyURL)
cfg.Address = fmt.Sprintf("http://%s:%d", containerIP, containerPort)
cfg.Token = token
return api.NewClient(cfg)
}
func ProxyNotPooledHTTPTransport(proxyPort int) (*http.Transport, error) {
return proxyHTTPTransport(cleanhttp.DefaultTransport(), proxyPort)
}
func ProxyHTTPTransport(proxyPort int) (*http.Transport, error) {
return proxyHTTPTransport(cleanhttp.DefaultPooledTransport(), proxyPort)
}
func proxyHTTPTransport(baseTransport *http.Transport, proxyPort int) (*http.Transport, error) {
if proxyPort <= 0 {
return nil, fmt.Errorf("cannot use an http proxy on port %d", proxyPort)
}
proxyURL, err := url.Parse("http://127.0.0.1:" + strconv.Itoa(proxyPort))
if err != nil {
return nil, err
}
baseTransport.Proxy = http.ProxyURL(proxyURL)
return baseTransport, nil
}
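
A minimal sketch of how these helpers are meant to be used together with the topology types, assuming the `testing/deployer/topology` and `testing/deployer/util` import paths added in this change and a deployed topology whose local network exposes an HTTP proxy:

```
package example

import (
	"fmt"

	"github.com/hashicorp/consul/testing/deployer/topology"
	"github.com/hashicorp/consul/testing/deployer/util"
)

// leaderOf dials a server node's HTTP API (port 8500 inside the container)
// through the per-network HTTP proxy exposed on localhost.
func leaderOf(cluster *topology.Cluster) (string, error) {
	node := cluster.FirstServer()
	if node == nil {
		return "", fmt.Errorf("cluster %q has no usable server", cluster.Name)
	}
	client, err := util.ProxyAPIClient(node.LocalProxyPort(), node.LocalAddress(), 8500, "")
	if err != nil {
		return "", err
	}
	return client.Status().Leader()
}
```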

View File

@ -0,0 +1,57 @@
package util
import (
"fmt"
"io"
"os"
"path/filepath"
"golang.org/x/crypto/blake2b"
)
func FilesExist(parent string, paths ...string) (bool, error) {
for _, p := range paths {
ok, err := FileExists(filepath.Join(parent, p))
if err != nil {
return false, err
}
if !ok {
return false, nil
}
}
return true, nil
}
func FileExists(path string) (bool, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
} else {
return true, nil
}
}
func HashFile(path string) (string, error) {
hash, err := blake2b.New256(nil)
if err != nil {
return "", err
}
if err := AddFileToHash(path, hash); err != nil {
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
func AddFileToHash(path string, w io.Writer) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(w, f)
return err
}
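
For instance (a minimal sketch from a caller's perspective; `workdir` and the file names are made up for illustration):

```
ok, err := util.FilesExist(workdir, "docker.tf", "topology.json")
if err == nil && ok {
	// sum is a hex-encoded BLAKE2b-256 digest of the file contents
	sum, _ := util.HashFile(filepath.Join(workdir, "topology.json"))
	fmt.Println(sum)
}
```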

View File

@ -0,0 +1,21 @@
// Copyright 2015 Docker Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Originally from:
// https://github.com/moby/moby/blob/7489b51f610104ab5acc43f4e77142927e7b522e/libnetwork/ipamutils
//
// The only changes were to remove dead code from the package that we did not
// need, and to edit the tests to use github.com/stretchr/testify to avoid an
// extra dependency.
package ipamutils

View File

@ -0,0 +1,117 @@
// Package ipamutils provides utility functions for ipam management
package ipamutils
import (
"fmt"
"net"
"sync"
)
var (
// predefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12
// (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `predefinedGlobalScopeDefaultNetworks`
predefinedLocalScopeDefaultNetworks []*net.IPNet
// predefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8
// (10.x.x.x/24) which do not overlap with the networks in `predefinedLocalScopeDefaultNetworks`
predefinedGlobalScopeDefaultNetworks []*net.IPNet
mutex sync.Mutex
localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16},
{"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16},
{"192.168.0.0/16", 20}}
globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}}
)
// NetworkToSplit represents a network that has to be split into chunks with mask length Size.
// Each subnet in the set is derived from the Base pool. Base is to be passed
// in CIDR format.
// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256
// 10.10.[0-255].0/24 address pools
type NetworkToSplit struct {
Base string `json:"base"`
Size int `json:"size"`
}
func init() {
var err error
if predefinedGlobalScopeDefaultNetworks, err = SplitNetworks(globalScopeDefaultNetworks); err != nil {
panic("failed to initialize the global scope default address pool: " + err.Error())
}
if predefinedLocalScopeDefaultNetworks, err = SplitNetworks(localScopeDefaultNetworks); err != nil {
panic("failed to initialize the local scope default address pool: " + err.Error())
}
}
// ConfigGlobalScopeDefaultNetworks configures the global default address pool.
// Ideally this will be called from SwarmKit as part of swarm init
func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
if defaultAddressPool == nil {
return nil
}
mutex.Lock()
defer mutex.Unlock()
defaultNetworks, err := SplitNetworks(defaultAddressPool)
if err != nil {
return err
}
predefinedGlobalScopeDefaultNetworks = defaultNetworks
return nil
}
// GetGlobalScopeDefaultNetworks returns a copy of the default global-scope network list.
func GetGlobalScopeDefaultNetworks() []*net.IPNet {
mutex.Lock()
defer mutex.Unlock()
return append([]*net.IPNet(nil), predefinedGlobalScopeDefaultNetworks...)
}
// GetLocalScopeDefaultNetworks returns a copy of the default local-scope network list.
func GetLocalScopeDefaultNetworks() []*net.IPNet {
return append([]*net.IPNet(nil), predefinedLocalScopeDefaultNetworks...)
}
// SplitNetworks takes a slice of networks, splits them accordingly, and returns the resulting subnets.
func SplitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
localPools := make([]*net.IPNet, 0, len(list))
for _, p := range list {
_, b, err := net.ParseCIDR(p.Base)
if err != nil {
return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
}
ones, _ := b.Mask.Size()
if p.Size <= 0 || p.Size < ones {
return nil, fmt.Errorf("invalid pools size: %d", p.Size)
}
localPools = append(localPools, splitNetwork(p.Size, b)...)
}
return localPools, nil
}
func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
one, bits := base.Mask.Size()
mask := net.CIDRMask(size, bits)
n := 1 << uint(size-one)
s := uint(bits - size)
list := make([]*net.IPNet, 0, n)
for i := 0; i < n; i++ {
ip := copyIP(base.IP)
addIntToIP(ip, uint(i<<s))
list = append(list, &net.IPNet{IP: ip, Mask: mask})
}
return list
}
func copyIP(from net.IP) net.IP {
ip := make([]byte, len(from))
copy(ip, from)
return ip
}
func addIntToIP(array net.IP, ordinal uint) {
for i := len(array) - 1; i >= 0; i-- {
array[i] |= (byte)(ordinal & 0xff)
ordinal >>= 8
}
}
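
The example in the `NetworkToSplit` doc comment can be exercised directly (a minimal sketch; note the package is internal, so it is only importable from within `testing/deployer/util`):

```
nets, err := SplitNetworks([]*NetworkToSplit{{Base: "10.10.0.0/16", Size: 24}})
if err != nil {
	panic(err)
}
// len(nets) == 256, covering 10.10.0.0/24 through 10.10.255.0/24 in order
```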

View File

@ -0,0 +1,102 @@
package ipamutils
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func initBroadPredefinedNetworks() []*net.IPNet {
pl := make([]*net.IPNet, 0, 31)
mask := []byte{255, 255, 0, 0}
for i := 17; i < 32; i++ {
pl = append(pl, &net.IPNet{IP: []byte{172, byte(i), 0, 0}, Mask: mask})
}
mask20 := []byte{255, 255, 240, 0}
for i := 0; i < 16; i++ {
pl = append(pl, &net.IPNet{IP: []byte{192, 168, byte(i << 4), 0}, Mask: mask20})
}
return pl
}
func initGranularPredefinedNetworks() []*net.IPNet {
pl := make([]*net.IPNet, 0, 256*256)
mask := []byte{255, 255, 255, 0}
for i := 0; i < 256; i++ {
for j := 0; j < 256; j++ {
pl = append(pl, &net.IPNet{IP: []byte{10, byte(i), byte(j), 0}, Mask: mask})
}
}
return pl
}
func initGlobalScopeNetworks() []*net.IPNet {
pl := make([]*net.IPNet, 0, 256*256)
mask := []byte{255, 255, 255, 0}
for i := 0; i < 256; i++ {
for j := 0; j < 256; j++ {
pl = append(pl, &net.IPNet{IP: []byte{30, byte(i), byte(j), 0}, Mask: mask})
}
}
return pl
}
func TestDefaultNetwork(t *testing.T) {
for _, nw := range GetGlobalScopeDefaultNetworks() {
if ones, bits := nw.Mask.Size(); bits != 32 || ones != 24 {
t.Fatalf("Unexpected size for network in granular list: %v", nw)
}
}
for _, nw := range GetLocalScopeDefaultNetworks() {
if ones, bits := nw.Mask.Size(); bits != 32 || (ones != 20 && ones != 16) {
t.Fatalf("Unexpected size for network in broad list: %v", nw)
}
}
originalBroadNets := initBroadPredefinedNetworks()
m := make(map[string]bool)
for _, v := range originalBroadNets {
m[v.String()] = true
}
for _, nw := range GetLocalScopeDefaultNetworks() {
_, ok := m[nw.String()]
assert.True(t, ok)
delete(m, nw.String())
}
assert.Empty(t, m)
originalGranularNets := initGranularPredefinedNetworks()
m = make(map[string]bool)
for _, v := range originalGranularNets {
m[v.String()] = true
}
for _, nw := range GetGlobalScopeDefaultNetworks() {
_, ok := m[nw.String()]
assert.True(t, ok)
delete(m, nw.String())
}
assert.Empty(t, m)
}
func TestConfigGlobalScopeDefaultNetworks(t *testing.T) {
err := ConfigGlobalScopeDefaultNetworks([]*NetworkToSplit{{"30.0.0.0/8", 24}})
assert.NoError(t, err)
originalGlobalScopeNetworks := initGlobalScopeNetworks()
m := make(map[string]bool)
for _, v := range originalGlobalScopeNetworks {
m[v.String()] = true
}
for _, nw := range GetGlobalScopeDefaultNetworks() {
_, ok := m[nw.String()]
assert.True(t, ok)
delete(m, nw.String())
}
assert.Empty(t, m)
}

View File

@ -0,0 +1,17 @@
package util
import (
"github.com/hashicorp/consul/testing/deployer/util/internal/ipamutils"
)
// GetPossibleDockerNetworkSubnets returns a copy of the global-scope network list.
func GetPossibleDockerNetworkSubnets() map[string]struct{} {
list := ipamutils.GetGlobalScopeDefaultNetworks()
out := make(map[string]struct{})
for _, ipnet := range list {
subnet := ipnet.String()
out[subnet] = struct{}{}
}
return out
}
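
A caller can use the returned set to check whether a candidate subnet is one of the pools the deployer may hand to Docker (a minimal sketch):

```
subnets := util.GetPossibleDockerNetworkSubnets()
if _, ok := subnets["10.0.3.0/24"]; ok {
	// 10.0.3.0/24 is one of the 65,536 candidate /24 pools carved out of 10.0.0.0/8
}
```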