mirror of https://github.com/status-im/consul.git
port ENT ingress gateway upgrade tests [NET-2294] [NET-2296] (#16804)
This commit is contained in:
parent 4c038df0ab
commit 32358ddf7e
@@ -574,7 +574,7 @@ function suite_setup {
   # pre-build the test-sds-server container
   echo "Rebuilding 'test-sds-server' image..."
-  retry_default docker build -t test-sds-server -f Dockerfile-test-sds-server test-sds-server
+  retry_default docker build -t test-sds-server -f test-sds-server/Dockerfile test-sds-server
 }

 function suite_teardown {
@@ -3,6 +3,7 @@ module github.com/hashicorp/consul/test/integration/consul-container
 go 1.20

 require (
 	fortio.org/fortio v1.54.0
+	github.com/avast/retry-go v3.0.0+incompatible
 	github.com/docker/docker v20.10.22+incompatible
 	github.com/docker/go-connections v0.4.0

@@ -19,10 +20,15 @@ require (
 	github.com/stretchr/testify v1.8.1
 	github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569
 	github.com/testcontainers/testcontainers-go v0.15.0
-	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
+	golang.org/x/mod v0.8.0
+	google.golang.org/grpc v1.53.0
 )

 require (
+	fortio.org/dflag v1.5.2 // indirect
+	fortio.org/log v1.3.0 // indirect
+	fortio.org/sets v1.0.2 // indirect
+	fortio.org/version v1.0.2 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
 	github.com/Microsoft/hcsshim v0.9.4 // indirect

@@ -36,7 +42,7 @@ require (
 	github.com/fatih/color v1.13.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/btree v1.0.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect

@@ -66,11 +72,12 @@ require (
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/sirupsen/logrus v1.8.1 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/net v0.4.0 // indirect
+	golang.org/x/exp v0.0.0-20230303215020-44a13b063f3e // indirect
+	golang.org/x/net v0.8.0 // indirect
 	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
-	golang.org/x/sys v0.3.0 // indirect
-	google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect
-	google.golang.org/grpc v1.49.0 // indirect
+	golang.org/x/sys v0.6.0 // indirect
+	golang.org/x/text v0.8.0 // indirect
+	google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
@@ -23,6 +23,17 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+fortio.org/assert v1.1.4 h1:Za1RaG+OjsTMpQS3J3UCvTF6wc4+IOHCz+jAOU37Y4o=
+fortio.org/dflag v1.5.2 h1:F9XVRj4Qr2IbJP7BMj7XZc9wB0Q/RZ61Ool+4YPVad8=
+fortio.org/dflag v1.5.2/go.mod h1:ppb/A8u+KKg+qUUYZNYuvRnXuVb8IsdHb/XGzsmjkN8=
+fortio.org/fortio v1.54.0 h1:2jn8yTd6hcIEoKY4CjI0lI6XxTWVxsMYF2bMiWOmv+Y=
+fortio.org/fortio v1.54.0/go.mod h1:SRaZbikL31UoAkw0On2hwpvHrQ0rRVnsAz3UGVNvMRw=
+fortio.org/log v1.3.0 h1:bESPvuQGKejw7rrx41Sg3GoF+tsrB7oC08PxBs5/AM0=
+fortio.org/log v1.3.0/go.mod h1:u/8/2lyczXq52aT5Nw6reD+3cR6m/EbS2jBiIYhgiTU=
+fortio.org/sets v1.0.2 h1:gSWZFg9rgzl1zJfI/93lDJKBFw8WZ3Uxe3oQ5uDM4T4=
+fortio.org/sets v1.0.2/go.mod h1:xVjulHr0FhlmReSymI+AhDtQ4FgjiazQ3JmuNpYFMs8=
+fortio.org/version v1.0.2 h1:8NwxdX58aoeKx7T5xAPO0xlUu1Hpk42nRz5s6e6eKZ0=
+fortio.org/version v1.0.2/go.mod h1:2JQp9Ax+tm6QKiGuzR5nJY63kFeANcgrZ0osoQFDVm0=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=

@@ -108,7 +119,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=

@@ -370,8 +381,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=

@@ -813,6 +824,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230303215020-44a13b063f3e h1:S8xf0d0OEmWrClvbMiUSp+7cGD00txONylwExlf9wR0=
+golang.org/x/exp v0.0.0-20230303215020-44a13b063f3e/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

@@ -834,8 +847,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -877,8 +890,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -980,8 +993,8 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -990,7 +1003,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

@@ -1092,8 +1106,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 h1:K1zaaMdYBXRyX+cwFnxj7M6zwDyumLQMZ5xqwGvjreQ=
-google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 h1:znp6mq/drrY+6khTAlJUDNFFcDGV2ENLYKpMq8SyCds=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=

@@ -1112,8 +1126,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
-google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -0,0 +1,36 @@
+package assert
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"fortio.org/fortio/fgrpc"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+// GRPCPing sends a fgrpc.PingMessage to a fortio server at addr, analogous to
+// the CLI command `fortio grpcping`. It retries for up to 1m, with a 25ms gap.
+func GRPCPing(t *testing.T, addr string) {
+	t.Helper()
+	pingConn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	require.NoError(t, err)
+	pingCl := fgrpc.NewPingServerClient(pingConn)
+	var msg *fgrpc.PingMessage
+	retries := 0
+	retry.RunWith(&retry.Timer{Timeout: time.Minute, Wait: 25 * time.Millisecond}, t, func(r *retry.R) {
+		retries += 1
+		msg, err = pingCl.Ping(context.Background(), &fgrpc.PingMessage{
+			// use addr as payload so we have something variable to check against
+			Payload: addr,
+		})
+		if err != nil {
+			r.Error(err)
+		}
+	})
+	assert.Equal(t, addr, msg.Payload)
+}
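A minimal usage sketch for the new helper (not part of this commit): it assumes a fortio static-server is already reachable on a host-mapped gRPC port; the port value and test name are illustrative.

package assert

import (
	"fmt"
	"testing"
)

func TestGRPCPingSketch(t *testing.T) {
	// hostPort is assumed to be the host-mapped gRPC port of a running
	// fortio server, e.g. obtained via MappedPort as the tests below do.
	hostPort := 8079
	GRPCPing(t, fmt.Sprintf("localhost:%d", hostPort))
}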
@@ -121,22 +121,30 @@ func ServiceLogContains(t *testing.T, service libservice.Service, target string)
 	return strings.Contains(logs, target)
 }

-// AssertFortioName asserts that the fortio service replying at urlbase/debug
+// AssertFortioName is a convenience function for [AssertFortioNameWithClient],
+// using a [cleanhttp.DefaultClient()].
+func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string) {
+	t.Helper()
+	client := cleanhttp.DefaultClient()
+	AssertFortioNameWithClient(t, urlbase, name, reqHost, client)
+}
+
+// AssertFortioNameWithClient asserts that the fortio service replying at urlbase/debug
 // has a `FORTIO_NAME` env variable set. This validates that the client is sending
 // traffic to the right envoy proxy.
 //
 // If reqHost is set, the Host field of the HTTP request will be set to its value.
 //
 // It retries with timeout defaultHTTPTimeout and wait defaultHTTPWait.
-func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string) {
+//
+// client must be a caller-supplied custom http.Client (e.g. one with custom
+// TLS configuration).
+func AssertFortioNameWithClient(t *testing.T, urlbase string, name string, reqHost string, client *http.Client) {
 	t.Helper()
 	var fortioNameRE = regexp.MustCompile(("\nFORTIO_NAME=(.+)\n"))
-	client := cleanhttp.DefaultClient()
 	retry.RunWith(&retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}, t, func(r *retry.R) {
 		fullurl := fmt.Sprintf("%s/debug?env=dump", urlbase)
 		req, err := http.NewRequest("GET", fullurl, nil)
 		if err != nil {
-			r.Fatal("could not make request to service ", fullurl)
+			r.Fatalf("could not build request to %q: %v", fullurl, err)
 		}
 		if reqHost != "" {
 			req.Host = reqHost

@@ -144,14 +152,16 @@ func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string)

 		resp, err := client.Do(req)
 		if err != nil {
-			r.Fatal("could not make call to service ", fullurl)
+			r.Fatalf("could not make request to %q: %v", fullurl, err)
 		}
 		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			r.Fatalf("could not make request to %q: status %d", fullurl, resp.StatusCode)
+		}

 		body, err := io.ReadAll(resp.Body)
 		if err != nil {
-			r.Error(err)
-			return
+			r.Fatalf("failed to read response body from %q: %v", fullurl, err)
 		}

 		m := fortioNameRE.FindStringSubmatch(string(body))
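The split above exists so callers can pass a TLS-aware client. The httpClientWithCA helper used by the upgrade tests later in this diff is not itself shown; the following is a plausible sketch (an assumption, not this commit's implementation) of building such a CA-pinned *http.Client.

package assert

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"testing"
)

// caPinnedClient is a hypothetical helper showing one way to build the
// custom client AssertFortioNameWithClient expects: it trusts only the
// given root CA for the gateway's TLS listener.
func caPinnedClient(t *testing.T, rootPEM string) *http.Client {
	t.Helper()
	pool := x509.NewCertPool()
	// AppendCertsFromPEM reports false when no certificate could be parsed.
	if !pool.AppendCertsFromPEM([]byte(rootPEM)) {
		t.Fatal("could not parse root CA PEM")
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}
}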
@@ -63,7 +63,7 @@ func LaunchContainerOnNode(
 		Started: true,
 	})
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("creating container: %w", err)
 	}
 	deferClean.Add(func() {
 		_ = container.Terminate(ctx)

@@ -71,12 +71,12 @@ func LaunchContainerOnNode(

 	ip, err := container.ContainerIP(ctx)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("fetching container IP: %w", err)
 	}

 	if utils.FollowLog {
 		if err := container.StartLogProducer(ctx); err != nil {
-			return nil, err
+			return nil, fmt.Errorf("starting log producer: %w", err)
 		}
 		container.FollowOutput(&LogConsumer{
 			Prefix: req.Name,

@@ -90,7 +90,7 @@ func LaunchContainerOnNode(
 	for _, portStr := range mapPorts {
 		mapped, err := pod.MappedPort(ctx, nat.Port(portStr))
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("mapping port %s: %w", portStr, err)
 		}
 		ports[portStr] = mapped
 	}
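The hunks above replace bare `return nil, err` with fmt.Errorf("...: %w", err). A small self-contained sketch (not from this commit) of why %w matters: wrapping adds context while errors.Is can still match the underlying cause.

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	base := os.ErrNotExist
	// %w preserves the wrapped error for errors.Is/errors.As,
	// unlike %v or %s, which flatten it to a string.
	wrapped := fmt.Errorf("creating container: %w", base)
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
}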
@@ -186,6 +186,7 @@ func NewConfigBuilder(ctx *BuildContext) *Builder {
 	b.conf.Set("connect.enabled", true)
+	b.conf.Set("log_level", "debug")
 	b.conf.Set("server", true)
 	b.conf.Set("ui_config.enabled", true)

 	// These are the default ports, disabling plaintext transport
 	b.conf.Set("ports.dns", 8600)
@@ -128,21 +128,34 @@ func (c *Cluster) Add(configs []Config, serfJoin bool, ports ...int) (xe error)
 		// Each agent gets its own area in the cluster scratch.
 		conf.ScratchDir = filepath.Join(c.ScratchDir, strconv.Itoa(c.Index))
 		if err := os.MkdirAll(conf.ScratchDir, 0777); err != nil {
-			return fmt.Errorf("container %d: %w", idx, err)
+			return fmt.Errorf("container %d making scratchDir: %w", idx, err)
 		}
 		if err := os.Chmod(conf.ScratchDir, 0777); err != nil {
-			return fmt.Errorf("container %d: %w", idx, err)
+			return fmt.Errorf("container %d perms on scratchDir: %w", idx, err)
 		}

-		n, err := NewConsulContainer(
-			context.Background(),
-			conf,
-			c,
-			ports...,
-		)
-		if err != nil {
-			return fmt.Errorf("container %d: %w", idx, err)
+		var n Agent
+
+		// retry creating the container every ten seconds. with local development, we've
+		// found that this "port not found" error occurs when runs happen too close together
+		if err := goretry.Do(
+			func() (err error) {
+				n, err = NewConsulContainer(
+					context.Background(),
+					conf,
+					c,
+					ports...,
+				)
+				return err
+			},
+			goretry.Delay(10*time.Second),
+			goretry.RetryIf(func(err error) bool {
+				return strings.Contains(err.Error(), "port not found")
+			}),
+		); err != nil {
+			return fmt.Errorf("container %d creating: %s", idx, err)
 		}

 		agents = append(agents, n)
 		c.Index++
 	}
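The retry above leans on avast/retry-go's default attempt count. A self-contained sketch of the same pattern with an explicit cap; the attempt count and shortened delay are illustrative, not values from this commit.

package main

import (
	"errors"
	"fmt"
	"strings"
	"time"

	goretry "github.com/avast/retry-go"
)

func main() {
	attempt := 0
	err := goretry.Do(
		func() error {
			attempt++
			if attempt < 3 {
				return errors.New("port not found")
			}
			return nil
		},
		goretry.Attempts(5),                // explicit cap; the diff uses the default
		goretry.Delay(10*time.Millisecond), // shortened for the sketch
		goretry.RetryIf(func(err error) bool {
			// only retry the specific flake, as the cluster code does
			return strings.Contains(err.Error(), "port not found")
		}),
	)
	fmt.Println(err, attempt) // <nil> 3
}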
@@ -543,6 +543,8 @@ func newContainerRequest(config Config, opts containerOpts, ports ...int) (podRe
 			"8081/tcp", // Envoy App Listener - http port used by static-server-v1
 			"8082/tcp", // Envoy App Listener - http port used by static-server-v2
 			"8083/tcp", // Envoy App Listener - http port used by static-server-v3
+
+			"9997/tcp", // Envoy App Listener
 			"9998/tcp", // Envoy App Listener
 			"9999/tcp", // Envoy App Listener
 		},
@@ -149,6 +149,10 @@ type GatewayConfig struct {
 }

+func NewGatewayService(ctx context.Context, gwCfg GatewayConfig, node libcluster.Agent, ports ...int) (Service, error) {
+	return NewGatewayServiceReg(ctx, gwCfg, node, true, ports...)
+}
+
-func NewGatewayService(ctx context.Context, gwCfg GatewayConfig, node libcluster.Agent, ports ...int) (Service, error) {
+func NewGatewayServiceReg(ctx context.Context, gwCfg GatewayConfig, node libcluster.Agent, doRegister bool, ports ...int) (Service, error) {
 	nodeConfig := node.GetConfig()
 	if nodeConfig.ScratchDir == "" {
 		return nil, fmt.Errorf("node ScratchDir is required")

@@ -174,24 +178,29 @@ func NewGatewayService(ctx context.Context, gwCfg GatewayConfig, node libcluster
 	if err != nil {
 		return nil, err
 	}
+	cmd := []string{
+		"consul", "connect", "envoy",
+		fmt.Sprintf("-gateway=%s", gwCfg.Kind),
+		"-service", gwCfg.Name,
+		"-namespace", gwCfg.Namespace,
+		"-address", "{{ GetInterfaceIP \"eth0\" }}:8443",
+		"-admin-bind", fmt.Sprintf("0.0.0.0:%d", adminPort),
+	}
+	if doRegister {
+		cmd = append(cmd, "-register")
+	}
+	cmd = append(cmd, "--")
+	envoyArgs := []string{
+		"--log-level", envoyLogLevel,
+	}
+
 	req := testcontainers.ContainerRequest{
 		FromDockerfile: dockerfileCtx,
 		WaitingFor:     wait.ForLog("").WithStartupTimeout(10 * time.Second),
 		AutoRemove:     false,
 		Name:           containerName,
-		Cmd: []string{
-			"consul", "connect", "envoy",
-			fmt.Sprintf("-gateway=%s", gwCfg.Kind),
-			"-register",
-			"-namespace", gwCfg.Namespace,
-			"-service", gwCfg.Name,
-			"-address", "{{ GetInterfaceIP \"eth0\" }}:8443",
-			"-admin-bind", fmt.Sprintf("0.0.0.0:%d", adminPort),
-			"--",
-			"--log-level", envoyLogLevel,
-		},
-		Env: make(map[string]string),
+		Env: make(map[string]string),
+		Cmd: append(cmd, envoyArgs...),
 	}

 	nodeInfo := node.GetInfo()
@@ -1,6 +1,3 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
 package service

 import (

@@ -31,18 +28,20 @@ type SidecarService struct {
 }

 type ServiceOpts struct {
-	Name      string
-	ID        string
-	Meta      map[string]string
-	HTTPPort  int
-	GRPCPort  int
-	Checks    Checks
-	Connect   SidecarService
-	Namespace string
+	Name     string
+	ID       string
+	Meta     map[string]string
+	HTTPPort int
+	GRPCPort int
+	// if true, register the GRPC port instead of the HTTP one (the default)
+	RegisterGRPC bool
+	Checks       Checks
+	Connect      SidecarService
+	Namespace    string
 }

 // createAndRegisterStaticServerAndSidecar registers the services and launches the static-server containers
-func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
+func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
 	// Do some trickery to ensure that partial completion is correctly torn
 	// down, but successful execution is not.
 	var deferClean utils.ResettableDefer

@@ -53,7 +52,7 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int
 	}

 	// Create a service and proxy instance
-	serverService, err := NewExampleService(context.Background(), svc.ID, svc.Port, grpcPort, node, containerArgs...)
+	serverService, err := NewExampleService(context.Background(), svc.ID, httpPort, grpcPort, node, containerArgs...)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -82,25 +81,32 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, grpcPort int
 func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts, containerArgs ...string) (Service, Service, error) {
 	// Register the static-server service and sidecar first to prevent race with sidecar
 	// trying to get xDS before it's ready
+	p := serviceOpts.HTTPPort
+	agentCheck := api.AgentServiceCheck{
+		Name:     "Static Server Listening",
+		TCP:      fmt.Sprintf("127.0.0.1:%d", p),
+		Interval: "10s",
+		Status:   api.HealthPassing,
+	}
+	if serviceOpts.RegisterGRPC {
+		p = serviceOpts.GRPCPort
+		agentCheck.TCP = ""
+		agentCheck.GRPC = fmt.Sprintf("127.0.0.1:%d", p)
+	}
 	req := &api.AgentServiceRegistration{
 		Name: serviceOpts.Name,
 		ID:   serviceOpts.ID,
-		Port: serviceOpts.HTTPPort,
+		Port: p,
 		Connect: &api.AgentServiceConnect{
 			SidecarService: &api.AgentServiceRegistration{
 				Proxy: &api.AgentServiceConnectProxyConfig{},
 			},
 		},
 		Namespace: serviceOpts.Namespace,
-		Check: &api.AgentServiceCheck{
-			Name:     "Static Server Listening",
-			TCP:      fmt.Sprintf("127.0.0.1:%d", serviceOpts.HTTPPort),
-			Interval: "10s",
-			Status:   api.HealthPassing,
-		},
-		Meta: serviceOpts.Meta,
+		Meta:      serviceOpts.Meta,
+		Check:     &agentCheck,
 	}
-	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req, containerArgs...)
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, containerArgs...)
 }

 func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {

@@ -125,7 +131,7 @@ func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, se
 		Meta: serviceOpts.Meta,
 	}

-	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.GRPCPort, req)
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req)
 }

 func CreateAndRegisterStaticClientSidecar(
@@ -229,6 +229,10 @@ func NewCluster(
 		cluster, err = libcluster.NewN(t, *serverConf, config.NumServers)
 	}
 	require.NoError(t, err)
+	// builder generates certs for us, so copy them back
+	if opts.InjectAutoEncryption {
+		cluster.CACert = serverConf.CACert
+	}

 	var retryJoin []string
 	for i := 0; i < config.NumServers; i++ {
@@ -49,3 +49,17 @@ func isSemVer(ver string) bool {
 	_, err := version.NewVersion(ver)
 	return err == nil
 }
+
+// VersionGTE reports whether version a >= b.
+func VersionGTE(a, b string) bool {
+	av := version.Must(version.NewVersion(a))
+	bv := version.Must(version.NewVersion(b))
+	return av.GreaterThanOrEqual(bv)
+}
+
+// VersionLT reports whether version a < b.
+func VersionLT(a, b string) bool {
+	av := version.Must(version.NewVersion(a))
+	bv := version.Must(version.NewVersion(b))
+	return av.LessThan(bv)
+}
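Concrete behavior of the new helpers under hashicorp/go-version semantics (a sketch, not from this commit): "1.15" parses as 1.15.0, so any 1.15.x release gates as >= "1.15", which is the property the upgrade tests below rely on.

package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	a := version.Must(version.NewVersion("1.15.2"))
	b := version.Must(version.NewVersion("1.15")) // parses as 1.15.0
	fmt.Println(a.GreaterThanOrEqual(b))          // true
	fmt.Println(a.LessThan(b))                    // false
}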
@@ -0,0 +1,124 @@
+package upgrade
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/docker/go-connections/nat"
+	"github.com/stretchr/testify/require"
+
+	"github.com/hashicorp/consul/api"
+	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
+	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+)
+
+func TestIngressGateway_GRPC_UpgradeToTarget_fromLatest(t *testing.T) {
+	t.Parallel()
+	cluster, _, client := topology.NewCluster(t, &topology.ClusterConfig{
+		NumServers: 1,
+		NumClients: 1,
+		BuildOpts: &libcluster.BuildOptions{
+			Datacenter:    "dc1",
+			ConsulVersion: utils.LatestVersion,
+		},
+		ApplyDefaultProxySettings: true,
+	})
+
+	require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceConfigEntry{
+		Name:     libservice.StaticServerServiceName,
+		Kind:     api.ServiceDefaults,
+		Protocol: "grpc",
+	}))
+
+	const (
+		nameIG = "ingress-gateway"
+	)
+
+	const nameS1 = libservice.StaticServerServiceName
+
+	igw, err := libservice.NewGatewayService(
+		context.Background(),
+		libservice.GatewayConfig{
+			Name: nameIG,
+			Kind: "ingress",
+		},
+		cluster.Servers()[0],
+	)
+	require.NoError(t, err)
+
+	// this must be one of the externally-mapped ports from
+	// https://github.com/hashicorp/consul/blob/c5e729e86576771c4c22c6da1e57aaa377319323/test/integration/consul-container/libs/cluster/container.go#L521-L525
+	const portS1DirectNoTLS = 8080
+	require.NoError(t, cluster.ConfigEntryWrite(&api.IngressGatewayConfigEntry{
+		Kind: api.IngressGateway,
+		Name: nameIG,
+		Listeners: []api.IngressListener{
+			{
+				Port:     portS1DirectNoTLS,
+				Protocol: "grpc",
+				Services: []api.IngressService{
+					{
+						Name:  libservice.StaticServerServiceName,
+						Hosts: []string{"*"},
+					},
+				},
+			},
+		},
+	}))
+
+	// register static-server service
+	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
+		cluster.Clients()[0],
+		&libservice.ServiceOpts{
+			Name:         nameS1,
+			ID:           nameS1,
+			HTTPPort:     8080,
+			GRPCPort:     8079,
+			RegisterGRPC: true,
+		},
+	)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, nameS1, nil)
+
+	// Register a static-client service
+	serverNodes := cluster.Servers()
+	require.NoError(t, err)
+	require.True(t, len(serverNodes) > 0)
+	staticClientSvcSidecar, err := libservice.CreateAndRegisterStaticClientSidecar(serverNodes[0], "", true)
+	require.NoError(t, err)
+
+	tests := func(t *testing.T) {
+		t.Run("grpc directly", func(t *testing.T) {
+			_, p := staticClientSvcSidecar.GetAddr()
+			libassert.GRPCPing(t, fmt.Sprintf("localhost:%d", p))
+		})
+		t.Run("grpc via igw", func(t *testing.T) {
+			pm, _ := cluster.Servers()[0].GetPod().MappedPort(
+				context.Background(),
+				nat.Port(fmt.Sprintf("%d/tcp", portS1DirectNoTLS)),
+			)
+			libassert.GRPCPing(t, fmt.Sprintf("localhost:%d", pm.Int()))
+		})
+	}
+
+	t.Run("pre-upgrade", func(t *testing.T) {
+		tests(t)
+	})
+
+	if t.Failed() {
+		t.Fatal("failing fast: failed assertions pre-upgrade")
+	}
+
+	t.Logf("Upgrade to version %s", utils.TargetVersion)
+	err = cluster.StandardUpgrade(t, context.Background(), utils.TargetVersion)
+	require.NoError(t, err)
+	require.NoError(t, igw.Restart())
+
+	t.Run("post-upgrade", func(t *testing.T) {
+		tests(t)
+	})
+}
@@ -0,0 +1,335 @@
+package upgrade
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/docker/go-connections/nat"
+	"github.com/hashicorp/consul/api"
+	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
+	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/testcontainers/testcontainers-go"
+)
+
+const sdsServerPort = 1234
+
+// This upgrade test exercises ingress gateway functionality when using an external
+// SDS server for certs, as described in https://developer.hashicorp.com/consul/docs/connect/gateways/ingress-gateway#custom-tls-certificates-via-secret-discovery-service-sds
+// It:
+// 1. starts a consul cluster
+// 2. builds and starts a test SDS server from .../test-sds-server
+// 3. configures an ingress gateway pointed at this SDS server
+// 4. does HTTPS calls against the gateway and checks that the certs returned
+//    are from the SDS server as expected
+func TestIngressGateway_SDS_UpgradeToTarget_fromLatest(t *testing.T) {
+	t.Parallel()
+
+	cluster, _, client := topology.NewCluster(t, &topology.ClusterConfig{
+		NumServers: 1,
+		NumClients: 2,
+		BuildOpts: &libcluster.BuildOptions{
+			Datacenter:    "dc1",
+			ConsulVersion: utils.LatestVersion,
+		},
+		ApplyDefaultProxySettings: true,
+	})
+
+	sdsServerContainerName, rootPEM := createSDSServer(t, cluster)
+
+	require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceConfigEntry{
+		Name:     libservice.StaticServerServiceName,
+		Kind:     api.ServiceDefaults,
+		Protocol: "http",
+	}))
+	require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceConfigEntry{
+		Name:     libservice.StaticServer2ServiceName,
+		Kind:     api.ServiceDefaults,
+		Protocol: "http",
+	}))
+
+	const (
+		nameIG = "ingress-gateway"
+	)
+
+	const nameS1 = libservice.StaticServerServiceName
+	const nameS2 = libservice.StaticServer2ServiceName
+
+	// this must be one of the externally-mapped ports from
+	// https://github.com/hashicorp/consul/blob/c5e729e86576771c4c22c6da1e57aaa377319323/test/integration/consul-container/libs/cluster/container.go#L521-L525
+	const (
+		portWildcard   = 8080
+		portOther      = 9999
+		nameSDSCluster = "sds-cluster"
+		// these are in our pre-created certs in .../test-sds-server
+		hostnameWWW          = "www.example.com"
+		hostnameFoo          = "foo.example.com"
+		certResourceWildcard = "wildcard.ingress.consul"
+	)
+	require.NoError(t, cluster.ConfigEntryWrite(&api.IngressGatewayConfigEntry{
+		Kind: api.IngressGateway,
+		Name: nameIG,
+
+		Listeners: []api.IngressListener{
+			{
+				Port:     portWildcard,
+				Protocol: "http",
+				Services: []api.IngressService{
+					{
+						Name: "*",
+					},
+				},
+				TLS: &api.GatewayTLSConfig{
+					Enabled:       true,
+					TLSMinVersion: "TLSv1_2",
+					SDS: &api.GatewayTLSSDSConfig{
+						ClusterName:  nameSDSCluster,
+						CertResource: certResourceWildcard,
+					},
+				},
+			},
+			{
+				Port:     portOther,
+				Protocol: "http",
+				Services: []api.IngressService{
+					{
+						Name:  libservice.StaticServerServiceName,
+						Hosts: []string{hostnameWWW},
+						TLS: &api.GatewayServiceTLSConfig{
+							SDS: &api.GatewayTLSSDSConfig{
+								ClusterName:  nameSDSCluster,
+								CertResource: hostnameWWW,
+							},
+						},
+					},
+					{
+						Name:  libservice.StaticServer2ServiceName,
+						Hosts: []string{hostnameFoo},
+						TLS: &api.GatewayServiceTLSConfig{
+							SDS: &api.GatewayTLSSDSConfig{
+								ClusterName:  nameSDSCluster,
+								CertResource: hostnameFoo,
+							},
+						},
+					},
+				},
+				TLS: &api.GatewayTLSConfig{
+					Enabled:       true,
+					TLSMinVersion: "TLSv1_2",
+				},
+			},
+		},
+	}))
+
+	const staticClusterJSONKey = "envoy_extra_static_clusters_json"
+
+	// register sds cluster as per https://developer.hashicorp.com/consul/docs/connect/gateways/ingress-gateway#configure-static-sds-cluster-s
+	require.NoError(t, cluster.Servers()[0].GetClient().Agent().ServiceRegister(
+		&api.AgentServiceRegistration{
+			Kind: api.ServiceKindIngressGateway,
+			Name: nameIG,
+			Proxy: &api.AgentServiceConnectProxyConfig{
+				Config: map[string]interface{}{
+					// LOGICAL_DNS because we need to use a hostname.
+					// WARNING: this JSON is *very* sensitive and not well-checked;
+					// bad values can lead to envoy not bootstrapping properly
+					staticClusterJSONKey: fmt.Sprintf(`
+{
+  "name": "%s",
+  "connect_timeout": "5s",
+  "http2_protocol_options": {},
+  "type": "LOGICAL_DNS",
+  "load_assignment": {
+    "cluster_name": "%s",
+    "endpoints": [
+      {
+        "lb_endpoints": [
+          {
+            "endpoint": {
+              "address": {
+                "socket_address": {
+                  "address": "%s",
+                  "port_value": %d
+                }
+              }
+            }
+          }
+        ]
+      }
+    ]
+  }
+}`, nameSDSCluster, nameSDSCluster, sdsServerContainerName, sdsServerPort),
+				},
+			},
+		},
+	))
+
+	igw, err := libservice.NewGatewayServiceReg(context.Background(), libservice.GatewayConfig{
+		Name: nameIG,
+		Kind: "ingress",
+	}, cluster.Servers()[0], false)
+	require.NoError(t, err)
+
+	// create s1
+	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
+		cluster.Clients()[0],
+		&libservice.ServiceOpts{
+			Name:     nameS1,
+			ID:       nameS1,
+			HTTPPort: 8080,
+			GRPCPort: 8079,
+		},
+	)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, nameS1, nil)
+
+	// create s2
+	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
+		cluster.Clients()[1],
+		&libservice.ServiceOpts{
+			Name:     nameS2,
+			ID:       nameS2,
+			HTTPPort: 8080,
+			GRPCPort: 8079,
+		},
+	)
+	require.NoError(t, err)
+	libassert.CatalogServiceExists(t, client, nameS2, nil)
+
+	tests := func(t *testing.T) {
+		t.Run("ensure HTTP response with cert *.ingress.consul", func(t *testing.T) {
+			port := portWildcard
+			reqHost := fmt.Sprintf("%s.ingress.consul:%d", libservice.StaticServerServiceName, port)
+			portMapped, _ := cluster.Servers()[0].GetPod().MappedPort(
+				context.Background(),
+				nat.Port(fmt.Sprintf("%d/tcp", port)),
+			)
+
+			httpClient := httpClientWithCA(t, reqHost, string(rootPEM))
+			urlbase := fmt.Sprintf("https://%s", reqHost)
+			resp := mappedHTTPGET(t, urlbase, portMapped.Int(), nil, nil, httpClient)
+			defer resp.Body.Close()
+
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates))
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates[0].DNSNames))
+			assert.Equal(t, "*.ingress.consul", resp.TLS.PeerCertificates[0].DNSNames[0])
+		})
+
+		t.Run("listener 2: ensure HTTP response with cert www.example.com", func(t *testing.T) {
+			port := portOther
+			reqHost := fmt.Sprintf("%s:%d", hostnameWWW, port)
+			portMapped, _ := cluster.Servers()[0].GetPod().MappedPort(
+				context.Background(),
+				nat.Port(fmt.Sprintf("%d/tcp", port)),
+			)
+
+			httpClient := httpClientWithCA(t, reqHost, string(rootPEM))
+			urlbase := fmt.Sprintf("https://%s", reqHost)
+			resp := mappedHTTPGET(t, urlbase, portMapped.Int(), nil, nil, httpClient)
+			defer resp.Body.Close()
+
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates))
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates[0].DNSNames))
+			assert.Equal(t, hostnameWWW, resp.TLS.PeerCertificates[0].DNSNames[0])
+		})
+
+		t.Run("listener 2: ensure HTTP response with cert foo.example.com", func(t *testing.T) {
+			port := portOther
+			reqHost := fmt.Sprintf("%s:%d", hostnameFoo, port)
+			portMapped, _ := cluster.Servers()[0].GetPod().MappedPort(
+				context.Background(),
+				nat.Port(fmt.Sprintf("%d/tcp", port)),
+			)
+
+			httpClient := httpClientWithCA(t, reqHost, string(rootPEM))
+			urlbase := fmt.Sprintf("https://%s", reqHost)
+			resp := mappedHTTPGET(t, urlbase, portMapped.Int(), nil, nil, httpClient)
+			defer resp.Body.Close()
+
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates))
+			require.Equal(t, 1, len(resp.TLS.PeerCertificates[0].DNSNames))
+			assert.Equal(t, hostnameFoo, resp.TLS.PeerCertificates[0].DNSNames[0])
+		})
+	}
+
+	t.Run("pre-upgrade", func(t *testing.T) {
+		tests(t)
+	})
+
+	if t.Failed() {
+		t.Fatal("failing fast: failed assertions pre-upgrade")
+	}
+
+	// Upgrade the cluster to utils.TargetVersion
+	t.Logf("Upgrade to version %s", utils.TargetVersion)
+	err = cluster.StandardUpgrade(t, context.Background(), utils.TargetVersion)
+	require.NoError(t, err)
+	require.NoError(t, igw.Restart())
+
+	t.Run("post-upgrade", func(t *testing.T) {
+		tests(t)
+	})
+}
+
+// createSDSServer builds and runs a test SDS server in the given cluster.
+// It is built from files in .../test-sds-server, shared with the BATS tests.
+// This includes some pre-generated certs for various scenarios.
+//
+// It returns the name of the container (which will also be the hostname), and
+// the root CA's cert in PEM encoding.
+func createSDSServer(t *testing.T, cluster *libcluster.Cluster) (containerName string, rootPEM []byte) {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*40)
+	defer cancel()
+
+	sdsServerFilesPath, err := filepath.Abs("../../../connect/envoy/test-sds-server/")
+	require.NoError(t, err)
+
+	// TODO: we should probably just generate these certs on every boot
+	certPath := filepath.Join(sdsServerFilesPath, "/certs")
+
+	rootPEMf, err := os.Open(filepath.Join(certPath, "ca-root.crt"))
+	require.NoError(t, err)
+
+	rootPEM, err = io.ReadAll(rootPEMf)
+	require.NoError(t, err)
+
+	containerName = utils.RandName(fmt.Sprintf("%s-test-sds-server", cluster.Servers()[0].GetDatacenter()))
+
+	_, err = testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+		Started: true,
+		ContainerRequest: testcontainers.ContainerRequest{
+			FromDockerfile: testcontainers.FromDockerfile{
+				Context: sdsServerFilesPath,
+			},
+			Name: containerName,
+			Networks: []string{
+				cluster.NetworkName,
+			},
+			ExposedPorts: []string{
+				fmt.Sprintf("%d/tcp", sdsServerPort),
+			},
+			Mounts: []testcontainers.ContainerMount{
+				{
+					Source: testcontainers.DockerBindMountSource{
+						HostPath: certPath,
+					},
+					Target:   "/certs",
+					ReadOnly: true,
+				},
+			},
+		},
+	})
+	require.NoError(t, err, "create SDS server container")
+	return containerName, rootPEM
+}
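The WARNING comment in the file above notes that the envoy_extra_static_clusters_json payload is sensitive and unchecked. A hedged sketch (hypothetical, not part of this commit) of a guard that would at least catch JSON syntax errors before the agent hands the blob to Envoy's bootstrap:

package upgrade

import (
	"encoding/json"
	"testing"
)

// requireValidJSON is a hypothetical helper: it validates only syntax,
// not Envoy's cluster schema, so misspelled field names would still pass.
func requireValidJSON(t *testing.T, payload string) {
	t.Helper()
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(payload), &parsed); err != nil {
		t.Fatalf("static cluster JSON does not parse: %v", err)
	}
}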
@@ -5,14 +5,19 @@ package upgrade

 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
 	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
+	"net/url"
 	"strings"
 	"testing"
 	"time"

+	"github.com/docker/go-connections/nat"
+	"github.com/itchyny/gojq"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@ -29,282 +34,487 @@ import (
|
|||
|
||||
// TestIngressGateway_UpgradeToTarget_fromLatest:
|
||||
// - starts a cluster with 2 static services,
|
||||
// - configures an ingress gateway + router
|
||||
// - performs tests to ensure our routing rules work (namely header manipulation)
|
||||
// - configures an ingress gateway + router with TLS
|
||||
// - performs tests:
|
||||
// - envoy is configured with thresholds (e.g max connections) and health checks
|
||||
// - HTTP header manipulation
|
||||
// - per-service and wildcard and custom hostnames work
|
||||
//
|
||||
// - upgrades the cluster
|
||||
// - performs these tests again
|
||||
func TestIngressGateway_UpgradeToTarget_fromLatest(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, oldVersion, targetVersion string) {
|
||||
// setup
|
||||
// TODO? we don't need a peering cluster, so maybe this is overkill
|
||||
cluster, _, client := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 2,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: oldVersion,
|
||||
// TODO? InjectAutoEncryption: true,
|
||||
cluster, _, client := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 2,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
ConsulVersion: utils.LatestVersion,
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
})
|
||||
|
||||
require.NoError(t, cluster.ConfigEntryWrite(&api.ProxyConfigEntry{
|
||||
Name: api.ProxyConfigGlobal,
|
||||
Kind: api.ProxyDefaults,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
}))
|
||||
|
||||
const (
|
||||
nameIG = "ingress-gateway"
|
||||
nameRouter = "router"
|
||||
)
|
||||
|
||||
const nameS1 = libservice.StaticServerServiceName
|
||||
const nameS2 = libservice.StaticServer2ServiceName
|
||||
require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceRouterConfigEntry{
|
||||
Kind: api.ServiceRouter,
|
||||
// This is a "virtual" service name and will not have a backing
|
||||
// service definition. It must match the name defined in the ingress
|
||||
// configuration.
|
||||
Name: nameRouter,
|
||||
Routes: []api.ServiceRoute{
|
||||
{
|
||||
Match: &api.ServiceRouteMatch{
|
||||
HTTP: &api.ServiceRouteHTTPMatch{
|
||||
PathPrefix: fmt.Sprintf("/%s/", nameS1),
|
||||
},
|
||||
},
|
||||
Destination: &api.ServiceRouteDestination{
|
||||
Service: nameS1,
|
||||
PrefixRewrite: "/",
|
||||
},
|
||||
},
|
||||
ApplyDefaultProxySettings: true,
|
||||
{
|
||||
Match: &api.ServiceRouteMatch{
|
||||
HTTP: &api.ServiceRouteHTTPMatch{
|
||||
PathPrefix: fmt.Sprintf("/%s/", nameS2),
|
||||
},
|
||||
},
|
||||
Destination: &api.ServiceRouteDestination{
|
||||
Service: nameS2,
|
||||
PrefixRewrite: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
gwCfg := libservice.GatewayConfig{
|
||||
Name: nameIG,
|
||||
Kind: "ingress",
|
||||
}
|
||||
igw, err := libservice.NewGatewayService(context.Background(), gwCfg, cluster.Servers()[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
// these must be one of the externally-mapped ports from
|
||||
// https://github.com/hashicorp/consul/blob/c5e729e86576771c4c22c6da1e57aaa377319323/test/integration/consul-container/libs/cluster/container.go#L521-L525
|
||||
const portRouter = 8080
|
||||
const portWildcard = 9997
|
||||
const portS1Direct = 9998
|
||||
const portS1DirectCustomHostname = 9999
|
||||
const hostnameS1DirectCustom = "test.example.com"
|
||||
// arbitrary numbers
|
||||
var (
|
||||
overrideOffset = uint32(10000)
|
||||
igwDefaultMaxConns = uint32(3572)
|
||||
igwDefaultMaxPendingReqs = uint32(7644)
|
||||
igwDefaultMaxConcurrentReqs = uint32(7637)
|
||||
// PHC = PassiveHealthCheck
|
||||
igwDefaultPHCMaxFailures = uint32(7382)
|
||||
s1MaxConns = igwDefaultMaxConns + overrideOffset
|
||||
s1MaxPendingReqs = igwDefaultMaxConcurrentReqs + overrideOffset
|
||||
)
|
||||
const (
|
||||
igwDefaultPHCIntervalS = 7820
|
||||
)
|
||||
igwDefaults := api.IngressServiceConfig{
|
||||
MaxConnections: &igwDefaultMaxConns,
|
||||
MaxPendingRequests: &igwDefaultMaxPendingReqs,
|
||||
MaxConcurrentRequests: &igwDefaultMaxConcurrentReqs,
|
||||
}
|
||||
// passive health checks were introduced in 1.15
|
||||
if utils.VersionGTE(utils.LatestVersion, "1.15") {
|
||||
igwDefaults.PassiveHealthCheck = &api.PassiveHealthCheck{
|
||||
Interval: igwDefaultPHCIntervalS * time.Second,
|
||||
MaxFailures: igwDefaultPHCMaxFailures,
|
||||
}
|
||||
}
|
||||
require.NoError(t, cluster.ConfigEntryWrite(&api.IngressGatewayConfigEntry{
|
||||
Kind: api.IngressGateway,
|
||||
Name: nameIG,
|
||||
Defaults: &igwDefaults,
|
||||
TLS: api.GatewayTLSConfig{
|
||||
Enabled: true,
|
||||
TLSMinVersion: "TLSv1_2",
|
||||
},
|
||||
Listeners: []api.IngressListener{
|
||||
{
|
||||
Port: portRouter,
|
||||
Protocol: "http",
|
||||
Services: []api.IngressService{
|
||||
{
|
||||
Name: nameRouter,
|
||||
// for "request header manipulation" subtest
|
||||
RequestHeaders: &api.HTTPHeaderModifiers{
|
||||
Add: map[string]string{
|
||||
"x-foo": "bar-req",
|
||||
"x-existing-1": "appended-req",
|
||||
},
|
||||
Set: map[string]string{
|
||||
"x-existing-2": "replaced-req",
|
||||
"x-client-ip": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%",
|
||||
},
|
||||
Remove: []string{"x-bad-req"},
|
||||
},
|
||||
// for "response header manipulation" subtest
|
||||
ResponseHeaders: &api.HTTPHeaderModifiers{
|
||||
Add: map[string]string{
|
||||
"x-foo": "bar-resp",
|
||||
"x-existing-1": "appended-resp",
|
||||
},
|
||||
Set: map[string]string{
|
||||
"x-existing-2": "replaced-resp",
|
||||
},
|
||||
Remove: []string{"x-bad-resp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// for "envoy config/thresholds" subtest
|
||||
{
|
||||
Port: portS1Direct,
|
||||
Protocol: "http",
|
||||
Services: []api.IngressService{
|
||||
{
|
||||
Name: libservice.StaticServerServiceName,
|
||||
MaxConnections: &s1MaxConns,
|
||||
MaxPendingRequests: &s1MaxPendingReqs,
|
||||
},
|
||||
},
|
||||
},
|
||||
// for "hostname=custom" subtest
|
||||
{
|
||||
Port: portS1DirectCustomHostname,
|
||||
Protocol: "http",
|
||||
Services: []api.IngressService{
|
||||
{
|
||||
Name: libservice.StaticServerServiceName,
|
||||
Hosts: []string{hostnameS1DirectCustom},
|
||||
},
|
||||
},
|
||||
},
|
||||
// for "hostname=*" subtest
|
||||
{
|
||||
Port: portWildcard,
|
||||
Protocol: "http",
|
||||
Services: []api.IngressService{
|
||||
{
|
||||
Name: "*",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
// create s1
|
||||
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
|
||||
cluster.Clients()[0],
|
||||
&libservice.ServiceOpts{
|
||||
Name: nameS1,
|
||||
ID: nameS1,
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8079,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, nameS1, nil)
|
||||
|
||||
// create s2
|
||||
_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
|
||||
cluster.Clients()[1],
|
||||
&libservice.ServiceOpts{
|
||||
Name: nameS2,
|
||||
ID: nameS2,
|
||||
HTTPPort: 8080,
|
||||
GRPCPort: 8079,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
libassert.CatalogServiceExists(t, client, nameS2, nil)
|
||||
|
||||
// checks
|
||||
// TODO: other checks from verify.bats
|
||||
// ingress-gateway proxy admin up
|
||||
// s1 proxy admin up
|
||||
// s2 proxy admin up
|
||||
// s1 proxy listener has right cert
|
||||
// s2 proxy listener has right cert
|
||||
// ig1 has healthy endpoints for s1
|
||||
// ig1 has healthy endpoints for s2
|
||||
// TODO ^ ??? s1 and s2 aren't direct listeners, only in `router`, so why are they endpoints?
|
||||
|
||||
roots, _, err := client.Connect().CARoots(&api.QueryOptions{})
|
||||
var root *api.CARoot
|
||||
for _, r := range roots.Roots {
|
||||
if r.Active {
|
||||
root = r
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NotNil(t, root, "no active CA root found")
|
||||
|
||||
// tests
|
||||
tests := func(t *testing.T) {
|
||||
// fortio name should be $nameS<X> for /$nameS<X> prefix on router
|
||||
portRouterMapped, _ := cluster.Servers()[0].GetPod().MappedPort(
|
||||
context.Background(),
|
||||
nat.Port(fmt.Sprintf("%d/tcp", portRouter)),
|
||||
)
|
||||
reqHost := fmt.Sprintf("router.ingress.consul:%d", portRouter)
|
||||
|
||||
httpClient := httpClientWithCA(t, reqHost, root.RootCertPEM)
|
||||
|
||||
t.Run("fortio name", func(t *testing.T) {
|
||||
// TODO: occasionally (1 in 5 or so), service 1 gets stuck throwing 503s
|
||||
// - direct connection works fine
|
||||
// - its envoy has some 503s in stats, and some 200s
|
||||
// - igw envoy says all 503s in stats
|
||||
libassert.AssertFortioNameWithClient(t,
|
||||
fmt.Sprintf("https://localhost:%d/%s", portRouterMapped.Int(), nameS1), nameS1, reqHost, httpClient)
|
||||
libassert.AssertFortioNameWithClient(t,
|
||||
fmt.Sprintf("https://localhost:%d/%s", portRouterMapped.Int(), nameS2), nameS2, reqHost, httpClient)
|
||||
})
|
||||
urlbaseS2 := fmt.Sprintf("https://%s/%s", reqHost, nameS2)
|
||||
|
||||
t.Run("envoy config", func(t *testing.T) {
|
||||
var dump string
|
||||
_, adminPort := igw.GetAdminAddr()
|
||||
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 1 * time.Second}, t, func(r *retry.R) {
|
||||
dump, _, err = libassert.GetEnvoyOutput(adminPort, "config_dump", map[string]string{})
|
||||
if err != nil {
|
||||
r.Fatal("could not fetch envoy configuration")
|
||||
}
|
||||
})
|
||||
var m interface{}
|
||||
err = json.Unmarshal([]byte(dump), &m)
|
||||
require.NoError(t, err)
|
||||
|
||||
q, err := gojq.Parse(fmt.Sprintf(`.configs[1].dynamic_active_clusters[]
|
||||
| select(.cluster.name|startswith("%s."))
|
||||
| .cluster`, nameS1))
|
||||
require.NoError(t, err)
|
||||
it := q.Run(m)
|
||||
v, ok := it.Next()
|
||||
require.True(t, ok)
|
||||
t.Run("thresholds", func(t *testing.T) {
|
||||
// TODO: these fail about 10% of the time on my machine, giving me only the defaults, not the override
|
||||
// writing the config again (with a different value) usually works
|
||||
// https://hashicorp.slack.com/archives/C03UNBBDELS/p1677621125567219
|
||||
t.Skip("BUG? thresholds not set about 10% of the time")
|
||||
thresholds := v.(map[string]any)["circuit_breakers"].(map[string]any)["thresholds"].([]map[string]any)[0]
|
||||
assert.Equal(t, float64(s1MaxConns), thresholds["max_connections"].(float64), "max conns from override")
|
||||
assert.Equal(t, float64(s1MaxPendingReqs), thresholds["max_pending_requests"].(float64), "max pending conns from override")
|
||||
assert.Equal(t, float64(*igwDefaults.MaxConcurrentRequests), thresholds["max_requests"].(float64), "max requests from defaults")
|
||||
})
|
||||
t.Run("outlier detection", func(t *testing.T) {
|
||||
if utils.VersionLT(utils.LatestVersion, "1.15") {
|
||||
t.Skipf("version %s (< 1.15) IGW doesn't support Defaults.PassiveHealthCheck", utils.LatestVersion)
|
||||
}
|
||||
// BATS checks against S2, but we're doing S1 just to avoid more jq
|
||||
o := v.(map[string]any)["outlier_detection"].(map[string]any)
|
||||
assert.Equal(t,
|
||||
fmt.Sprintf("%ds", igwDefaultPHCIntervalS),
|
||||
o["interval"].(string),
|
||||
"interval: s1 == default",
|
||||
)
|
||||
assert.Equal(t, float64(igwDefaultPHCMaxFailures), o["consecutive_5xx"].(float64), "s1 max failures == default")
|
||||
_, ec5xx_ok := o["enforcing_consecutive_5xx"]
|
||||
assert.False(t, ec5xx_ok, "s1 enforcing_consective_5xx: unset")
|
||||
})
|
||||
})
|
||||
|
||||
	// upsert config entry making http the default protocol globally
	require.NoError(t, cluster.ConfigEntryWrite(&api.ProxyConfigEntry{
		Name: api.ProxyConfigGlobal,
		Kind: api.ProxyDefaults,
		Config: map[string]interface{}{
			"protocol": "http",
		},
	}))
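	// For reference, this is the programmatic equivalent of the proxy-defaults
	// entry the BATS suites write via the CLI, roughly (an HCL sketch):
	//
	//	Kind = "proxy-defaults"
	//	Name = "global"
	//	Config {
	//	  protocol = "http"
	//	}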
t.Run("request header manipulation", func(t *testing.T) {
|
||||
resp := mappedHTTPGET(t, fmt.Sprintf("%s/debug?env=dump", urlbaseS2), portRouterMapped.Int(), http.Header(map[string][]string{
|
||||
"X-Existing-1": {"original"},
|
||||
"X-Existing-2": {"original"},
|
||||
"X-Bad-Req": {"true"},
|
||||
}), nil, httpClient)
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
const (
|
||||
nameIG = "ingress-gateway"
|
||||
nameRouter = "router"
|
||||
)
|
||||
// The following check the body, which should echo the headers received
|
||||
// by the fortio container
|
||||
assert.Contains(t, string(body), "X-Foo: bar-req",
|
||||
"Ingress should have added the new request header")
|
||||
assert.Contains(t, string(body), "X-Existing-1: original,appended-req",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, string(body), "X-Existing-2: replaced-req",
|
||||
"Ingress should have replaced the second existing header")
|
||||
// This 172. is the prefix of the IP for the gateway for our docker network.
|
||||
// Perhaps there's some way to look this up.
|
||||
// This is a deviation from BATS, because their tests run inside Docker, and ours run outside.
|
||||
assert.Contains(t, string(body), "X-Client-Ip: 172.",
|
||||
"Ingress should have set the client ip from dynamic Envoy variable")
|
||||
assert.NotContains(t, string(body), "X-Bad-Req: true",
|
||||
"Ingress should have removed the bad request header")
|
||||
|
||||
// upsert config entry for `service-router` `router`:
|
||||
// - prefix matching `/$nameS1` goes to service s1
|
||||
// - prefix matching `/$nameS2` goes to service s2
|
||||
const nameS1 = libservice.StaticServerServiceName
|
||||
const nameS2 = libservice.StaticServer2ServiceName
|
||||
require.NoError(t, cluster.ConfigEntryWrite(&api.ServiceRouterConfigEntry{
|
||||
Kind: api.ServiceRouter,
|
||||
// This is a "virtual" service name and will not have a backing
|
||||
// service definition. It must match the name defined in the ingress
|
||||
// configuration.
|
||||
Name: nameRouter,
|
||||
Routes: []api.ServiceRoute{
|
||||
{
|
||||
Match: &api.ServiceRouteMatch{
|
||||
HTTP: &api.ServiceRouteHTTPMatch{
|
||||
PathPrefix: fmt.Sprintf("/%s/", nameS1),
|
||||
},
|
||||
},
|
||||
Destination: &api.ServiceRouteDestination{
|
||||
Service: nameS1,
|
||||
PrefixRewrite: "/",
|
||||
},
|
||||
},
|
||||
{
|
||||
Match: &api.ServiceRouteMatch{
|
||||
HTTP: &api.ServiceRouteHTTPMatch{
|
||||
PathPrefix: fmt.Sprintf("/%s/", nameS2),
|
||||
},
|
||||
},
|
||||
Destination: &api.ServiceRouteDestination{
|
||||
Service: nameS2,
|
||||
PrefixRewrite: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
})
|
||||
|
||||
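	// With this router in place, the path prefix selects the backend and is
	// stripped before the request reaches it, e.g. (a sketch, assuming the
	// usual "static-server"/"static-server-2" service names):
	//
	//	GET https://router.ingress.consul:8080/static-server/debug  -> s1 receives GET /debug
	//	GET https://router.ingress.consul:8080/static-server-2/echo -> s2 receives GET /echo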
	gwCfg := libservice.GatewayConfig{
		Name: nameIG,
		Kind: "ingress",
	}
	igw, err := libservice.NewGatewayService(context.Background(), gwCfg, cluster.Servers()[0])
	require.NoError(t, err)
	t.Logf("created gateway: %#v", igw)
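	// NewGatewayService registers the gateway with the agent and brings up its
	// Envoy container; the ingress config entry written below is what actually
	// opens the listener on portRouter.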
	// upsert config entry for ingress-gateway ig1, protocol http, service s1:
	// - listener points at service `router`
	// - add request headers: 1 new, 1 existing
	// - set request headers: 1 existing, 1 new (to the client IP)
	// - add response headers: 1 new, 1 existing
	// - set response headers: 1 existing
	// - remove response header: 1 existing

	// this must be one of the externally-mapped ports from
	// https://github.com/hashicorp/consul/blob/c5e729e86576771c4c22c6da1e57aaa377319323/test/integration/consul-container/libs/cluster/container.go#L521-L525
	const portRouter = 8080
require.NoError(t, cluster.ConfigEntryWrite(&api.IngressGatewayConfigEntry{
|
||||
Kind: api.IngressGateway,
|
||||
Name: nameIG,
|
||||
Listeners: []api.IngressListener{
|
||||
{
|
||||
Port: portRouter,
|
||||
Protocol: "http",
|
||||
Services: []api.IngressService{
|
||||
{
|
||||
Name: nameRouter,
|
||||
// TODO: extract these header values to consts to test
|
||||
RequestHeaders: &api.HTTPHeaderModifiers{
|
||||
Add: map[string]string{
|
||||
"x-foo": "bar-req",
|
||||
"x-existing-1": "appended-req",
|
||||
},
|
||||
Set: map[string]string{
|
||||
"x-existing-2": "replaced-req",
|
||||
"x-client-ip": "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%",
|
||||
},
|
||||
Remove: []string{"x-bad-req"},
|
||||
},
|
||||
ResponseHeaders: &api.HTTPHeaderModifiers{
|
||||
Add: map[string]string{
|
||||
"x-foo": "bar-resp",
|
||||
"x-existing-1": "appended-resp",
|
||||
},
|
||||
Set: map[string]string{
|
||||
"x-existing-2": "replaced-resp",
|
||||
},
|
||||
Remove: []string{"x-bad-resp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
|
||||
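	// Note: %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% is not interpolated by
	// Consul; it is passed through to Envoy, which expands such custom-header
	// variables per request (hence the X-Client-Ip assertions above).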
	// create s1
	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
		cluster.Clients()[0],
		&libservice.ServiceOpts{
			Name:     nameS1,
			ID:       nameS1,
			HTTPPort: 8080,
			GRPCPort: 8079,
		},
	)
	require.NoError(t, err)
	libassert.CatalogServiceExists(t, client, nameS1, nil)

	// create s2
	_, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(
		cluster.Clients()[1],
		&libservice.ServiceOpts{
			Name:     nameS2,
			ID:       nameS2,
			HTTPPort: 8080,
			GRPCPort: 8079,
		},
	)
	require.NoError(t, err)
	libassert.CatalogServiceExists(t, client, nameS2, nil)
	// checks
	// TODO: port the remaining checks from verify.bats:
	//   - ingress-gateway proxy admin up
	//   - s1 proxy admin up
	//   - s2 proxy admin up
	//   - s1 proxy listener has the right cert
	//   - s2 proxy listener has the right cert
	//   - ig1 has healthy endpoints for s1
	//   - ig1 has healthy endpoints for s2
	// TODO ^ ??? s1 and s2 aren't direct listeners, only in `router`, so why
	// are they endpoints?
	// tests
	tests := func(t *testing.T) {
		// fortio name should be $nameS<X> for the /$nameS<X> prefix on the router
		portRouterMapped, _ := cluster.Servers()[0].GetPod().MappedPort(
			context.Background(),
			nat.Port(fmt.Sprintf("%d/tcp", portRouter)),
		)

		t.Run("response header manipulation", func(t *testing.T) {
			const params = "?header=x-bad-resp:true&header=x-existing-1:original&header=x-existing-2:original"
			resp := mappedHTTPGET(t,
				fmt.Sprintf("%s/echo%s", urlbaseS2, params),
				portRouterMapped.Int(),
				nil,
				nil,
				httpClient,
			)
			defer resp.Body.Close()
t.Run("request header manipulation", func(t *testing.T) {
|
||||
resp := mappedHTTPGET(t, fmt.Sprintf("%s/debug?env=dump", urlbaseS2), portRouterMapped.Int(), http.Header(map[string][]string{
|
||||
"X-Existing-1": {"original"},
|
||||
"X-Existing-2": {"original"},
|
||||
"X-Bad-Req": {"true"},
|
||||
}))
|
||||
assert.Contains(t, resp.Header.Values("x-foo"), "bar-resp",
|
||||
"Ingress should have added the new response header")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-1"), "original",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-1"), "appended-resp",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-2"), "replaced-resp",
|
||||
"Ingress should have replaced the second existing header")
|
||||
assert.NotContains(t, resp.Header.Values("x-existing-2"), "original",
|
||||
"x-existing-2 response header should have been overridden")
|
||||
assert.NotContains(t, resp.Header.Values("x-bad-resp"), "true",
|
||||
"X-Bad-Resp response header should have been stripped")
|
||||
})
|
||||
|
||||
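		// fortio's /echo endpoint reflects header=<name>:<value> query params
		// back as response headers, which is what lets the params string above
		// seed x-existing-1/x-existing-2/x-bad-resp for the ingress to rewrite.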
t.Run("hostname=custom", func(t *testing.T) {
|
||||
pm, _ := cluster.Servers()[0].GetPod().MappedPort(
|
||||
context.Background(),
|
||||
nat.Port(fmt.Sprintf("%d/tcp", portS1DirectCustomHostname)),
|
||||
)
|
||||
h := fmt.Sprintf("%s:%d", hostnameS1DirectCustom, portS1DirectCustomHostname)
|
||||
clS1Direct := httpClientWithCA(t, h, root.RootCertPEM)
|
||||
const data = "secret password"
|
||||
resp := mappedHTTPGET(t,
|
||||
"https://"+h,
|
||||
pm.Int(),
|
||||
nil,
|
||||
strings.NewReader(data),
|
||||
clS1Direct,
|
||||
)
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(data), body)
|
||||
})
|
||||
|
||||
t.Run("hostname=<service>.ingress.consul", func(t *testing.T) {
|
||||
pm, _ := cluster.Servers()[0].GetPod().MappedPort(
|
||||
context.Background(),
|
||||
nat.Port(fmt.Sprintf("%d/tcp", portS1Direct)),
|
||||
)
|
||||
h := fmt.Sprintf("%s.ingress.consul:%d", libservice.StaticServerServiceName, portS1Direct)
|
||||
clS1Direct := httpClientWithCA(t, h, root.RootCertPEM)
|
||||
const data = "secret password"
|
||||
resp := mappedHTTPGET(t,
|
||||
"https://"+h,
|
||||
pm.Int(),
|
||||
nil,
|
||||
strings.NewReader(data),
|
||||
clS1Direct,
|
||||
)
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(data), body)
|
||||
})
|
||||
t.Run("hostname=*", func(t *testing.T) {
|
||||
pm, _ := cluster.Servers()[0].GetPod().MappedPort(
|
||||
context.Background(),
|
||||
nat.Port(fmt.Sprintf("%d/tcp", portWildcard)),
|
||||
)
|
||||
|
||||
t.Run("s1 HTTPS echo validates against our CA", func(t *testing.T) {
|
||||
h := fmt.Sprintf("%s.ingress.consul:%d", libservice.StaticServerServiceName, portWildcard)
|
||||
cl := httpClientWithCA(t, h, root.RootCertPEM)
|
||||
data := fmt.Sprintf("secret-%s", libservice.StaticClientServiceName)
|
||||
resp := mappedHTTPGET(t,
|
||||
"https://"+h,
|
||||
pm.Int(),
|
||||
nil,
|
||||
strings.NewReader(data),
|
||||
cl,
|
||||
)
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The following check the body, which should echo the headers received
|
||||
// by the fortio container
|
||||
assert.Contains(t, string(body), "X-Foo: bar-req",
|
||||
"Ingress should have added the new request header")
|
||||
assert.Contains(t, string(body), "X-Existing-1: original,appended-req",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, string(body), "X-Existing-2: replaced-req",
|
||||
"Ingress should have replaced the second existing header")
|
||||
// TODO: This 172. is the prefix of the IP for the gateway for our docker network.
|
||||
// Perhaps there's some way to look this up.
|
||||
// This is a deviation from BATS, because their tests run inside Docker, and ours run outside.
|
||||
assert.Contains(t, string(body), "X-Client-Ip: 172.",
|
||||
"Ingress should have set the client ip from dynamic Envoy variable")
|
||||
assert.NotContains(t, string(body), "X-Bad-Req: true",
|
||||
"Ingress should have removed the bad request header")
|
||||
assert.Equal(t, []byte(data), body)
|
||||
})
|
||||
t.Run("response header manipulation", func(t *testing.T) {
|
||||
const params = "?header=x-bad-resp:true&header=x-existing-1:original&header=x-existing-2:original"
|
||||
|
||||
t.Run("s2 HTTPS echo validates against our CA", func(t *testing.T) {
|
||||
h := fmt.Sprintf("%s.ingress.consul:%d", libservice.StaticServer2ServiceName, portWildcard)
|
||||
cl := httpClientWithCA(t, h, root.RootCertPEM)
|
||||
data := fmt.Sprintf("secret-%s", libservice.StaticClientServiceName)
|
||||
resp := mappedHTTPGET(t,
|
||||
fmt.Sprintf("%s/echo%s", urlbaseS2, params),
|
||||
portRouterMapped.Int(),
|
||||
"https://"+h,
|
||||
pm.Int(),
|
||||
nil,
|
||||
strings.NewReader(data),
|
||||
cl,
|
||||
)
|
||||
defer resp.Body.Close()
|
||||
|
||||
assert.Contains(t, resp.Header.Values("x-foo"), "bar-resp",
|
||||
"Ingress should have added the new response header")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-1"), "original",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-1"), "appended-resp",
|
||||
"Ingress should have appended the first existing header - both should be present")
|
||||
assert.Contains(t, resp.Header.Values("x-existing-2"), "replaced-resp",
|
||||
"Ingress should have replaced the second existing header")
|
||||
assert.NotContains(t, resp.Header.Values("x-existing-2"), "original",
|
||||
"x-existing-2 response header should have been overridden")
|
||||
assert.NotContains(t, resp.Header.Values("x-bad-resp"), "true",
|
||||
"X-Bad-Resp response header should have been stripped")
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []byte(data), body)
|
||||
})
|
||||
}
|
||||
t.Run(fmt.Sprintf("pre-upgrade from %s to %s", oldVersion, targetVersion), func(t *testing.T) {
|
||||
tests(t)
|
||||
})
|
||||
|
||||
if t.Failed() {
|
||||
t.Fatal("failing fast: failed assertions pre-upgrade")
|
||||
}
|
||||
|
||||
// Upgrade the cluster to targetVersion
|
||||
t.Logf("Upgrade to version %s", targetVersion)
|
||||
err = cluster.StandardUpgrade(t, context.Background(), targetVersion)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, igw.Restart())
|
||||
|
||||
t.Run(fmt.Sprintf("post-upgrade from %s to %s", oldVersion, targetVersion), func(t *testing.T) {
|
||||
tests(t)
|
||||
})
|
||||
}
|
||||
t.Run("pre-upgrade", func(t *testing.T) {
|
||||
tests(t)
|
||||
})
|
||||
|
||||
t.Run(fmt.Sprintf("Upgrade from %s to %s", utils.LatestVersion, utils.TargetVersion),
|
||||
func(t *testing.T) {
|
||||
t.Parallel()
|
||||
run(t, utils.LatestVersion, utils.TargetVersion)
|
||||
})
|
||||
time.Sleep(1 * time.Second)
|
||||
if t.Failed() {
|
||||
t.Fatal("failing fast: failed assertions pre-upgrade")
|
||||
}
|
||||
|
||||
// Upgrade the cluster to utils.utils.TargetVersion
|
||||
t.Logf("Upgrade to version %s", utils.TargetVersion)
|
||||
err = cluster.StandardUpgrade(t, context.Background(), utils.TargetVersion)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, igw.Restart())
|
||||
|
||||
t.Run("post-upgrade", func(t *testing.T) {
|
||||
tests(t)
|
||||
})
|
||||
}
|
||||
|
||||
// mappedHTTPGET performs an HTTP GET to the given uri, but actually uses
// "localhost:<mappedPort>" to connect to the host, and sends the host from
// uri in the [http.Request.Host] field.
//
// Extra headers may be specified in header. body is the request body.
//
// client is used as the [http.Client], for example, one returned by
// [httpClientWithCA].
//
// It retries for up to 1 minute, with a 50ms wait.
func mappedHTTPGET(t *testing.T, uri string, mappedPort int, header http.Header, body io.Reader, client *http.Client) *http.Response {
	t.Helper()
	var hostHdr string
	u, _ := url.Parse(uri)
	hostHdr = u.Host
	u.Host = fmt.Sprintf("localhost:%d", mappedPort)
	uri = u.String()
	var resp *http.Response
	retry.RunWith(&retry.Timer{Timeout: 1 * time.Minute, Wait: 50 * time.Millisecond}, t, func(r *retry.R) {
		req, err := http.NewRequest("GET", uri, body)
		if err != nil {
			r.Fatalf("could not make request to service %q: %s", uri, err)
		}
		if header != nil {
			req.Header = header
		}
		if hostHdr != "" {
			req.Host = hostHdr
		}

		resp, err = client.Do(req)
		if err != nil {
			r.Fatalf("could not make call to service %q: %s", uri, err)
		}
	})
	return resp
}
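// Example usage of mappedHTTPGET together with httpClientWithCA (a sketch
// reusing names from the tests above):
//
//	cl := httpClientWithCA(t, "router.ingress.consul:8080", root.RootCertPEM)
//	resp := mappedHTTPGET(t, "https://router.ingress.consul:8080/static-server/",
//		portRouterMapped.Int(), nil, nil, cl)
//	defer resp.Body.Close()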
// httpClientWithCA returns an [http.Client] configured to trust cacertPEM
// as a CA, and with the host portion of reqHost set as the
// [tls.Config.ServerName].
func httpClientWithCA(t *testing.T, reqHost string, cacertPEM string) *http.Client {
	t.Helper()
	pool := x509.NewCertPool()
	ok := pool.AppendCertsFromPEM([]byte(cacertPEM))
	require.True(t, ok)

	tr := http.Transport{
		DisableKeepAlives: true,
		// BUG: our *.ingress.consul certs have a SNI name of `*.ingress.consul.`.
		// Note the trailing dot. Go's [crypto/x509.Certificate.VerifyHostname]
		// doesn't like the trailing dot, and so won't evaluate the wildcard. As a
		// workaround, we disable Go's builtin verification and do it ourselves.
		// https://groups.google.com/g/golang-checkins/c/K510gi92v8M explains the
		// rationale for not treating names with trailing dots as hostnames.
		//
		// Even with InsecureSkipVerify set, [crypto/tls] still invokes
		// VerifyPeerCertificate, so verification is replaced here, not skipped.
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
			RootCAs:            pool,
			VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
				require.Equal(t, 1, len(rawCerts), "expected 1 cert")
				cert, err := x509.ParseCertificate(rawCerts[0])
				require.NoError(t, err)
				// Strip the trailing dot from each SAN so the standard
				// verifier can evaluate the wildcard.
				for i, s := range cert.DNSNames {
					cert.DNSNames[i] = strings.TrimSuffix(s, ".")
				}
				_, err = cert.Verify(x509.VerifyOptions{Roots: pool})
				require.NoError(t, err, "cert validation")
				return nil
			},
		},
	}
	reqHostNoPort, _, _ := strings.Cut(reqHost, ":")
	if reqHost != "" {
		tr.TLSClientConfig.ServerName = reqHostNoPort
	}
	client := http.Client{
		Transport: &tr,
	}
	return &client
}