mirror of https://github.com/status-im/consul.git
Wasm integration tests for local and remote wasm files (#17756)
* wasm integration tests for local and remote wasm files
  - refactoring and cleanup for wasm testing
  - remove wasm debug logging
  - PR feedback, wasm build lock
  - correct path pattern for wasm build files
  - Add new helper function to minimize changes to existing test code
  - Remove extra param
  - mod tidy
  - add custom service setup to test lib
  - add wait until static server sidecar can reach nginx sidecar
  - Doc comments
  - PR feedback
  - Update workflows to compile wasm for integration tests
  - Fix docker build path
  - Fix package name for linter
  - Update makefile, fix redeclared function
  - Update expected wasm filename
  - Debug test
  - ls in workflow
  - remove pwd in favor of relative path
  - more debugging
  - Build wasm in compatibility tests as well
  - Build wasm directly in ci rather than in container
  - Debug tinygo and llvm version
  - Change wasm file extension
  - Remove tinygo debugging
  - Remove extra comments
* Add compiled wasm and build instructions
parent 13ce787a3f
commit 2a8bf5df61
@@ -630,6 +630,8 @@ func newContainerRequest(config Config, opts containerOpts, ports ...int) (podRe
 			"9997/tcp", // Envoy App Listener
 			"9998/tcp", // Envoy App Listener
 			"9999/tcp", // Envoy App Listener
+			"80/tcp",   // Nginx - http port used in wasm tests
 		},
 		Hostname: opts.hostname,
 		Networks: opts.addtionalNetworks,
@@ -157,8 +157,15 @@ type SidecarConfig struct {
 // "consul connect envoy", for service name (serviceName) on the specified
 // node. The container exposes port serviceBindPort and envoy admin port
 // (19000) by mapping them onto host ports. The container's name has a prefix
-// combining datacenter and name.
-func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBindPorts []int, node cluster.Agent) (*ConnectContainer, error) {
+// combining datacenter and name. The customContainerConf parameter can be used
+// to mutate the testcontainers.ContainerRequest used to create the sidecar proxy.
+func NewConnectService(
+	ctx context.Context,
+	sidecarCfg SidecarConfig,
+	serviceBindPorts []int,
+	node cluster.Agent,
+	customContainerConf func(request testcontainers.ContainerRequest) testcontainers.ContainerRequest,
+) (*ConnectContainer, error) {
 	nodeConfig := node.GetConfig()
 	if nodeConfig.ScratchDir == "" {
 		return nil, fmt.Errorf("node ScratchDir is required")
@@ -265,6 +272,11 @@ func NewConnectService(ctx context.Context, sidecarCfg SidecarConfig, serviceBin
 	exposedPorts := make([]string, len(appPortStrs))
 	copy(exposedPorts, appPortStrs)
 	exposedPorts = append(exposedPorts, adminPortStr)
+
+	if customContainerConf != nil {
+		req = customContainerConf(req)
+	}
+
 	info, err := cluster.LaunchContainerOnNode(ctx, node, req, exposedPorts)
 	if err != nil {
 		return nil, err
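The hook added here is just a function from testcontainers.ContainerRequest to testcontainers.ContainerRequest, and passing nil keeps the previous behaviour. A minimal sketch of such a hook (the function name and file paths are illustrative, not part of this change; the real tests compose theirs from the copyFilesToContainer and chownFiles helpers shown later in this diff):

```go
package example // illustrative only, not part of the diff

import "github.com/testcontainers/testcontainers-go"

// withWasmPlugin is a hypothetical customContainerConf hook that copies the
// compiled plugin into the sidecar container before it starts.
func withWasmPlugin(req testcontainers.ContainerRequest) testcontainers.ContainerRequest {
	req.Files = append(req.Files, testcontainers.ContainerFile{
		HostFilePath:      "testdata/wasm_test_files/wasm_add_header.wasm",
		ContainerFilePath: "/wasm_add_header.wasm",
		FileMode:          777,
	})
	return req
}
```

A caller would pass it straight through, for example NewConnectService(ctx, sidecarCfg, []int{svc.Port}, node, withWasmPlugin).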
@@ -127,6 +127,42 @@ func (c exampleContainer) GetStatus() (string, error) {
 	return state.Status, err
 }
 
+// NewCustomService creates a new test service from a custom testcontainers.ContainerRequest.
+func NewCustomService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent, request testcontainers.ContainerRequest) (Service, error) {
+	namePrefix := fmt.Sprintf("%s-service-example-%s", node.GetDatacenter(), name)
+	containerName := utils.RandName(namePrefix)
+
+	pod := node.GetPod()
+	if pod == nil {
+		return nil, fmt.Errorf("node Pod is required")
+	}
+
+	var (
+		httpPortStr = strconv.Itoa(httpPort)
+		grpcPortStr = strconv.Itoa(grpcPort)
+	)
+
+	request.Name = containerName
+
+	info, err := libcluster.LaunchContainerOnNode(ctx, node, request, []string{httpPortStr, grpcPortStr})
+	if err != nil {
+		return nil, err
+	}
+
+	out := &exampleContainer{
+		ctx:         ctx,
+		container:   info.Container,
+		ip:          info.IP,
+		httpPort:    info.MappedPorts[httpPortStr].Int(),
+		grpcPort:    info.MappedPorts[grpcPortStr].Int(),
+		serviceName: name,
+	}
+
+	fmt.Printf("Custom service exposed http port %d, gRPC port %d\n", out.httpPort, out.grpcPort)
+
+	return out, nil
+}
+
 func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent, containerArgs ...string) (Service, error) {
 	namePrefix := fmt.Sprintf("%s-service-example-%s", node.GetDatacenter(), name)
 	containerName := utils.RandName(namePrefix)
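NewCustomService leaves the container definition entirely to the caller and only overrides the container name before launching it on the node. A hedged usage sketch, assuming it sits in the same service package (the image is a placeholder; the wasm test below reaches this code path through CreateAndRegisterCustomServiceAndSidecar to run an nginx file server):

```go
// Illustrative only: run an arbitrary image as a test service on a node.
// Ports 80/9999 mirror the nginx file server used by the wasm test.
func startNginxFileServer(ctx context.Context, node libcluster.Agent) (Service, error) {
	req := testcontainers.ContainerRequest{
		Image: "nginx:stable", // placeholder; the real test pins an exact digest
	}
	return NewCustomService(ctx, "nginx-fileserver", 80, 9999, node, req)
}
```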
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+	"github.com/testcontainers/testcontainers-go"
 
 	"github.com/hashicorp/consul/api"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
@@ -50,7 +51,7 @@ type ServiceOpts struct {
 }
 
 // createAndRegisterStaticServerAndSidecar register the services and launch static-server containers
-func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int, grpcPort int, svc *api.AgentServiceRegistration, containerArgs ...string) (Service, Service, error) {
+func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int, grpcPort int, svc *api.AgentServiceRegistration, customContainerCfg func(testcontainers.ContainerRequest) testcontainers.ContainerRequest, containerArgs ...string) (Service, Service, error) {
 	// Do some trickery to ensure that partial completion is correctly torn
 	// down, but successful execution is not.
 	var deferClean utils.ResettableDefer
@@ -77,10 +78,11 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int
 			svc.Connect.SidecarService.Proxy != nil &&
 			svc.Connect.SidecarService.Proxy.Mode == api.ProxyModeTransparent,
 	}
-	serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node) // bindPort not used
+	serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node, customContainerCfg) // bindPort not used
 	if err != nil {
 		return nil, nil, err
 	}
+
 	deferClean.Add(func() {
 		_ = serverConnectProxy.Terminate()
 	})
@@ -91,7 +93,101 @@ func createAndRegisterStaticServerAndSidecar(node libcluster.Agent, httpPort int
 	return serverService, serverConnectProxy, nil
 }
 
-func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts, containerArgs ...string) (Service, Service, error) {
+// createAndRegisterCustomServiceAndSidecar creates a custom service from the given testcontainers.ContainerRequest
+// and a sidecar proxy for the service. The customContainerCfg parameter is used to mutate the
+// testcontainers.ContainerRequest for the sidecar proxy.
+func createAndRegisterCustomServiceAndSidecar(node libcluster.Agent,
+	httpPort int,
+	grpcPort int,
+	svc *api.AgentServiceRegistration,
+	request testcontainers.ContainerRequest,
+	customContainerCfg func(testcontainers.ContainerRequest) testcontainers.ContainerRequest,
+) (Service, Service, error) {
+	// Do some trickery to ensure that partial completion is correctly torn
+	// down, but successful execution is not.
+	var deferClean utils.ResettableDefer
+	defer deferClean.Execute()
+
+	if err := node.GetClient().Agent().ServiceRegister(svc); err != nil {
+		return nil, nil, err
+	}
+
+	// Create a service and proxy instance
+	serverService, err := NewCustomService(context.Background(), svc.ID, httpPort, grpcPort, node, request)
+	if err != nil {
+		return nil, nil, err
+	}
+	deferClean.Add(func() {
+		_ = serverService.Terminate()
+	})
+	sidecarCfg := SidecarConfig{
+		Name:      fmt.Sprintf("%s-sidecar", svc.ID),
+		ServiceID: svc.ID,
+		Namespace: svc.Namespace,
+		EnableTProxy: svc.Connect != nil &&
+			svc.Connect.SidecarService != nil &&
+			svc.Connect.SidecarService.Proxy != nil &&
+			svc.Connect.SidecarService.Proxy.Mode == api.ProxyModeTransparent,
+	}
+	serverConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{svc.Port}, node, customContainerCfg) // bindPort not used
+	if err != nil {
+		return nil, nil, err
+	}
+
+	deferClean.Add(func() {
+		_ = serverConnectProxy.Terminate()
+	})
+
+	// disable cleanup functions now that we have an object with a Terminate() function
+	deferClean.Reset()
+
+	return serverService, serverConnectProxy, nil
+}
+
+func CreateAndRegisterCustomServiceAndSidecar(node libcluster.Agent,
+	serviceOpts *ServiceOpts,
+	request testcontainers.ContainerRequest,
+	customContainerCfg func(testcontainers.ContainerRequest) testcontainers.ContainerRequest) (Service, Service, error) {
+	// Register the static-server service and sidecar first to prevent race with sidecar
+	// trying to get xDS before it's ready
+	p := serviceOpts.HTTPPort
+	agentCheck := api.AgentServiceCheck{
+		Name:     "Static Server Listening",
+		TCP:      fmt.Sprintf("127.0.0.1:%d", p),
+		Interval: "10s",
+		Status:   api.HealthPassing,
+	}
+	if serviceOpts.RegisterGRPC {
+		p = serviceOpts.GRPCPort
+		agentCheck.TCP = ""
+		agentCheck.GRPC = fmt.Sprintf("127.0.0.1:%d", p)
+	}
+	req := &api.AgentServiceRegistration{
+		Name: serviceOpts.Name,
+		ID:   serviceOpts.ID,
+		Port: p,
+		Connect: &api.AgentServiceConnect{
+			SidecarService: &api.AgentServiceRegistration{
+				Proxy: &api.AgentServiceConnectProxyConfig{
+					Mode: api.ProxyMode(serviceOpts.Connect.Proxy.Mode),
+				},
+			},
+		},
+		Namespace: serviceOpts.Namespace,
+		Meta:      serviceOpts.Meta,
+		Check:     &agentCheck,
+	}
+	return createAndRegisterCustomServiceAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, request, customContainerCfg)
+}
+
+// CreateAndRegisterStaticServerAndSidecarWithCustomContainerConfig creates an example static server and a sidecar for
+// the service. The customContainerCfg parameter is a function of testcontainers.ContainerRequest to
+// testcontainers.ContainerRequest which can be used to mutate the container request for the sidecar proxy and inject
+// custom configuration and lifecycle hooks.
+func CreateAndRegisterStaticServerAndSidecarWithCustomContainerConfig(node libcluster.Agent,
+	serviceOpts *ServiceOpts,
+	customContainerCfg func(testcontainers.ContainerRequest) testcontainers.ContainerRequest,
+	containerArgs ...string) (Service, Service, error) {
 	// Register the static-server service and sidecar first to prevent race with sidecar
 	// trying to get xDS before it's ready
 	p := serviceOpts.HTTPPort
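With both public entry points in place, a test can launch the stock static-server while injecting files into its Envoy sidecar without touching the existing helpers. A condensed sketch of how the new wasm test (later in this diff) wires this up in createTestServices; chain, copyFilesToContainer and chownFiles are helpers defined in that test file:

```go
// Copy the compiled plugin into the sidecar and chown it to the envoy user,
// then start static-server plus sidecar from the mutated container request.
func startStaticServerWithWasm(node libcluster.Agent, opts *libservice.ServiceOpts) (libservice.Service, error) {
	wasmFile := testcontainers.ContainerFile{
		HostFilePath:      "testdata/wasm_test_files/wasm_add_header.wasm",
		ContainerFilePath: "/wasm_add_header.wasm",
		FileMode:          777,
	}
	customFn := chain(
		copyFilesToContainer([]testcontainers.ContainerFile{wasmFile}),
		chownFiles([]testcontainers.ContainerFile{wasmFile}, "envoy", true),
	)
	_, sidecar, err := libservice.CreateAndRegisterStaticServerAndSidecarWithCustomContainerConfig(node, opts, customFn)
	return sidecar, err
}
```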
@@ -122,7 +218,11 @@ func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts
 		Check:    &agentCheck,
 		Locality: serviceOpts.Locality,
 	}
-	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, containerArgs...)
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, customContainerCfg, containerArgs...)
+}
+
+func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent, serviceOpts *ServiceOpts, containerArgs ...string) (Service, Service, error) {
+	return CreateAndRegisterStaticServerAndSidecarWithCustomContainerConfig(node, serviceOpts, nil, containerArgs...)
 }
 
 func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, serviceOpts *ServiceOpts) (Service, Service, error) {
@@ -149,7 +249,7 @@ func CreateAndRegisterStaticServerAndSidecarWithChecks(node libcluster.Agent, se
 		Meta: serviceOpts.Meta,
 	}
 
-	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req)
+	return createAndRegisterStaticServerAndSidecar(node, serviceOpts.HTTPPort, serviceOpts.GRPCPort, req, nil)
 }
 
 func CreateAndRegisterStaticClientSidecar(
@@ -209,7 +309,7 @@ func CreateAndRegisterStaticClientSidecar(
 		EnableTProxy: enableTProxy,
 	}
 
-	clientConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{libcluster.ServiceUpstreamLocalBindPort}, node)
+	clientConnectProxy, err := NewConnectService(context.Background(), sidecarCfg, []int{libcluster.ServiceUpstreamLocalBindPort}, node, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -7,11 +7,12 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/stretchr/testify/require"
+
 	"github.com/hashicorp/consul/api"
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
-	"github.com/stretchr/testify/require"
 )
 
 // CreateServices
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/Dockerfile (new file, vendored, 3 lines):

FROM tinygo/tinygo:sha-598cb1e4ddce53d85600a1b7724ed39eea80e119
COPY ./build.sh /
ENTRYPOINT ["/build.sh"]
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/README.md (new file, vendored, 14 lines):

# Building WASM test files

We have seen some issues with building the wasm test files on the build runners for the integration test. Currently,
the theory is that there may be some differences in the clang toolchain on different runners which cause panics in
tinygo if the job is scheduled on particular runners but not others.

In order to get around this, we are just building the wasm test file and checking it into the repo.

To build the wasm test file,

```bash
~/consul/test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files
> docker run -v ./:/wasm --rm $(docker build -q .)
```
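The remote-download test below pins the plugin by digest (its builtin/wasm config carries a "SHA256" value computed by the test's sha256FromFile helper), so after rebuilding the checked-in wasm it can be useful to recompute that digest. A small stand-alone Go sketch equivalent to that helper:

```go
// Print the SHA-256 of the compiled plugin, the value the remote wasm test
// supplies in the builtin/wasm "SHA256" field.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("wasm_add_header.wasm")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", h.Sum(nil))
}
```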
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/build.sh (new executable file, vendored, 3 lines):

#!/bin/sh
cd /wasm
tinygo build -o /wasm/wasm_add_header.wasm -scheduler=none -target=wasi /wasm/wasm_add_header.go
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/go.mod (new file, vendored, 13 lines):

module main

go 1.20

require (
	github.com/tetratelabs/proxy-wasm-go-sdk v0.21.0
	github.com/tidwall/gjson v1.14.4
)

require (
	github.com/tidwall/match v1.1.1 // indirect
	github.com/tidwall/pretty v1.2.0 // indirect
)
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/go.sum (new file, vendored, 12 lines):

github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/tetratelabs/proxy-wasm-go-sdk v0.21.0 h1:sxuh1wxy/zz4vXwMEC+ESVpwJmej1f22eYsrJlgVn7c=
github.com/tetratelabs/proxy-wasm-go-sdk v0.21.0/go.mod h1:jqQTUvJBI6WJ+sVCZON3A4GwmUfBDuiNnZ4kuxsvLCo=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/nginx.conf (new file, vendored, 13 lines):

server {
    # send wasm files as download rather than render as html
    location ~ ^.*/(?P<request_basename>[^/]+\.(wasm))$ {
        root /www/downloads;

        add_header Content-disposition 'attachment; filename="$request_basename"';
        types {
            application/octet-stream .wasm;
        }
        default_type application/octet-stream;
    }
}
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/wasm_add_header.go (new file, 47 lines; the plugin source that build.sh compiles):

package main

import (
	"github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm"
	"github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types"
)

func main() {
	proxywasm.SetVMContext(&vmContext{})
}

type vmContext struct {
	// Embed the default VM context here,
	// so that we don't need to reimplement all the methods.
	types.DefaultVMContext
}

func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext {
	return &pluginContext{}
}

type pluginContext struct {
	// Embed the default plugin context here,
	// so that we don't need to reimplement all the methods.
	types.DefaultPluginContext
}

func (p *pluginContext) NewHttpContext(contextID uint32) types.HttpContext {
	return &httpHeaders{}
}

type httpHeaders struct {
	// Embed the default http context here,
	// so that we don't need to reimplement all the methods.
	types.DefaultHttpContext
}

func (ctx *httpHeaders) OnHttpResponseHeaders(int, bool) types.Action {
	proxywasm.LogDebug("adding header: x-test:true")

	err := proxywasm.AddHttpResponseHeader("x-test", "true")
	if err != nil {
		proxywasm.LogCriticalf("failed to add test header to response: %v", err)
	}

	return types.ActionContinue
}
test/integration/consul-container/test/envoy_extensions/testdata/wasm_test_files/wasm_add_header.wasm (new executable file, vendored): binary file not shown.
New test file in test/integration/consul-container/test/envoy_extensions (461 lines):

package envoyextensions

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"os"
	"testing"
	"time"

	"github.com/hashicorp/go-cleanhttp"
	"github.com/stretchr/testify/require"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
)

// TestWASMRemote Summary
// This test ensures that a WASM extension can be loaded from a remote file server then executed.
// It uses the same basic WASM extension as the TestWASMLocal test which adds the header
// "x-test:true" to the response.
// This test configures a static server and proxy, a client proxy, as well as an Nginx file server to
// serve the compiled wasm extension. The static proxy is configured to apply the wasm filter
// and pointed at the remote file server. When the filter is added with the remote wasm file configured
// envoy calls out to the nginx file server to download it.
func TestWASMRemote(t *testing.T) {
	t.Parallel()

	// build all the file paths we will need for the test
	cwd, err := os.Getwd()
	require.NoError(t, err, "could not get current working directory")
	hostWASMDir := fmt.Sprintf("%s/testdata/wasm_test_files", cwd)

	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
		NumServers:                1,
		NumClients:                1,
		ApplyDefaultProxySettings: true,
		BuildOpts: &libcluster.BuildOptions{
			Datacenter:             "dc1",
			InjectAutoEncryption:   true,
			InjectGossipEncryption: true,
		},
	})

	clientService, staticProxy := createTestServices(t, cluster)
	_, port := clientService.GetAddr()
	_, adminPort := clientService.GetAdminAddr()

	libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
	libassert.GetEnvoyListenerTCPFilters(t, adminPort)

	libassert.AssertContainerState(t, clientService, "running")
	libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")

	// Check header not present
	c1 := cleanhttp.DefaultClient()
	res, err := c1.Get(fmt.Sprintf("http://localhost:%d", port))
	require.NoError(t, err)

	// check that header DOES NOT exist before wasm applied
	if value := res.Header.Get(http.CanonicalHeaderKey("x-test")); value != "" {
		t.Fatal("unexpected test header present before WASM applied")
	}

	// Create Nginx file server
	uri, nginxService, nginxProxy := createNginxFileServer(t, cluster,
		// conf file
		testcontainers.ContainerFile{
			HostFilePath:      fmt.Sprintf("%s/nginx.conf", hostWASMDir),
			ContainerFilePath: "/etc/nginx/conf.d/wasm.conf",
			FileMode:          777,
		},
		// extra files loaded after startup
		testcontainers.ContainerFile{
			HostFilePath:      fmt.Sprintf("%s/wasm_add_header.wasm", hostWASMDir),
			ContainerFilePath: "/usr/share/nginx/html/wasm_add_header.wasm",
			FileMode:          777,
		})

	defer nginxService.Terminate()
	defer nginxProxy.Terminate()

	// wire up the wasm filter
	node := cluster.Agents[0]
	client := node.GetClient()

	agentService, _, err := client.Agent().Service(libservice.StaticServerServiceName, nil)
	require.NoError(t, err)

	agentService.Connect = &api.AgentServiceConnect{
		SidecarService: &api.AgentServiceRegistration{
			Kind: api.ServiceKindConnectProxy,
			Proxy: &api.AgentServiceConnectProxyConfig{
				Upstreams: []api.Upstream{
					{
						DestinationName:  "nginx-fileserver",
						DestinationPeer:  "",
						LocalBindAddress: "0.0.0.0",
						LocalBindPort:    9595,
					},
				},
			},
		},
	}

	// Upsert the service registration to add the nginx file server as an
	// upstream so that the static server proxy can retrieve the wasm plugin.
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{
		Kind:              agentService.Kind,
		ID:                agentService.ID,
		Name:              agentService.Service,
		Tags:              agentService.Tags,
		Port:              agentService.Port,
		Address:           agentService.Address,
		SocketPath:        agentService.SocketPath,
		TaggedAddresses:   agentService.TaggedAddresses,
		EnableTagOverride: agentService.EnableTagOverride,
		Meta:              agentService.Meta,
		Weights:           &agentService.Weights,
		Check:             nil,
		Checks:            nil,
		Proxy:             agentService.Proxy,
		Connect:           agentService.Connect,
		Namespace:         agentService.Namespace,
		Partition:         agentService.Partition,
		Locality:          agentService.Locality,
	})
	if err != nil {
		t.Fatal(err)
	}

	// wait until the nginx-fileserver is reachable from the static proxy
	t.Log("Attempting wait until nginx-fileserver-sidecar-proxy is available")
	bashScript := "for i in {1..10}; do echo Attempt $1: contacting nginx; if curl -I localhost:9595; then break; fi;" +
		"if [[ $i -ge 10 ]]; then echo Unable to connect to nginx; exit 1; fi; sleep 3; done; echo nginx available"
	_, err = staticProxy.Exec(context.Background(), []string{"/bin/bash", "-c", bashScript})
	require.NoError(t, err)

	consul := cluster.APIClient(0)
	defaults := api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "static-server",
		Protocol: "http",
		EnvoyExtensions: []api.EnvoyExtension{{
			Name: "builtin/wasm",
			Arguments: map[string]any{
				"Protocol":     "http",
				"ListenerType": "inbound",
				"PluginConfig": map[string]any{
					"VmConfig": map[string]any{
						"Code": map[string]any{
							"Remote": map[string]any{
								"HttpURI": map[string]any{
									"Service": map[string]any{
										"Name": "nginx-fileserver",
									},
									"URI": fmt.Sprintf("%s/wasm_add_header.wasm", uri),
								},
								"SHA256": sha256FromFile(t, fmt.Sprintf("%s/wasm_add_header.wasm", hostWASMDir)),
							},
						},
					},
				},
			},
		}},
	}

	_, _, err = consul.ConfigEntries().Set(&defaults, nil)
	require.NoError(t, err, "could not set config entries")

	// Check that header is present after wasm applied
	c2 := cleanhttp.DefaultClient()

	// The wasm plugin is not always applied on the first call. Retry and see if it is loaded.
	retryStrategy := func() *retry.Timer {
		return &retry.Timer{Timeout: 5 * time.Second, Wait: time.Second}
	}
	retry.RunWith(retryStrategy(), t, func(r *retry.R) {
		res2, err := c2.Get(fmt.Sprintf("http://localhost:%d", port))
		require.NoError(r, err)

		if value := res2.Header.Get(http.CanonicalHeaderKey("x-test")); value == "" {
			r.Fatal("test header missing after WASM applied")
		}
	})
}

// TestWASMLocal Summary
// This test ensures that a WASM extension with basic functionality is executed correctly.
// The extension takes an incoming request and adds the header "x-test:true"
func TestWASMLocal(t *testing.T) {
	t.Parallel()

	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
		NumServers:                1,
		NumClients:                1,
		ApplyDefaultProxySettings: true,
		BuildOpts: &libcluster.BuildOptions{
			Datacenter:             "dc1",
			InjectAutoEncryption:   true,
			InjectGossipEncryption: true,
		},
	})

	clientService, _ := createTestServices(t, cluster)
	_, port := clientService.GetAddr()
	_, adminPort := clientService.GetAdminAddr()

	libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
	libassert.GetEnvoyListenerTCPFilters(t, adminPort)

	libassert.AssertContainerState(t, clientService, "running")
	libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), "static-server", "")

	// Check header not present
	c1 := cleanhttp.DefaultClient()
	res, err := c1.Get(fmt.Sprintf("http://localhost:%d", port))
	require.NoError(t, err)

	// check that header DOES NOT exist before wasm applied
	if value := res.Header.Get(http.CanonicalHeaderKey("x-test")); value != "" {
		t.Fatal("unexpected test header present before WASM applied")
	}

	// wire up the wasm filter
	consul := cluster.APIClient(0)
	defaults := api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "static-server",
		Protocol: "http",
		EnvoyExtensions: []api.EnvoyExtension{{
			Name: "builtin/wasm",
			Arguments: map[string]any{
				"Protocol":     "http",
				"ListenerType": "inbound",
				"PluginConfig": map[string]any{
					"VmConfig": map[string]any{
						"Code": map[string]any{
							"Local": map[string]any{
								"Filename": "/wasm_add_header.wasm",
							},
						},
					},
				},
			},
		}},
	}

	_, _, err = consul.ConfigEntries().Set(&defaults, nil)
	require.NoError(t, err, "could not set config entries")

	// Check that header is present after wasm applied
	c2 := cleanhttp.DefaultClient()

	// The wasm plugin is not always applied on the first call. Retry and see if it is loaded.
	retryStrategy := func() *retry.Timer {
		return &retry.Timer{Timeout: 5 * time.Second, Wait: time.Second}
	}
	retry.RunWith(retryStrategy(), t, func(r *retry.R) {
		res2, err := c2.Get(fmt.Sprintf("http://localhost:%d", port))
		require.NoError(r, err)

		if value := res2.Header.Get(http.CanonicalHeaderKey("x-test")); value == "" {
			r.Fatal("test header missing after WASM applied")
		}
	})
}

func createTestServices(t *testing.T, cluster *libcluster.Cluster) (libservice.Service, libservice.Service) {
	node := cluster.Agents[0]
	client := node.GetClient()
	// Create a service and proxy instance
	serviceOpts := &libservice.ServiceOpts{
		Name:     libservice.StaticServerServiceName,
		ID:       libservice.StaticServerServiceName,
		HTTPPort: 8080,
		GRPCPort: 8079,
	}
	cwd, err := os.Getwd()
	require.NoError(t, err, "could not get current working directory")
	hostWASMDir := fmt.Sprintf("%s/testdata/wasm_test_files", cwd)

	wasmFile := testcontainers.ContainerFile{
		HostFilePath:      fmt.Sprintf("%s/wasm_add_header.wasm", hostWASMDir),
		ContainerFilePath: "/wasm_add_header.wasm",
		FileMode:          777,
	}

	customFn := chain(
		copyFilesToContainer([]testcontainers.ContainerFile{wasmFile}),
		chownFiles([]testcontainers.ContainerFile{wasmFile}, "envoy", true),
	)

	// Create a service and proxy instance
	_, staticProxy, err := libservice.CreateAndRegisterStaticServerAndSidecarWithCustomContainerConfig(node, serviceOpts, customFn)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-server-sidecar-proxy", nil)
	libassert.CatalogServiceExists(t, client, libservice.StaticServerServiceName, nil)

	// Create a client proxy instance with the server as an upstream

	clientConnectProxy, err := libservice.CreateAndRegisterStaticClientSidecar(node, "", false, false)
	require.NoError(t, err)

	libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy", nil)

	return clientConnectProxy, staticProxy
}

// createNginxFileServer creates an nginx container configured to serve a wasm file for download, as well as a sidecar
// registered for the service.
func createNginxFileServer(t *testing.T,
	cluster *libcluster.Cluster,
	conf testcontainers.ContainerFile,
	files ...testcontainers.ContainerFile) (string, libservice.Service, libservice.Service) {

	nginxName := "nginx-fileserver"
	nginxPort := 80
	svc := &libservice.ServiceOpts{
		Name:     nginxName,
		ID:       nginxName,
		HTTPPort: nginxPort,
		GRPCPort: 9999,
	}

	node := cluster.Agents[0]

	req := testcontainers.ContainerRequest{
		// nginx:stable
		Image:      "nginx@sha256:b07a5ab5292bd90c4271a55a44761899cc1b14814172cf7f186e3afb8bdbec28",
		Name:       nginxName,
		WaitingFor: wait.ForLog("").WithStartupTimeout(time.Second * 30),
		LifecycleHooks: []testcontainers.ContainerLifecycleHooks{
			{
				PostStarts: []testcontainers.ContainerHook{
					func(ctx context.Context, c testcontainers.Container) error {
						_, _, err := c.Exec(ctx, []string{"mkdir", "-p", "/www/downloads"})
						if err != nil {
							return err
						}

						for _, f := range files {
							fBytes, err := os.ReadFile(f.HostFilePath)
							if err != nil {
								return err
							}
							err = c.CopyToContainer(ctx, fBytes, f.ContainerFilePath, f.FileMode)
							if err != nil {
								return err
							}

							_, _, err = c.Exec(ctx, []string{"chmod", "+r", f.ContainerFilePath})
							if err != nil {
								return err
							}
						}

						return err
					},
				},
			},
		},
		Files: []testcontainers.ContainerFile{conf},
	}

	nginxService, nginxProxy, err := libservice.CreateAndRegisterCustomServiceAndSidecar(node, svc, req, nil)
	require.NoError(t, err, "could not create custom server and sidecar")

	_, port := nginxService.GetAddr()

	client := node.GetClient()
	libassert.CatalogServiceExists(t, client, nginxName, nil)
	libassert.CatalogServiceExists(t, client, fmt.Sprintf("%s-sidecar-proxy", nginxName), nil)

	return fmt.Sprintf("http://nginx-fileserver:%d", port), nginxService, nginxProxy
}

// chain takes multiple setup functions for testcontainers.ContainerRequest and chains them together into a single function
// of testcontainers.ContainerRequest to testcontainers.ContainerRequest.
func chain(fns ...func(testcontainers.ContainerRequest) testcontainers.ContainerRequest) func(testcontainers.ContainerRequest) testcontainers.ContainerRequest {
	return func(req testcontainers.ContainerRequest) testcontainers.ContainerRequest {
		for _, fn := range fns {
			req = fn(req)
		}

		return req
	}
}

// copyFilesToContainer is a convenience function to build custom testcontainers.ContainerRequest. It takes a list of files
// which need to be copied to the container. It returns a function which updates a given testcontainers.ContainerRequest
// to include the files which need to be copied to the container on startup.
func copyFilesToContainer(files []testcontainers.ContainerFile) func(testcontainers.ContainerRequest) testcontainers.ContainerRequest {
	return func(req testcontainers.ContainerRequest) testcontainers.ContainerRequest {
		req.Files = files
		return req
	}
}

// chownFiles is a convenience function to build custom testcontainers.ContainerRequest. It takes a list of files,
// a user to make the owner, and whether the command requires sudo. It then returns a function which updates
// a testcontainers.ContainerRequest with a lifecycle hook which will chown the files to the user after container startup.
func chownFiles(files []testcontainers.ContainerFile, user string, sudo bool) func(request testcontainers.ContainerRequest) testcontainers.ContainerRequest {
	return func(req testcontainers.ContainerRequest) testcontainers.ContainerRequest {
		req.LifecycleHooks = append(req.LifecycleHooks, testcontainers.ContainerLifecycleHooks{
			PostStarts: []testcontainers.ContainerHook{
				func(ctx context.Context, c testcontainers.Container) error {
					cmd := []string{}
					if sudo {
						cmd = append(cmd, "sudo")
					}

					cmd = append(cmd, "chown", user)

					for _, f := range files {
						cmd = append(cmd, f.ContainerFilePath)
					}

					_, _, err := c.Exec(ctx, cmd)
					return err
				},
			},
		})

		return req
	}
}

// sha256FromFile reads in the file from filepath and computes a sha256 of its contents.
func sha256FromFile(t *testing.T, filepath string) string {
	f, err := os.Open(filepath)
	require.NoError(t, err, "could not open file for sha")
	defer f.Close()

	h := sha256.New()
	_, err = io.Copy(h, f)
	require.NoError(t, err, "could not copy file to sha")

	return fmt.Sprintf("%x", h.Sum(nil))
}

// safeDelete removes a given file if it exists.
func safeDelete(t *testing.T, filePath string) {
	t.Logf("cleaning up stale build file: %s", filePath)
	if _, err := os.Stat(filePath); err != nil {
		return
	}

	// build is out of date, wipe compiled wasm
	err := os.Remove(filePath)
	require.NoError(t, err, "could not remove file")
}
@@ -66,7 +66,7 @@ func CreateAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, dest
 		ServiceID: libservice.StaticClientServiceName,
 	}
 
-	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
+	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node, nil)
 	if err != nil {
 		return nil, err
 	}