// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package cluster

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"time"

	goretry "github.com/avast/retry-go"
	dockercontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
	"github.com/hashicorp/go-multierror"
	"github.com/otiai10/copy"
	"github.com/pkg/errors"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/hashicorp/consul/api"

	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)

const bootLogLine = "Consul agent running"
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"

// Exposed ports info
const MaxEnvoyOnNode = 10                  // the maximum number of Envoy sidecars that can run alongside the agent; admin ports start at 19000
const ServiceUpstreamLocalBindPort = 5000  // local bind port of a service's upstream
const ServiceUpstreamLocalBindPort2 = 5001 // local bind port of a service's second upstream, for services with 2 upstreams
const debugPort = "4000/tcp"

// consulContainerNode implements the Agent interface by running a Consul agent
// in a container.
type consulContainerNode struct {
	ctx            context.Context
	pod            testcontainers.Container
	container      testcontainers.Container
	serverMode     bool
	datacenter     string
	partition      string
	config         Config
	podReq         testcontainers.ContainerRequest
	consulReq      testcontainers.ContainerRequest
	dataDir        string
	network        string
	id             int
	name           string
	terminateFuncs []func() error

	client           *api.Client
	clientAddr       string
	clientCACertFile string
	ip               string

	grpcConn *grpc.ClientConn

	nextAdminPortOffset   int
	nextConnectPortOffset int

	info AgentInfo
}

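// GetPod returns the pause "pod" container that owns the network namespace
// shared by the agent container and its sidecars.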
func (c *consulContainerNode) GetPod() testcontainers.Container {
	return c.pod
}

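// Logs streams the Consul agent container's logs.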
func (c *consulContainerNode) Logs(context context.Context) (io.ReadCloser, error) {
	return c.container.Logs(context)
}

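// ClaimAdminPort hands out the next unused Envoy admin port (19000 + offset)
// on this agent's pod, returning an error once MaxEnvoyOnNode ports have been
// claimed.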
func (c *consulContainerNode) ClaimAdminPort() (int, error) {
	if c.nextAdminPortOffset >= MaxEnvoyOnNode {
		return 0, fmt.Errorf("ran out of envoy admin ports: max %d, already claimed %d",
			MaxEnvoyOnNode, c.nextAdminPortOffset)
	}
	p := 19000 + c.nextAdminPortOffset
	c.nextAdminPortOffset++
	return p, nil
}

// NewConsulContainer starts a Consul agent in a container with the given config.
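//
// A rough usage sketch (the ctx, conf, cluster, and t values here are
// illustrative placeholders, not taken from a specific test):
//
//	agent, err := NewConsulContainer(ctx, conf, cluster)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer func() { _ = agent.Terminate() }()
//	client := agent.GetClient() // standard github.com/hashicorp/consul/api client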
func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, ports ...int) (Agent, error) {
	network := cluster.NetworkName
	index := cluster.Index
	if config.ScratchDir == "" {
		return nil, fmt.Errorf("ScratchDir is required")
	}

	license, err := readLicense()
	if err != nil {
		return nil, err
	}

	pc, err := readSomeConfigFileFields(config.JSON)
	if err != nil {
		return nil, err
	}

	name := config.NodeName
	if name == "" {
		// Generate a random name for the agent
		consulType := "client"
		if pc.Server {
			consulType = "server"
		}
		name = utils.RandName(fmt.Sprintf("%s-consul-%s-%d", pc.Datacenter, consulType, index))
	}

	// Inject the new agent name
	config.Cmd = append(config.Cmd, "-node", name)

	tmpDirData := filepath.Join(config.ScratchDir, "data")
	if err := os.MkdirAll(tmpDirData, 0777); err != nil {
		return nil, fmt.Errorf("error creating data directory %s: %w", tmpDirData, err)
	}
	if err := os.Chmod(tmpDirData, 0777); err != nil {
		return nil, fmt.Errorf("error chmodding data directory %s: %w", tmpDirData, err)
	}

	if config.ExternalDataDir != "" {
		// copy consul persistent state from an external dir
		err := copy.Copy(config.ExternalDataDir, tmpDirData)
		if err != nil {
			return nil, fmt.Errorf("error copying persistent data from %s: %w", config.ExternalDataDir, err)
		}
	}

	var caCertFileForAPI string
	if config.CACert != "" {
		caCertFileForAPI = filepath.Join(config.ScratchDir, "ca.pem")
		if err := os.WriteFile(caCertFileForAPI, []byte(config.CACert), 0644); err != nil {
			return nil, fmt.Errorf("error writing out CA cert %s: %w", caCertFileForAPI, err)
		}
	}

	configFile, err := createConfigFile(config.ScratchDir, config.JSON)
	if err != nil {
		return nil, fmt.Errorf("error writing out config file %s: %w", configFile, err)
	}

	opts := containerOpts{
		name:               name,
		configFile:         configFile,
		dataDir:            tmpDirData,
		license:            license,
		additionalNetworks: []string{"bridge", network},
		hostname:           fmt.Sprintf("agent-%d", index),
	}
	podReq, consulReq := newContainerRequest(config, opts, ports...)

	// Do some trickery to ensure that partial completion is correctly torn
	// down, but successful execution is not.
	var deferClean utils.ResettableDefer
	defer deferClean.Execute()

	podContainer, err := startContainer(ctx, podReq)
	if err != nil {
		return nil, fmt.Errorf("error starting pod with image %q: %w", podReq.Image, err)
	}
	deferClean.Add(func() {
		_ = podContainer.Terminate(ctx)
	})

	var (
		httpPort  = pc.Ports.HTTP
		httpsPort = pc.Ports.HTTPS

		clientAddr       string
		clientCACertFile string

		info     AgentInfo
		grpcConn *grpc.ClientConn
	)
	debugURI := ""
	if utils.Debug {
		if err := goretry.Do(
			func() (err error) {
				debugURI, err = podContainer.PortEndpoint(ctx, "4000", "tcp")
				return err
			},
			goretry.Delay(10*time.Second),
			goretry.RetryIf(func(err error) bool {
				return err != nil
			}),
		); err != nil {
			return nil, fmt.Errorf("error getting the debug port endpoint: %w", err)
		}
		info.DebugURI = debugURI
	}
	if httpPort > 0 {
		for i := 0; i < 10; i++ {
			var uri string
			uri, err = podContainer.PortEndpoint(ctx, "8500", "http")
			if err != nil {
				time.Sleep(500 * time.Millisecond)
				continue
			}
			clientAddr = uri
			break
		}
		if err != nil {
			return nil, err
		}

	} else if httpsPort > 0 {
		uri, err := podContainer.PortEndpoint(ctx, "8501", "https")
		if err != nil {
			return nil, err
		}
		clientAddr = uri

		clientCACertFile = caCertFileForAPI

	} else {
		if pc.Server {
			return nil, fmt.Errorf("server container does not expose HTTP or HTTPS")
		}
	}

	if caCertFileForAPI != "" {
		if config.UseAPIWithTLS {
			if pc.Ports.HTTPS > 0 {
				info.UseTLSForAPI = true
			} else {
				return nil, fmt.Errorf("UseAPIWithTLS is set but ports.https is not for this agent")
			}
		}
		if config.UseGRPCWithTLS {
			if pc.Ports.GRPCTLS > 0 {
				info.UseTLSForGRPC = true
			} else {
				return nil, fmt.Errorf("UseGRPCWithTLS is set but ports.grpc_tls is not for this agent")
			}
		}
		info.CACertFile = clientCACertFile
	}

	// TODO: Support gRPC+TLS port.
	if pc.Ports.GRPC > 0 {
		port, err := nat.NewPort("tcp", strconv.Itoa(pc.Ports.GRPC))
		if err != nil {
			return nil, fmt.Errorf("failed to parse gRPC port: %w", err)
		}
		endpoint, err := podContainer.PortEndpoint(ctx, port, "tcp")
		if err != nil {
			return nil, fmt.Errorf("failed to get gRPC endpoint: %w", err)
		}
		grpcURL, err := url.Parse(endpoint)
		if err != nil {
			return nil, fmt.Errorf("failed to parse gRPC endpoint URL: %w", err)
		}
		conn, err := grpc.Dial(grpcURL.Host, grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			return nil, fmt.Errorf("failed to dial gRPC connection: %w", err)
		}
		deferClean.Add(func() { _ = conn.Close() })
		grpcConn = conn
	}

	ip, err := podContainer.ContainerIP(ctx)
	if err != nil {
		return nil, err
	}

	consulContainer, err := startContainer(ctx, consulReq)
	if err != nil {
		return nil, fmt.Errorf("error starting main with image %q: %w", consulReq.Image, err)
	}
	deferClean.Add(func() {
		_ = consulContainer.Terminate(ctx)
	})

	if utils.FollowLog {
		if err := consulContainer.StartLogProducer(ctx); err != nil {
			return nil, err
		}
		deferClean.Add(func() {
			_ = consulContainer.StopLogProducer()
		})

		if config.LogConsumer != nil {
			consulContainer.FollowOutput(config.LogConsumer)
		} else {
			consulContainer.FollowOutput(&LogConsumer{
				Prefix: opts.name,
			})
		}
	}

	node := &consulContainerNode{
		config:     config,
		pod:        podContainer,
		container:  consulContainer,
		serverMode: pc.Server,
		datacenter: pc.Datacenter,
		partition:  pc.Partition,
		ctx:        ctx,
		podReq:     podReq,
		consulReq:  consulReq,
		dataDir:    tmpDirData,
		network:    network,
		id:         index,
		name:       name,
		ip:         ip,
		info:       info,
		grpcConn:   grpcConn,
	}

	if httpPort > 0 || httpsPort > 0 {
		apiConfig := api.DefaultConfig()
		apiConfig.Address = clientAddr
		if clientCACertFile != "" {
			apiConfig.TLSConfig.CAFile = clientCACertFile
		}

		if cluster.TokenBootstrap != "" {
			apiConfig.Token = cluster.TokenBootstrap
		}
		apiClient, err := api.NewClient(apiConfig)
		if err != nil {
			return nil, err
		}

		node.client = apiClient
		node.clientAddr = clientAddr
		node.clientCACertFile = clientCACertFile
	}

	// Inject the agent token if ACLs are enabled and the bootstrap token has been generated
	if cluster.TokenBootstrap != "" && cluster.ACLEnabled {
		agentToken, err := cluster.CreateAgentToken(pc.Datacenter, name)
		if err != nil {
			return nil, err
		}
		cmd := []string{"consul", "acl", "set-agent-token",
			"-token", cluster.TokenBootstrap,
			"agent", agentToken}

		// retry in case the agent has not fully initialized
		err = goretry.Do(
			func() error {
				_, err := node.Exec(context.Background(), cmd)
				if err != nil {
					return fmt.Errorf("error setting the agent token: %w", err)
				}
				return nil
			},
			goretry.Delay(time.Second*1),
		)
		if err != nil {
			return nil, fmt.Errorf("error setting agent token: %w", err)
		}
	}

	// disable cleanup functions now that we have an object with a Terminate() function
	deferClean.Reset()

	return node, nil
}

func (c *consulContainerNode) GetNetwork() string {
	return c.network
}

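// GetName returns the Docker container name of the agent container, falling
// back to the requested container name if the container has not started.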
func (c *consulContainerNode) GetName() string {
	if c.container == nil {
		return c.consulReq.Name // TODO: is this safe to do all the time?
	}
	name, err := c.container.Name(c.ctx)
	if err != nil {
		return ""
	}
	return name
}

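// GetAgentName returns the Consul node name the agent was started with
// (the value passed via -node).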
func (c *consulContainerNode) GetAgentName() string {
	return c.name
}

func (c *consulContainerNode) GetConfig() Config {
	return c.config.Clone()
}

func (c *consulContainerNode) GetDatacenter() string {
	return c.datacenter
}

func (c *consulContainerNode) GetPartition() string {
	return c.partition
}

func (c *consulContainerNode) IsServer() bool {
	return c.serverMode
}

// GetClient returns an API client that can be used to communicate with the Agent.
func (c *consulContainerNode) GetClient() *api.Client {
	return c.client
}

func (c *consulContainerNode) GetGRPCConn() *grpc.ClientConn {
	return c.grpcConn
}

// NewClient returns a new API client built from this node's address and CA
// certificate, using the provided token.
//   - updateDefault: if true, also replace the node's default client
func (c *consulContainerNode) NewClient(token string, updateDefault bool) (*api.Client, error) {
	apiConfig := api.DefaultConfig()
	apiConfig.Address = c.clientAddr
	if c.clientCACertFile != "" {
		apiConfig.TLSConfig.CAFile = c.clientCACertFile
	}

	if token != "" {
		apiConfig.Token = token
	}
	apiClient, err := api.NewClient(apiConfig)
	if err != nil {
		return nil, err
	}

	if updateDefault {
		c.client = apiClient
	}
	return apiClient, nil
}

func (c *consulContainerNode) GetAPIAddrInfo() (addr, caCert string) {
	return c.clientAddr, c.clientCACertFile
}

func (c *consulContainerNode) GetInfo() AgentInfo {
	return c.info
}

func (c *consulContainerNode) GetIP() string {
	return c.ip
}

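// RegisterTermination registers a function to be called when the agent is
// terminated, e.g. to clean up sidecar containers associated with it.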
func (c *consulContainerNode) RegisterTermination(f func() error) {
	c.terminateFuncs = append(c.terminateFuncs, f)
}

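// Exec runs a command inside the Consul agent container and returns its
// output.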
func (c *consulContainerNode) Exec(ctx context.Context, cmd []string) (string, error) {
	exitcode, reader, err := c.container.Exec(ctx, cmd)
	if err != nil {
		return "", fmt.Errorf("exec with error: %w", err)
	}
	if exitcode != 0 {
		return "", fmt.Errorf("exec with exit code %d", exitcode)
	}

	buf, err := io.ReadAll(reader)
	if err != nil {
		return "", fmt.Errorf("error reading exec output: %w", err)
	}

	return string(buf), nil
}

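// Upgrade restarts the Consul agent container with a new image/config while
// retaining the pod (and therefore its IP and published ports) and the
// existing data directory.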
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error {
	if config.ScratchDir == "" {
		return fmt.Errorf("ScratchDir is required")
	}

	newConfigFile, err := createConfigFile(config.ScratchDir, config.JSON)
	if err != nil {
		return err
	}

	// We'll keep the same pod.
	opts := containerOpts{
		name:               c.consulReq.Name,
		configFile:         newConfigFile,
		dataDir:            c.dataDir,
		license:            "",
		additionalNetworks: []string{"bridge", c.network},
		hostname:           c.consulReq.Hostname,
	}
	_, consulReq2 := newContainerRequest(config, opts)
	consulReq2.Env = c.consulReq.Env // copy license

	// sanity check two fields
	if consulReq2.Name != c.consulReq.Name {
		return fmt.Errorf("new name %q should match old name %q", consulReq2.Name, c.consulReq.Name)
	}
	if consulReq2.Hostname != c.consulReq.Hostname {
		return fmt.Errorf("new hostname %q should match old hostname %q", consulReq2.Hostname, c.consulReq.Hostname)
	}

	if err := c.TerminateAndRetainPod(true); err != nil {
		return fmt.Errorf("error terminating running container during upgrade: %w", err)
	}

	c.consulReq = consulReq2

	container, err := startContainer(ctx, c.consulReq)
	c.ctx = ctx
	c.container = container
	if err != nil {
		return err
	}

	if utils.FollowLog {
		if err := container.StartLogProducer(ctx); err != nil {
			return err
		}
		container.FollowOutput(&LogConsumer{
			Prefix: opts.name,
		})
	}

	return nil
}

// Terminate attempts to terminate the agent container.
// This might also include running termination functions for containers associated with the agent.
// On failure, an error will be returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error {
	return c.terminate(false, false)
}

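// TerminateAndRetainPod terminates the agent container but keeps the pod
// (network namespace) so the node can be restarted, e.g. during an upgrade.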
func (c *consulContainerNode) TerminateAndRetainPod(skipFuncs bool) error {
	return c.terminate(true, skipFuncs)
}

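// terminate tears down the agent container and, unless retainPod is set, the
// pod container as well. skipFuncs skips the registered termination functions
// and leaves the gRPC connection open.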
func (c *consulContainerNode) terminate(retainPod bool, skipFuncs bool) error {
	// Services might register a termination function that should also fire
	// when the "agent" is cleaned up.
	// If skipFuncs is true, we skip the terminateFuncs of the connect sidecars,
	// e.g. during an upgrade.
	if !skipFuncs {
		for _, f := range c.terminateFuncs {
			// Best-effort cleanup: errors from registered termination
			// functions are intentionally ignored.
			_ = f()
		}

		// If the pod is retained, and therefore the IP, the gRPC connection
		// should handle reconnecting, so there is no reason to close it.
		c.closeGRPC()
	}

	var merr error
	if c.container != nil {
		if err := TerminateContainer(c.ctx, c.container, true); err != nil {
			merr = multierror.Append(merr, err)
		}
		c.container = nil
	}

	if !retainPod && c.pod != nil {
		if err := TerminateContainer(c.ctx, c.pod, false); err != nil {
			merr = multierror.Append(merr, err)
		}

		c.pod = nil
	}

	return merr
}

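// closeGRPC closes the cached gRPC connection to the agent, if any.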
func (c *consulContainerNode) closeGRPC() error {
	if c.grpcConn != nil {
		if err := c.grpcConn.Close(); err != nil {
			return err
		}
		c.grpcConn = nil
	}
	return nil
}

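// DataDir returns the host path of the data directory mounted into the agent
// container.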
func (c *consulContainerNode) DataDir() string {
	return c.dataDir
}

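// startContainer creates and starts a container from the given request,
// bounding the whole operation with a 40-second timeout.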
func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (testcontainers.Container, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Second*40)
	defer cancel()
	return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
}

const pauseImage = "registry.k8s.io/pause:3.3"

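// containerOpts holds the per-node settings used to build the pod and agent
// container requests.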
type containerOpts struct {
	configFile         string
	dataDir            string
	hostname           string
	index              int
	license            string
	name               string
	additionalNetworks []string
}

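// newContainerRequest builds two container requests: a pause "pod" container
// that exposes all of the node's ports, and the Consul agent container that
// joins the pod's network namespace.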
func newContainerRequest(config Config, opts containerOpts, ports ...int) (podRequest, consulRequest testcontainers.ContainerRequest) {
	skipReaper := isRYUKDisabled()

	pod := testcontainers.ContainerRequest{
		Image:      pauseImage,
		AutoRemove: false,
		Name:       opts.name + "-pod",
		SkipReaper: skipReaper,
		ExposedPorts: []string{
			"8500/tcp", // Consul HTTP API
			"8501/tcp", // Consul HTTPS API
			"8502/tcp", // Consul gRPC API

			"8443/tcp", // Envoy Gateway Listener

			"8079/tcp", // Envoy App Listener - gRPC port used by static-server
			"8078/tcp", // Envoy App Listener - gRPC port used by static-server-v1
			"8077/tcp", // Envoy App Listener - gRPC port used by static-server-v2
			"8076/tcp", // Envoy App Listener - gRPC port used by static-server-v3

			"8080/tcp", // Envoy App Listener - HTTP port used by static-server
			"8081/tcp", // Envoy App Listener - HTTP port used by static-server-v1
			"8082/tcp", // Envoy App Listener - HTTP port used by static-server-v2
			"8083/tcp", // Envoy App Listener - HTTP port used by static-server-v3

			"9997/tcp", // Envoy App Listener
			"9998/tcp", // Envoy App Listener
			"9999/tcp", // Envoy App Listener
		},
		Hostname: opts.hostname,
		Networks: opts.additionalNetworks,
	}

	// Envoy upstream listener
	pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", ServiceUpstreamLocalBindPort))
	pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", ServiceUpstreamLocalBindPort2))

	// Reserve the exposed ports for the Envoy admin ports, e.g. 19000-19009
	basePort := 19000
	for i := 0; i < MaxEnvoyOnNode; i++ {
		pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", basePort+i))
	}

	for _, port := range ports {
		pod.ExposedPorts = append(pod.ExposedPorts, fmt.Sprintf("%d/tcp", port))
	}
	if utils.Debug {
		pod.ExposedPorts = append(pod.ExposedPorts, debugPort)
	}

	// For handshakes like auto-encrypt, it can take tens of seconds for the agent to become "ready".
	// If we only wait until the log stream starts, subsequent commands to agents will fail.
	// TODO: optimize the wait strategy
	app := testcontainers.ContainerRequest{
		NetworkMode: dockercontainer.NetworkMode("container:" + opts.name + "-pod"),
		Image:       config.DockerImage(),
		WaitingFor:  wait.ForLog(bootLogLine).WithStartupTimeout(60 * time.Second), // See note above
		AutoRemove:  false,
		Name:        opts.name,
		Mounts: []testcontainers.ContainerMount{
			{
				Source:   testcontainers.DockerBindMountSource{HostPath: opts.configFile},
				Target:   "/consul/config/config.json",
				ReadOnly: true,
			},
			{
				Source: testcontainers.DockerBindMountSource{HostPath: opts.dataDir},
				Target: "/consul/data",
			},
		},
		Cmd:        config.Cmd,
		SkipReaper: skipReaper,
		Env:        map[string]string{"CONSUL_LICENSE": opts.license},
	}

	if config.CertVolume != "" {
		app.Mounts = append(app.Mounts, testcontainers.ContainerMount{
			Source: testcontainers.DockerVolumeMountSource{
				Name: config.CertVolume,
			},
			Target:   "/consul/config/certs",
			ReadOnly: true,
		})
	}

	// fmt.Printf("app: %s\n", utils.Dump(app))

	return pod, app
}

// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
// by an environment variable.
//
// https://github.com/testcontainers/moby-ryuk
func isRYUKDisabled() bool {
	skipReaperStr := os.Getenv(disableRYUKEnv)
	skipReaper, err := strconv.ParseBool(skipReaperStr)
	if err != nil {
		return false
	}
	return skipReaper
}

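// readLicense returns an enterprise license from CONSUL_LICENSE, or from the
// file named by CONSUL_LICENSE_PATH; it returns an empty string if neither is
// set.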
func readLicense() (string, error) {
	if license := os.Getenv("CONSUL_LICENSE"); license != "" {
		return license, nil
	}

	licensePath := os.Getenv("CONSUL_LICENSE_PATH")
	if licensePath == "" {
		return "", nil
	}

	licenseBytes, err := os.ReadFile(licensePath)
	if err != nil {
		return "", err
	}
	return string(licenseBytes), nil
}

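// createConfigFile writes the agent configuration JSON into a config file
// under the scratch directory and returns its path.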
func createConfigFile(scratchDir string, JSON string) (string, error) {
	configDir := filepath.Join(scratchDir, "config")

	if err := os.MkdirAll(configDir, 0777); err != nil {
		return "", err
	}
	if err := os.Chmod(configDir, 0777); err != nil {
		return "", err
	}

	configFile := filepath.Join(configDir, "config.hcl")

	if err := os.WriteFile(configFile, []byte(JSON), 0644); err != nil {
		return "", err
	}
	return configFile, nil
}

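// parsedConfig captures the handful of agent config fields the test harness
// needs to inspect before starting the container.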
type parsedConfig struct {
	Datacenter string      `json:"datacenter"`
	Server     bool        `json:"server"`
	Ports      parsedPorts `json:"ports"`
	Partition  string      `json:"partition"`
}

type parsedPorts struct {
	DNS     int `json:"dns"`
	HTTP    int `json:"http"`
	HTTPS   int `json:"https"`
	GRPC    int `json:"grpc"`
	GRPCTLS int `json:"grpc_tls"`
	SerfLAN int `json:"serf_lan"`
	SerfWAN int `json:"serf_wan"`
	Server  int `json:"server"`
}

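// readSomeConfigFileFields parses the agent config JSON into parsedConfig,
// defaulting the datacenter to "dc1" when unset.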
func readSomeConfigFileFields(JSON string) (parsedConfig, error) {
	var pc parsedConfig
	if err := json.Unmarshal([]byte(JSON), &pc); err != nil {
		return pc, errors.Wrap(err, "failed to parse config file")
	}
	if pc.Datacenter == "" {
		pc.Datacenter = "dc1"
	}
	return pc, nil
}