mirror of https://github.com/status-im/consul.git
auto-reload configuration when config files change (#12329)

* add config watcher to the config package
* add logging to watcher
* add test and refactor to add WatcherEvent
* add all API calls and fix a bug with recreated files
* add tests for watcher
* remove the unnecessary use of context
* Add debug log and a test for file rename
* use inode to detect if the file is recreated/replaced and only listen to create events
* tidy ups (#1535)
* tidy ups
* Add tests for inode reconcile
* fix linux vs windows syscall
* fix linux vs windows syscall
* fix windows compile error
* increase timeout
* use ctime ID
* remove remove/creation test as it's a use case that fails on linux
* fix linux/windows to use Ino/CreationTime
* fix the watcher to only overwrite the current file id
* fix linter error
* fix remove/create test
* set reconcile loop to 200 milliseconds
* fix watcher to not trigger event on remove, add more tests
* on a remove event try to add the file back to the watcher and trigger the handler on success
* fix race condition
* fix flaky test
* fix race conditions
* set level to info
* fix when a file is removed and an event for it arrives afterwards
* fix to trigger handler when we get a remove but the re-add fails
* fix error message
* add tests for directory watch and fixes
* detect if a file is a symlink and return an error on Add
* rename Watcher to FileWatcher and remove symlink deref
* add fsnotify@v1.5.1
* fix go mod
* do not reset timer on errors, rename OS-specific files
* rename New func
* events trigger on write and rename
* add missing test
* fix flaky tests
* fix flaky test
* check reconcile when removed
* delete invalid file
* fix test to create files with different mod times
* back-date file instead of sleeping
* add watching file in agent command
* fix watcher call to use new API
* add configuration and stop watcher when the server stops
* add certs as watched files
* move FileWatcher to the agent start instead of the command code
* stop watcher before replacing it
* save watched files in agent
* add Add and Remove interfaces to the file watcher
* fix Remove to not return an error
* use `Add` and `Remove` to update cert files
* fix tests
* close events channel on the file watcher even when the context is done
* extract `NotAutoReloadableRuntimeConfig` into a separate struct
* fix linter errors
* add CA configs and outgoing verify to the non-auto-reloadable config
* add some logs and fix to use background context
* add tests for auto-config reload
* remove stale test
* add tests for changes to config files
* add check to see if old cert files still trigger updates
* rename `NotAutoReloadableRuntimeConfig` to `StaticRuntimeConfig`
* fix to re-add both key and cert file; add test to cover this case
* review suggestion

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>

* add check for static runtime config changes
* fix test
* add changelog file
* fix review comments
* Apply suggestions from code review

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>

* update flag description

Co-authored-by: FFMMM <FFMMM@users.noreply.github.com>

* fix compilation error
* add static runtime config support
* fix test
* fix review comments
* fix log test
* Update .changelog/12329.txt

Co-authored-by: Dan Upton <daniel@floppy.co>

* transfer tests to runtime_test.go
* fix FileWatcher Replace to not deadlock
* avoid having lingering locks

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>

* split ReloadConfig func
* fix warning message

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>

* convert `FileWatcher` into an interface
* fix compilation errors
* fix tests
* extract func for adding and removing files

Co-authored-by: Ashwin Venkatesh <ashwin@hashicorp.com>
Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>
Co-authored-by: FFMMM <FFMMM@users.noreply.github.com>
Co-authored-by: Daniel Upton <daniel@floppy.co>
parent dc62293978
commit 16b19dd82d
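The squashed history above describes the approach: watch the config files with fsnotify, and back that up with a periodic reconcile pass (every 200 milliseconds) that compares file modification times so that replaced or re-created files are still detected. A stripped-down sketch of that reconcile idea follows; it is illustrative only, not Consul's implementation, and the watched path is made up.

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// changedFiles returns the watched files whose modification time differs from the
// last recorded value, updating the recorded times as it goes. Consul's FileWatcher
// runs a similar pass on a 200ms ticker alongside the fsnotify events.
func changedFiles(modTimes map[string]time.Time) []string {
	var changed []string
	for name, last := range modTimes {
		info, err := os.Stat(name)
		if err != nil {
			continue // the file may be mid-replace; try again on the next tick
		}
		if !info.ModTime().Equal(last) {
			modTimes[name] = info.ModTime()
			changed = append(changed, name)
		}
	}
	return changed
}

func main() {
	modTimes := map[string]time.Time{"/etc/consul.d/server.hcl": {}} // illustrative path

	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	for range ticker.C {
		for _, name := range changedFiles(modTimes) {
			fmt.Println("config file changed:", name)
		}
	}
}
```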
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
config: automatically reload config when a file changes using the `auto-reload-config` CLI flag or `auto_reload_config` config option.
|
||||
```
|
agent/agent.go (117 changed lines)
@ -11,6 +11,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -360,6 +361,10 @@ type Agent struct {
|
|||
// run by the Agent
|
||||
routineManager *routine.Manager
|
||||
|
||||
// FileWatcher is the watcher responsible for reporting events when a config
// file changes
|
||||
FileWatcher config.Watcher
|
||||
|
||||
// xdsServer serves the XDS protocol for configuring Envoy proxies.
|
||||
xdsServer *xds.Server
|
||||
|
||||
|
@ -443,6 +448,21 @@ func New(bd BaseDeps) (*Agent, error) {
|
|||
// TODO: pass in a fully populated apiServers into Agent.New
|
||||
a.apiServers = NewAPIServers(a.logger)
|
||||
|
||||
for _, f := range []struct {
|
||||
Cfg tlsutil.ProtocolConfig
|
||||
}{
|
||||
{a.baseDeps.RuntimeConfig.TLS.InternalRPC},
|
||||
{a.baseDeps.RuntimeConfig.TLS.GRPC},
|
||||
{a.baseDeps.RuntimeConfig.TLS.HTTPS},
|
||||
} {
|
||||
if f.Cfg.KeyFile != "" {
|
||||
a.baseDeps.WatchedFiles = append(a.baseDeps.WatchedFiles, f.Cfg.KeyFile)
|
||||
}
|
||||
if f.Cfg.CertFile != "" {
|
||||
a.baseDeps.WatchedFiles = append(a.baseDeps.WatchedFiles, f.Cfg.CertFile)
|
||||
}
|
||||
}
|
||||
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
|
@ -692,6 +712,26 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
{Name: "pre_release", Value: a.config.VersionPrerelease},
|
||||
})
|
||||
|
||||
// start a goroutine to reload config based on file watcher events
|
||||
if a.baseDeps.RuntimeConfig.AutoReloadConfig && len(a.baseDeps.WatchedFiles) > 0 {
|
||||
w, err := config.NewFileWatcher(a.baseDeps.WatchedFiles, a.baseDeps.Logger)
|
||||
if err != nil {
|
||||
a.baseDeps.Logger.Error("error loading config", "error", err)
|
||||
} else {
|
||||
a.FileWatcher = w
|
||||
a.baseDeps.Logger.Debug("starting file watcher")
|
||||
a.FileWatcher.Start(context.Background())
|
||||
go func() {
|
||||
for event := range a.FileWatcher.EventsCh() {
|
||||
a.baseDeps.Logger.Debug("auto-reload config triggered", "event-file", event.Filename)
|
||||
err := a.AutoReloadConfig()
|
||||
if err != nil {
|
||||
a.baseDeps.Logger.Error("error loading config", "error", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1084,8 +1124,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
|
|||
cfg.SerfWANConfig.MemberlistConfig.CIDRsAllowed = runtimeCfg.SerfAllowedCIDRsWAN
|
||||
cfg.SerfLANConfig.MemberlistConfig.AdvertiseAddr = runtimeCfg.SerfAdvertiseAddrLAN.IP.String()
|
||||
cfg.SerfLANConfig.MemberlistConfig.AdvertisePort = runtimeCfg.SerfAdvertiseAddrLAN.Port
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.EncryptVerifyIncoming
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.EncryptVerifyOutgoing
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.StaticRuntimeConfig.EncryptVerifyIncoming
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.StaticRuntimeConfig.EncryptVerifyOutgoing
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipInterval = runtimeCfg.GossipLANGossipInterval
|
||||
cfg.SerfLANConfig.MemberlistConfig.GossipNodes = runtimeCfg.GossipLANGossipNodes
|
||||
cfg.SerfLANConfig.MemberlistConfig.ProbeInterval = runtimeCfg.GossipLANProbeInterval
|
||||
|
@ -1101,8 +1141,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
|
|||
cfg.SerfWANConfig.MemberlistConfig.BindPort = runtimeCfg.SerfBindAddrWAN.Port
|
||||
cfg.SerfWANConfig.MemberlistConfig.AdvertiseAddr = runtimeCfg.SerfAdvertiseAddrWAN.IP.String()
|
||||
cfg.SerfWANConfig.MemberlistConfig.AdvertisePort = runtimeCfg.SerfAdvertiseAddrWAN.Port
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.EncryptVerifyIncoming
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.EncryptVerifyOutgoing
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipVerifyIncoming = runtimeCfg.StaticRuntimeConfig.EncryptVerifyIncoming
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipVerifyOutgoing = runtimeCfg.StaticRuntimeConfig.EncryptVerifyOutgoing
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipInterval = runtimeCfg.GossipWANGossipInterval
|
||||
cfg.SerfWANConfig.MemberlistConfig.GossipNodes = runtimeCfg.GossipWANGossipNodes
|
||||
cfg.SerfWANConfig.MemberlistConfig.ProbeInterval = runtimeCfg.GossipWANProbeInterval
|
||||
|
@ -1294,11 +1334,11 @@ func segmentConfig(config *config.RuntimeConfig) ([]consul.NetworkSegment, error
|
|||
if config.ReconnectTimeoutLAN != 0 {
|
||||
serfConf.ReconnectTimeout = config.ReconnectTimeoutLAN
|
||||
}
|
||||
if config.EncryptVerifyIncoming {
|
||||
serfConf.MemberlistConfig.GossipVerifyIncoming = config.EncryptVerifyIncoming
|
||||
if config.StaticRuntimeConfig.EncryptVerifyIncoming {
|
||||
serfConf.MemberlistConfig.GossipVerifyIncoming = config.StaticRuntimeConfig.EncryptVerifyIncoming
|
||||
}
|
||||
if config.EncryptVerifyOutgoing {
|
||||
serfConf.MemberlistConfig.GossipVerifyOutgoing = config.EncryptVerifyOutgoing
|
||||
if config.StaticRuntimeConfig.EncryptVerifyOutgoing {
|
||||
serfConf.MemberlistConfig.GossipVerifyOutgoing = config.StaticRuntimeConfig.EncryptVerifyOutgoing
|
||||
}
|
||||
|
||||
var rpcAddr *net.TCPAddr
|
||||
|
@ -1372,6 +1412,11 @@ func (a *Agent) ShutdownAgent() error {
|
|||
// Stop the watches to avoid any notification/state change during shutdown
|
||||
a.stopAllWatches()
|
||||
|
||||
// Stop config file watcher
|
||||
if a.FileWatcher != nil {
|
||||
a.FileWatcher.Stop()
|
||||
}
|
||||
|
||||
a.stopLicenseManager()
|
||||
|
||||
// this would be cancelled anyways (by the closing of the shutdown ch) but
|
||||
|
@ -3694,10 +3739,18 @@ func (a *Agent) DisableNodeMaintenance() {
|
|||
a.logger.Info("Node left maintenance mode")
|
||||
}
|
||||
|
||||
func (a *Agent) AutoReloadConfig() error {
|
||||
return a.reloadConfig(true)
|
||||
}
|
||||
|
||||
func (a *Agent) ReloadConfig() error {
|
||||
return a.reloadConfig(false)
|
||||
}
|
||||
|
||||
// ReloadConfig will atomically reload all configuration, including
|
||||
// all services, checks, tokens, metadata, dnsServer configs, etc.
|
||||
// It will also reload all ongoing watches.
|
||||
func (a *Agent) ReloadConfig() error {
|
||||
func (a *Agent) reloadConfig(autoReload bool) error {
|
||||
newCfg, err := a.baseDeps.AutoConfig.ReadConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -3708,6 +3761,39 @@ func (a *Agent) ReloadConfig() error {
|
|||
// breaking some existing behavior.
|
||||
newCfg.NodeID = a.config.NodeID
|
||||
|
||||
// if auto-reload is enabled, make sure the right cert files are being watched
|
||||
if autoReload {
|
||||
for _, f := range []struct {
|
||||
oldCfg tlsutil.ProtocolConfig
|
||||
newCfg tlsutil.ProtocolConfig
|
||||
}{
|
||||
{a.config.TLS.InternalRPC, newCfg.TLS.InternalRPC},
|
||||
{a.config.TLS.GRPC, newCfg.TLS.GRPC},
|
||||
{a.config.TLS.HTTPS, newCfg.TLS.HTTPS},
|
||||
} {
|
||||
if f.oldCfg.KeyFile != f.newCfg.KeyFile {
|
||||
err = a.FileWatcher.Replace(f.oldCfg.KeyFile, f.newCfg.KeyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if f.oldCfg.CertFile != f.newCfg.CertFile {
|
||||
err = a.FileWatcher.Replace(f.oldCfg.CertFile, f.newCfg.CertFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if revertStaticConfig(f.oldCfg, f.newCfg) {
|
||||
a.logger.Warn("Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes.", "StaticRuntimeConfig", f.oldCfg, "StaticRuntimeConfig From file", f.newCfg)
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(newCfg.StaticRuntimeConfig, a.config.StaticRuntimeConfig) {
|
||||
a.logger.Warn("Changes to your configuration were detected that for security reasons cannot be automatically applied by 'auto_reload_config'. Manually reload your configuration (e.g. with 'consul reload') to apply these changes.", "StaticRuntimeConfig", a.config.StaticRuntimeConfig, "StaticRuntimeConfig From file", newCfg.StaticRuntimeConfig)
|
||||
// reset the non-reloadable fields to their previous values
|
||||
newCfg.StaticRuntimeConfig = a.config.StaticRuntimeConfig
|
||||
}
|
||||
}
|
||||
|
||||
// DEPRECATED: Warn users on reload if they're emitting deprecated metrics. Remove this warning and the flagged
|
||||
// metrics in a future release of Consul.
|
||||
if !a.config.Telemetry.DisableCompatOneNine {
|
||||
|
@ -3717,6 +3803,19 @@ func (a *Agent) ReloadConfig() error {
|
|||
return a.reloadConfigInternal(newCfg)
|
||||
}
|
||||
|
||||
// revertStaticConfig reports whether any field of the given TLS protocol config
// other than CertFile and KeyFile changed, i.e. a change that cannot be applied
// by config auto-reload.
func revertStaticConfig(oldCfg tlsutil.ProtocolConfig, newCfg tlsutil.ProtocolConfig) bool {
	// compare the new config with the old cert/key paths substituted in, so that
	// cert/key changes alone are not reported
	cmp := newCfg
	cmp.CertFile = oldCfg.CertFile
	cmp.KeyFile = oldCfg.KeyFile
	return !reflect.DeepEqual(cmp, oldCfg)
}
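As a rough illustration of the intended semantics, here is how a test in the agent package might exercise the helper above. The field values are made up, and it assumes tlsutil.ProtocolConfig exposes VerifyIncoming alongside CertFile/KeyFile, as the surrounding diff suggests.

```go
package agent

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/tlsutil"
)

func TestRevertStaticConfig_Sketch(t *testing.T) {
	oldCfg := tlsutil.ProtocolConfig{CertFile: "old.pem", KeyFile: "old-key.pem", VerifyIncoming: true}

	// Only the cert/key paths changed: auto-reload handles this, no warning needed.
	newCerts := oldCfg
	newCerts.CertFile = "new.pem"
	newCerts.KeyFile = "new-key.pem"
	require.False(t, revertStaticConfig(oldCfg, newCerts))

	// A non-reloadable field changed too: the caller should warn and keep the old value.
	newVerify := newCerts
	newVerify.VerifyIncoming = false
	require.True(t, revertStaticConfig(oldCfg, newVerify))
}
```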
|
||||
|
||||
// reloadConfigInternal is mainly needed for some unit tests. Instead of parsing
|
||||
// the configuration using CLI flags and on disk config, this just takes a
|
||||
// runtime configuration and applies it.
|
||||
|
|
|
@ -5328,9 +5328,395 @@ func uniqueAddrs(srvs []apiServer) map[string]struct{} {
|
|||
return result
|
||||
}
|
||||
|
||||
func runStep(t *testing.T, name string, fn func(t *testing.T)) {
|
||||
t.Helper()
|
||||
if !t.Run(name, fn) {
|
||||
t.FailNow()
|
||||
func TestAgent_AutoReloadDoReload_WhenCertAndKeyUpdated(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
certsDir := testutil.TempDir(t, "auto-config")
|
||||
|
||||
// write some test TLS certificates out to the cfg dir
|
||||
serverName := "server.dc1.consul"
|
||||
signer, _, err := tlsutil.GeneratePrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
|
||||
require.NoError(t, err)
|
||||
|
||||
cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
certFile := filepath.Join(certsDir, "cert.pem")
|
||||
caFile := filepath.Join(certsDir, "cacert.pem")
|
||||
keyFile := filepath.Join(certsDir, "key.pem")
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600))
|
||||
|
||||
// generate a gossip key
|
||||
gossipKey := make([]byte, 32)
|
||||
n, err := rand.Read(gossipKey)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, n)
|
||||
gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)
|
||||
|
||||
hclConfig := TestACLConfigWithParams(nil) + `
|
||||
encrypt = "` + gossipKeyEncoded + `"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "` + caFile + `"
|
||||
cert_file = "` + certFile + `"
|
||||
key_file = "` + keyFile + `"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`
|
||||
|
||||
srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
|
||||
defer srv.Shutdown()
|
||||
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
|
||||
|
||||
aeCert := srv.tlsConfigurator.Cert()
|
||||
require.NotNil(t, aeCert)
|
||||
|
||||
cert2, privateKey2, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ioutil.WriteFile(certFile, []byte(cert2), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey2), 0600))
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
aeCert2 := srv.tlsConfigurator.Cert()
|
||||
require.NotEqual(r, aeCert.Certificate, aeCert2.Certificate)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestAgent_AutoReloadDoNotReload_WhenCaUpdated(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
certsDir := testutil.TempDir(t, "auto-config")
|
||||
|
||||
// write some test TLS certificates out to the cfg dir
|
||||
serverName := "server.dc1.consul"
|
||||
signer, _, err := tlsutil.GeneratePrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
|
||||
require.NoError(t, err)
|
||||
|
||||
cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
certFile := filepath.Join(certsDir, "cert.pem")
|
||||
caFile := filepath.Join(certsDir, "cacert.pem")
|
||||
keyFile := filepath.Join(certsDir, "key.pem")
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600))
|
||||
|
||||
// generate a gossip key
|
||||
gossipKey := make([]byte, 32)
|
||||
n, err := rand.Read(gossipKey)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, n)
|
||||
gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)
|
||||
|
||||
hclConfig := TestACLConfigWithParams(nil) + `
|
||||
encrypt = "` + gossipKeyEncoded + `"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "` + caFile + `"
|
||||
cert_file = "` + certFile + `"
|
||||
key_file = "` + keyFile + `"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`
|
||||
|
||||
srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig})
|
||||
defer srv.Shutdown()
|
||||
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
|
||||
|
||||
aeCA := srv.tlsConfigurator.ManualCAPems()
|
||||
require.NotNil(t, aeCA)
|
||||
|
||||
ca2, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ioutil.WriteFile(caFile, []byte(ca2), 0600))
|
||||
|
||||
// wait a bit to see if it gets updated
|
||||
time.Sleep(time.Second)
|
||||
|
||||
aeCA2 := srv.tlsConfigurator.ManualCAPems()
|
||||
require.NotNil(t, aeCA2)
|
||||
require.Equal(t, aeCA, aeCA2)
|
||||
}
|
||||
|
||||
func TestAgent_AutoReloadDoReload_WhenCertThenKeyUpdated(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
certsDir := testutil.TempDir(t, "auto-config")
|
||||
|
||||
// write some test TLS certificates out to the cfg dir
|
||||
serverName := "server.dc1.consul"
|
||||
signer, _, err := tlsutil.GeneratePrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
|
||||
require.NoError(t, err)
|
||||
|
||||
cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
certFile := filepath.Join(certsDir, "cert.pem")
|
||||
caFile := filepath.Join(certsDir, "cacert.pem")
|
||||
keyFile := filepath.Join(certsDir, "key.pem")
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600))
|
||||
|
||||
// generate a gossip key
|
||||
gossipKey := make([]byte, 32)
|
||||
n, err := rand.Read(gossipKey)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, n)
|
||||
gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)
|
||||
|
||||
hclConfig := TestACLConfigWithParams(nil)
|
||||
|
||||
configFile := testutil.TempDir(t, "config") + "/config.hcl"
|
||||
require.NoError(t, ioutil.WriteFile(configFile, []byte(`
|
||||
encrypt = "`+gossipKeyEncoded+`"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "`+caFile+`"
|
||||
cert_file = "`+certFile+`"
|
||||
key_file = "`+keyFile+`"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`), 0600))
|
||||
|
||||
srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}})
|
||||
defer srv.Shutdown()
|
||||
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
|
||||
|
||||
cert1 := srv.tlsConfigurator.Cert()
|
||||
|
||||
certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
certFileNew := filepath.Join(certsDir, "cert_new.pem")
|
||||
require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(configFile, []byte(`
|
||||
encrypt = "`+gossipKeyEncoded+`"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "`+caFile+`"
|
||||
cert_file = "`+certFileNew+`"
|
||||
key_file = "`+keyFile+`"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`), 0600))
|
||||
|
||||
// cert should not change as we did not update the associated key
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600))
|
||||
|
||||
// cert should change now that the associated key has also been updated
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAgent_AutoReloadDoReload_WhenKeyThenCertUpdated(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
certsDir := testutil.TempDir(t, "auto-config")
|
||||
|
||||
// write some test TLS certificates out to the cfg dir
|
||||
serverName := "server.dc1.consul"
|
||||
signer, _, err := tlsutil.GeneratePrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
ca, _, err := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer})
|
||||
require.NoError(t, err)
|
||||
|
||||
cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
certFile := filepath.Join(certsDir, "cert.pem")
|
||||
caFile := filepath.Join(certsDir, "cacert.pem")
|
||||
keyFile := filepath.Join(certsDir, "key.pem")
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFile, []byte(cert), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(caFile, []byte(ca), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKey), 0600))
|
||||
|
||||
// generate a gossip key
|
||||
gossipKey := make([]byte, 32)
|
||||
n, err := rand.Read(gossipKey)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 32, n)
|
||||
gossipKeyEncoded := base64.StdEncoding.EncodeToString(gossipKey)
|
||||
|
||||
hclConfig := TestACLConfigWithParams(nil)
|
||||
|
||||
configFile := testutil.TempDir(t, "config") + "/config.hcl"
|
||||
require.NoError(t, ioutil.WriteFile(configFile, []byte(`
|
||||
encrypt = "`+gossipKeyEncoded+`"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "`+caFile+`"
|
||||
cert_file = "`+certFile+`"
|
||||
key_file = "`+keyFile+`"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`), 0600))
|
||||
|
||||
srv := StartTestAgent(t, TestAgent{Name: "TestAgent-Server", HCL: hclConfig, configFiles: []string{configFile}})
|
||||
defer srv.Shutdown()
|
||||
|
||||
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken(TestDefaultInitialManagementToken))
|
||||
|
||||
cert1 := srv.tlsConfigurator.Cert()
|
||||
|
||||
certNew, privateKeyNew, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
certFileNew := filepath.Join(certsDir, "cert_new.pem")
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew), 0600))
|
||||
// cert should not change as we did not update the associated cert yet
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.Equal(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew), 0600))
|
||||
require.NoError(t, ioutil.WriteFile(configFile, []byte(`
|
||||
encrypt = "`+gossipKeyEncoded+`"
|
||||
encrypt_verify_incoming = true
|
||||
encrypt_verify_outgoing = true
|
||||
verify_incoming = true
|
||||
verify_outgoing = true
|
||||
verify_server_hostname = true
|
||||
ca_file = "`+caFile+`"
|
||||
cert_file = "`+certFileNew+`"
|
||||
key_file = "`+keyFile+`"
|
||||
connect { enabled = true }
|
||||
auto_reload_config = true
|
||||
`), 0600))
|
||||
|
||||
// cert should change now that the config points at the new cert matching the updated key
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.NotEqual(r, cert1.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.NotEqual(r, cert1.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
cert2 := srv.tlsConfigurator.Cert()
|
||||
|
||||
certNew2, privateKeyNew2, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ioutil.WriteFile(keyFile, []byte(privateKeyNew2), 0600))
|
||||
// cert should not change as we did not update the associated cert
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.Equal(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
|
||||
require.NoError(t, ioutil.WriteFile(certFileNew, []byte(certNew2), 0600))
|
||||
|
||||
// cert should change now that the associated cert has also been updated
|
||||
time.Sleep(1 * time.Second)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.NotEqual(r, cert2.Certificate, srv.tlsConfigurator.Cert().Certificate)
|
||||
require.NotEqual(r, cert2.PrivateKey, srv.tlsConfigurator.Cert().PrivateKey)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@ import (
|
|||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/ipaddr"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
libtempl "github.com/hashicorp/consul/lib/template"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
|
@ -107,7 +108,8 @@ func Load(opts LoadOpts) (LoadResult, error) {
|
|||
if err := b.validate(cfg); err != nil {
|
||||
return r, err
|
||||
}
|
||||
return LoadResult{RuntimeConfig: &cfg, Warnings: b.Warnings}, nil
|
||||
watcherFiles := stringslice.CloneStringSlice(opts.ConfigFiles)
|
||||
return LoadResult{RuntimeConfig: &cfg, Warnings: b.Warnings, WatchedFiles: watcherFiles}, nil
|
||||
}
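Since Load now also reports which files were read, a caller can feed that list straight into the file watcher. A rough sketch using only the APIs shown in this diff; the config path and the panics are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/config"
)

func main() {
	res, err := config.Load(config.LoadOpts{
		ConfigFiles: []string{"/etc/consul.d/server.hcl"}, // illustrative path
	})
	if err != nil {
		panic(err)
	}

	if res.RuntimeConfig.AutoReloadConfig && len(res.WatchedFiles) > 0 {
		w, err := config.NewFileWatcher(res.WatchedFiles, hclog.Default())
		if err != nil {
			panic(err)
		}
		w.Start(context.Background())
		defer func() { _ = w.Stop() }()

		for ev := range w.EventsCh() {
			fmt.Println("config file changed, reload here:", ev.Filename)
		}
	}
}
```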
|
||||
|
||||
// LoadResult is the result returned from Load. The caller is responsible for
|
||||
|
@ -115,6 +117,7 @@ func Load(opts LoadOpts) (LoadResult, error) {
|
|||
type LoadResult struct {
|
||||
RuntimeConfig *RuntimeConfig
|
||||
Warnings []string
|
||||
WatchedFiles []string
|
||||
}
|
||||
|
||||
// builder constructs and validates a runtime configuration from multiple
|
||||
|
@ -938,6 +941,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
c.Cache.EntryFetchMaxBurst, cache.DefaultEntryFetchMaxBurst,
|
||||
),
|
||||
},
|
||||
AutoReloadConfig: boolVal(c.AutoReloadConfig),
|
||||
CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval),
|
||||
CheckOutputMaxSize: intValWithDefault(c.CheckOutputMaxSize, 4096),
|
||||
Checks: checks,
|
||||
|
@ -978,8 +982,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
EnableRemoteScriptChecks: enableRemoteScriptChecks,
|
||||
EnableLocalScriptChecks: enableLocalScriptChecks,
|
||||
EncryptKey: stringVal(c.EncryptKey),
|
||||
EncryptVerifyIncoming: boolVal(c.EncryptVerifyIncoming),
|
||||
EncryptVerifyOutgoing: boolVal(c.EncryptVerifyOutgoing),
|
||||
GRPCPort: grpcPort,
|
||||
GRPCAddrs: grpcAddrs,
|
||||
HTTPMaxConnsPerClient: intVal(c.Limits.HTTPMaxConnsPerClient),
|
||||
|
@ -987,6 +989,11 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
KVMaxValueSize: uint64Val(c.Limits.KVMaxValueSize),
|
||||
LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime),
|
||||
LeaveOnTerm: leaveOnTerm,
|
||||
StaticRuntimeConfig: StaticRuntimeConfig{
|
||||
EncryptVerifyIncoming: boolVal(c.EncryptVerifyIncoming),
|
||||
EncryptVerifyOutgoing: boolVal(c.EncryptVerifyOutgoing),
|
||||
},
|
||||
|
||||
Logging: logging.Config{
|
||||
LogLevel: stringVal(c.LogLevel),
|
||||
LogJSON: boolVal(c.LogJSON),
|
||||
|
|
|
@ -210,6 +210,7 @@ type Config struct {
|
|||
ReconnectTimeoutLAN *string `mapstructure:"reconnect_timeout"`
|
||||
ReconnectTimeoutWAN *string `mapstructure:"reconnect_timeout_wan"`
|
||||
RejoinAfterLeave *bool `mapstructure:"rejoin_after_leave"`
|
||||
AutoReloadConfig *bool `mapstructure:"auto_reload_config"`
|
||||
RetryJoinIntervalLAN *string `mapstructure:"retry_interval"`
|
||||
RetryJoinIntervalWAN *string `mapstructure:"retry_interval_wan"`
|
||||
RetryJoinLAN []string `mapstructure:"retry_join"`
|
||||
|
|
|
@ -14,19 +14,29 @@ import (
|
|||
|
||||
const timeoutDuration = 200 * time.Millisecond
|
||||
|
||||
type FileWatcher struct {
|
||||
type Watcher interface {
|
||||
Start(ctx context.Context)
|
||||
Stop() error
|
||||
Add(filename string) error
|
||||
Remove(filename string)
|
||||
Replace(oldFile, newFile string) error
|
||||
EventsCh() chan *FileWatcherEvent
|
||||
}
|
||||
|
||||
type fileWatcher struct {
|
||||
watcher *fsnotify.Watcher
|
||||
configFiles map[string]*watchedFile
|
||||
configFilesLock sync.RWMutex
|
||||
logger hclog.Logger
|
||||
reconcileTimeout time.Duration
|
||||
cancel context.CancelFunc
|
||||
done chan interface{}
|
||||
stopOnce sync.Once
|
||||
|
||||
//EventsCh Channel where an event will be emitted when a file change is detected
|
||||
// eventsCh is the channel where an event will be emitted when a file change is detected
|
||||
// a call to Start is needed before any event is emitted
|
||||
// after a call to Stop succeeds, the channel will be closed
|
||||
EventsCh chan *FileWatcherEvent
|
||||
eventsCh chan *FileWatcherEvent
|
||||
}
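Taken together, the Watcher interface and fileWatcher implementation above are used roughly like this; a minimal sketch with made-up file paths and a fixed sleep standing in for the agent's lifetime.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/config"
)

func main() {
	// NewFileWatcher returns the Watcher interface backed by fsnotify.
	w, err := config.NewFileWatcher([]string{"/etc/consul.d/tls/cert.pem"}, hclog.Default())
	if err != nil {
		log.Fatal(err)
	}

	// Files can also be added, removed, or swapped after construction.
	if err := w.Add("/etc/consul.d/tls/key.pem"); err != nil {
		log.Fatal(err)
	}
	if err := w.Replace("/etc/consul.d/tls/cert.pem", "/etc/consul.d/tls/cert_new.pem"); err != nil {
		log.Fatal(err)
	}

	w.Start(context.Background())

	go func() {
		// EventsCh delivers one event per detected change; it is closed by Stop.
		for ev := range w.EventsCh() {
			log.Println("changed:", ev.Filename)
		}
	}()

	time.Sleep(30 * time.Second) // stand-in for the agent's lifetime
	if err := w.Stop(); err != nil {
		log.Println("stopping watcher:", err)
	}
}
```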
|
||||
|
||||
type watchedFile struct {
|
||||
|
@ -38,24 +48,23 @@ type FileWatcherEvent struct {
|
|||
}
|
||||
|
||||
// NewFileWatcher creates a file watcher that will watch all the files/folders from configFiles
|
||||
// if success a FileWatcher will be returned and a nil error
|
||||
// otherwise an error and a nil FileWatcher are returned
|
||||
func NewFileWatcher(configFiles []string, logger hclog.Logger) (*FileWatcher, error) {
|
||||
// on success a fileWatcher and a nil error are returned,
// otherwise an error and a nil fileWatcher are returned
|
||||
func NewFileWatcher(configFiles []string, logger hclog.Logger) (Watcher, error) {
|
||||
ws, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w := &FileWatcher{
|
||||
w := &fileWatcher{
|
||||
watcher: ws,
|
||||
logger: logger.Named("file-watcher"),
|
||||
configFiles: make(map[string]*watchedFile),
|
||||
EventsCh: make(chan *FileWatcherEvent),
|
||||
eventsCh: make(chan *FileWatcherEvent),
|
||||
reconcileTimeout: timeoutDuration,
|
||||
done: make(chan interface{}),
|
||||
stopOnce: sync.Once{},
|
||||
}
|
||||
for _, f := range configFiles {
|
||||
err = w.add(f)
|
||||
err = w.Add(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error adding file %q: %w", f, err)
|
||||
}
|
||||
|
@ -66,7 +75,7 @@ func NewFileWatcher(configFiles []string, logger hclog.Logger) (*FileWatcher, er
|
|||
|
||||
// Start starts a file watcher, with a copy of the passed context.
|
||||
// calling Start multiple times is a noop
|
||||
func (w *FileWatcher) Start(ctx context.Context) {
|
||||
func (w *fileWatcher) Start(ctx context.Context) {
|
||||
if w.cancel == nil {
|
||||
cancelCtx, cancel := context.WithCancel(ctx)
|
||||
w.cancel = cancel
|
||||
|
@ -76,21 +85,19 @@ func (w *FileWatcher) Start(ctx context.Context) {
|
|||
|
||||
// Stop the file watcher
|
||||
// calling Stop multiple times is a noop; Stop must be called after a Start
|
||||
func (w *FileWatcher) Stop() error {
|
||||
func (w *fileWatcher) Stop() error {
|
||||
var err error
|
||||
w.stopOnce.Do(func() {
|
||||
w.cancel()
|
||||
<-w.done
|
||||
close(w.EventsCh)
|
||||
err = w.watcher.Close()
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *FileWatcher) add(filename string) error {
|
||||
if isSymLink(filename) {
|
||||
return fmt.Errorf("symbolic links are not supported %s", filename)
|
||||
}
|
||||
// Add a file to the file watcher
|
||||
// Add will lock the file watcher during the add
|
||||
func (w *fileWatcher) Add(filename string) error {
|
||||
filename = filepath.Clean(filename)
|
||||
w.logger.Trace("adding file", "file", filename)
|
||||
if err := w.watcher.Add(filename); err != nil {
|
||||
|
@ -100,25 +107,63 @@ func (w *FileWatcher) add(filename string) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.configFiles[filename] = &watchedFile{modTime: modTime}
|
||||
w.addFile(filename, modTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
func isSymLink(filename string) bool {
|
||||
fi, err := os.Lstat(filename)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if fi.Mode()&os.ModeSymlink != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
// Remove a file from the file watcher
|
||||
// Remove will lock the file watcher during the remove
|
||||
func (w *fileWatcher) Remove(filename string) {
|
||||
w.removeFile(filename)
|
||||
}
|
||||
|
||||
func (w *FileWatcher) watch(ctx context.Context) {
|
||||
// Replace a file in the file watcher
|
||||
// Replace will lock the file watcher during the replace
|
||||
func (w *fileWatcher) Replace(oldFile, newFile string) error {
|
||||
if oldFile == newFile {
|
||||
return nil
|
||||
}
|
||||
newFile = filepath.Clean(newFile)
|
||||
w.logger.Trace("adding file", "file", newFile)
|
||||
if err := w.watcher.Add(newFile); err != nil {
|
||||
return err
|
||||
}
|
||||
modTime, err := w.getFileModifiedTime(newFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.replaceFile(oldFile, newFile, modTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fileWatcher) replaceFile(oldFile, newFile string, modTime time.Time) {
|
||||
w.configFilesLock.Lock()
|
||||
defer w.configFilesLock.Unlock()
|
||||
delete(w.configFiles, oldFile)
|
||||
w.configFiles[newFile] = &watchedFile{modTime: modTime}
|
||||
}
|
||||
|
||||
func (w *fileWatcher) addFile(filename string, modTime time.Time) {
|
||||
w.configFilesLock.Lock()
|
||||
defer w.configFilesLock.Unlock()
|
||||
w.configFiles[filename] = &watchedFile{modTime: modTime}
|
||||
}
|
||||
|
||||
func (w *fileWatcher) removeFile(filename string) {
|
||||
w.configFilesLock.Lock()
|
||||
defer w.configFilesLock.Unlock()
|
||||
delete(w.configFiles, filename)
|
||||
}
|
||||
|
||||
func (w *fileWatcher) EventsCh() chan *FileWatcherEvent {
|
||||
return w.eventsCh
|
||||
}
|
||||
|
||||
func (w *fileWatcher) watch(ctx context.Context) {
|
||||
ticker := time.NewTicker(w.reconcileTimeout)
|
||||
defer ticker.Stop()
|
||||
defer close(w.done)
|
||||
defer close(w.eventsCh)
|
||||
|
||||
for {
|
||||
select {
|
||||
|
@ -144,7 +189,7 @@ func (w *FileWatcher) watch(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) error {
|
||||
func (w *fileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) error {
|
||||
w.logger.Trace("event received ", "filename", event.Name, "OP", event.Op)
|
||||
// we only want Create and Remove events to avoid triggering a reload on file modification
|
||||
if !isCreateEvent(event) && !isRemoveEvent(event) && !isWriteEvent(event) && !isRenameEvent(event) {
|
||||
|
@ -168,7 +213,7 @@ func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) err
|
|||
if isCreateEvent(event) || isWriteEvent(event) || isRenameEvent(event) {
|
||||
w.logger.Trace("call the handler", "filename", event.Name, "OP", event.Op)
|
||||
select {
|
||||
case w.EventsCh <- &FileWatcherEvent{Filename: filename}:
|
||||
case w.eventsCh <- &FileWatcherEvent{Filename: filename}:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
@ -177,9 +222,11 @@ func (w *FileWatcher) handleEvent(ctx context.Context, event fsnotify.Event) err
|
|||
return nil
|
||||
}
|
||||
|
||||
func (w *FileWatcher) isWatched(filename string) (*watchedFile, string, bool) {
|
||||
func (w *fileWatcher) isWatched(filename string) (*watchedFile, string, bool) {
|
||||
path := filename
|
||||
w.configFilesLock.RLock()
|
||||
configFile, ok := w.configFiles[path]
|
||||
w.configFilesLock.RUnlock()
|
||||
if ok {
|
||||
return configFile, path, true
|
||||
}
|
||||
|
@ -192,14 +239,17 @@ func (w *FileWatcher) isWatched(filename string) (*watchedFile, string, bool) {
|
|||
// try to see if the watched path is the parent dir
|
||||
newPath := filepath.Dir(path)
|
||||
w.logger.Trace("get dir", "dir", newPath)
|
||||
w.configFilesLock.RLock()
|
||||
configFile, ok = w.configFiles[newPath]
|
||||
w.configFilesLock.RUnlock()
|
||||
}
|
||||
return configFile, path, ok
|
||||
}
|
||||
|
||||
func (w *FileWatcher) reconcile(ctx context.Context) {
|
||||
func (w *fileWatcher) reconcile(ctx context.Context) {
|
||||
w.configFilesLock.Lock()
|
||||
defer w.configFilesLock.Unlock()
|
||||
for filename, configFile := range w.configFiles {
|
||||
w.logger.Trace("reconciling", "filename", filename)
|
||||
newModTime, err := w.getFileModifiedTime(filename)
|
||||
if err != nil {
|
||||
w.logger.Error("failed to get file modTime", "file", filename, "err", err)
|
||||
|
@ -213,9 +263,9 @@ func (w *FileWatcher) reconcile(ctx context.Context) {
|
|||
}
|
||||
if !configFile.modTime.Equal(newModTime) {
|
||||
w.logger.Trace("call the handler", "filename", filename, "old modTime", configFile.modTime, "new modTime", newModTime)
|
||||
w.configFiles[filename].modTime = newModTime
|
||||
configFile.modTime = newModTime
|
||||
select {
|
||||
case w.EventsCh <- &FileWatcherEvent{Filename: filename}:
|
||||
case w.eventsCh <- &FileWatcherEvent{Filename: filename}:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
@ -239,7 +289,7 @@ func isRenameEvent(event fsnotify.Event) bool {
|
|||
return event.Op&fsnotify.Rename == fsnotify.Rename
|
||||
}
|
||||
|
||||
func (w *FileWatcher) getFileModifiedTime(filename string) (time.Time, error) {
|
||||
func (w *fileWatcher) getFileModifiedTime(filename string) (time.Time, error) {
|
||||
fileInfo, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
|
|
|
@ -27,7 +27,9 @@ func TestWatcherRenameEvent(t *testing.T) {
|
|||
|
||||
fileTmp := createTempConfigFile(t, "temp_config3")
|
||||
filepaths := []string{createTempConfigFile(t, "temp_config1"), createTempConfigFile(t, "temp_config2")}
|
||||
w, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}))
|
||||
wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}))
|
||||
w := wi.(*fileWatcher)
|
||||
|
||||
require.NoError(t, err)
|
||||
w.Start(context.Background())
|
||||
defer func() {
|
||||
|
@ -36,10 +38,66 @@ func TestWatcherRenameEvent(t *testing.T) {
|
|||
|
||||
require.NoError(t, err)
|
||||
err = os.Rename(fileTmp, filepaths[0])
|
||||
time.Sleep(w.reconcileTimeout + 50*time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepaths[0], w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepaths[0], w.eventsCh, defaultTimeout))
|
||||
// make sure we consume all events
|
||||
assertEvent(filepaths[0], w.EventsCh, defaultTimeout)
|
||||
_ = assertEvent(filepaths[0], w.eventsCh, defaultTimeout)
|
||||
}
|
||||
|
||||
func TestWatcherAddRemove(t *testing.T) {
|
||||
var filepaths []string
|
||||
wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}))
|
||||
w := wi.(*fileWatcher)
|
||||
require.NoError(t, err)
|
||||
file1 := createTempConfigFile(t, "temp_config1")
|
||||
err = w.Add(file1)
|
||||
require.NoError(t, err)
|
||||
file2 := createTempConfigFile(t, "temp_config2")
|
||||
err = w.Add(file2)
|
||||
require.NoError(t, err)
|
||||
w.Remove(file2)
|
||||
_, ok := w.configFiles[file1]
|
||||
require.True(t, ok)
|
||||
_, ok = w.configFiles[file2]
|
||||
require.False(t, ok)
|
||||
|
||||
}
|
||||
|
||||
func TestWatcherAddWhileRunning(t *testing.T) {
|
||||
var filepaths []string
|
||||
wi, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}))
|
||||
w := wi.(*fileWatcher)
|
||||
require.NoError(t, err)
|
||||
w.Start(context.Background())
|
||||
defer func() {
|
||||
_ = w.Stop()
|
||||
}()
|
||||
file1 := createTempConfigFile(t, "temp_config1")
|
||||
err = w.Add(file1)
|
||||
require.NoError(t, err)
|
||||
file2 := createTempConfigFile(t, "temp_config2")
|
||||
err = w.Add(file2)
|
||||
require.NoError(t, err)
|
||||
w.Remove(file2)
|
||||
require.Len(t, w.configFiles, 1)
|
||||
_, ok := w.configFiles[file1]
|
||||
require.True(t, ok)
|
||||
_, ok = w.configFiles[file2]
|
||||
require.False(t, ok)
|
||||
}
|
||||
|
||||
func TestWatcherRemoveNotFound(t *testing.T) {
|
||||
var filepaths []string
|
||||
w, err := NewFileWatcher(filepaths, hclog.New(&hclog.LoggerOptions{}))
|
||||
require.NoError(t, err)
|
||||
w.Start(context.Background())
|
||||
defer func() {
|
||||
_ = w.Stop()
|
||||
}()
|
||||
|
||||
file := createTempConfigFile(t, "temp_config2")
|
||||
w.Remove(file)
|
||||
}
|
||||
|
||||
func TestWatcherAddNotExist(t *testing.T) {
|
||||
|
@ -69,7 +127,7 @@ func TestEventWatcherWrite(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
err = file.Sync()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(file.Name(), w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(file.Name(), w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
|
||||
func TestEventWatcherRead(t *testing.T) {
|
||||
|
@ -84,7 +142,7 @@ func TestEventWatcherRead(t *testing.T) {
|
|||
|
||||
_, err = os.ReadFile(filepath)
|
||||
require.NoError(t, err)
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event")
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "timedout waiting for event")
|
||||
}
|
||||
|
||||
func TestEventWatcherChmod(t *testing.T) {
|
||||
|
@ -107,7 +165,7 @@ func TestEventWatcherChmod(t *testing.T) {
|
|||
|
||||
err = file.Chmod(0777)
|
||||
require.NoError(t, err)
|
||||
require.Error(t, assertEvent(file.Name(), w.EventsCh, defaultTimeout), "timedout waiting for event")
|
||||
require.Error(t, assertEvent(file.Name(), w.EventsCh(), defaultTimeout), "timedout waiting for event")
|
||||
}
|
||||
|
||||
func TestEventWatcherRemoveCreate(t *testing.T) {
|
||||
|
@ -130,7 +188,7 @@ func TestEventWatcherRemoveCreate(t *testing.T) {
|
|||
err = recreated.Sync()
|
||||
require.NoError(t, err)
|
||||
// this is an event coming from the reconcile loop
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
|
||||
func TestEventWatcherMove(t *testing.T) {
|
||||
|
@ -147,8 +205,9 @@ func TestEventWatcherMove(t *testing.T) {
|
|||
for i := 0; i < 10; i++ {
|
||||
filepath2 := createTempConfigFile(t, "temp_config2")
|
||||
err = os.Rename(filepath2, filepath)
|
||||
time.Sleep(timeoutDuration + 50*time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -157,7 +216,8 @@ func TestEventReconcileMove(t *testing.T) {
|
|||
filepath2 := createTempConfigFile(t, "temp_config2")
|
||||
err := os.Chtimes(filepath, time.Now(), time.Now().Add(-1*time.Second))
|
||||
require.NoError(t, err)
|
||||
w, err := NewFileWatcher([]string{filepath}, hclog.New(&hclog.LoggerOptions{}))
|
||||
wi, err := NewFileWatcher([]string{filepath}, hclog.New(&hclog.LoggerOptions{}))
|
||||
w := wi.(*fileWatcher)
|
||||
require.NoError(t, err)
|
||||
w.Start(context.Background())
|
||||
defer func() {
|
||||
|
@ -169,8 +229,9 @@ func TestEventReconcileMove(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
err = os.Rename(filepath2, filepath)
|
||||
time.Sleep(timeoutDuration + 50*time.Millisecond)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, 2000*time.Millisecond))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), 2000*time.Millisecond))
|
||||
}
|
||||
|
||||
func TestEventWatcherDirCreateRemove(t *testing.T) {
|
||||
|
@ -187,11 +248,11 @@ func TestEventWatcherDirCreateRemove(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
err = file.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
|
||||
err = os.Remove(name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -212,9 +273,9 @@ func TestEventWatcherDirMove(t *testing.T) {
|
|||
|
||||
for i := 0; i < 100; i++ {
|
||||
filepathTmp := createTempConfigFile(t, "temp_config2")
|
||||
os.Rename(filepathTmp, name)
|
||||
err = os.Rename(filepathTmp, name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -235,9 +296,9 @@ func TestEventWatcherDirMoveTrim(t *testing.T) {
|
|||
|
||||
for i := 0; i < 100; i++ {
|
||||
filepathTmp := createTempConfigFile(t, "temp_config2")
|
||||
os.Rename(filepathTmp, name)
|
||||
err = os.Rename(filepathTmp, name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh, defaultTimeout))
|
||||
require.NoError(t, assertEvent(filepath, w.EventsCh(), defaultTimeout))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -260,9 +321,9 @@ func TestEventWatcherSubDirMove(t *testing.T) {
|
|||
|
||||
for i := 0; i < 2; i++ {
|
||||
filepathTmp := createTempConfigFile(t, "temp_config2")
|
||||
os.Rename(filepathTmp, name)
|
||||
err = os.Rename(filepathTmp, name)
|
||||
require.NoError(t, err)
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event")
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "timedout waiting for event")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -283,7 +344,7 @@ func TestEventWatcherDirRead(t *testing.T) {
|
|||
|
||||
_, err = os.ReadFile(name)
|
||||
require.NoError(t, err)
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh, defaultTimeout), "timedout waiting for event")
|
||||
require.Error(t, assertEvent(filepath, w.EventsCh(), defaultTimeout), "timedout waiting for event")
|
||||
}
|
||||
|
||||
func TestEventWatcherMoveSoftLink(t *testing.T) {
|
||||
|
@ -295,8 +356,8 @@ func TestEventWatcherMoveSoftLink(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
w, err := NewFileWatcher([]string{name}, hclog.New(&hclog.LoggerOptions{}))
|
||||
require.Error(t, err, "symbolic link are not supported")
|
||||
require.Nil(t, w)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, w)
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -76,6 +76,7 @@ func AddFlags(fs *flag.FlagSet, f *LoadOpts) {
|
|||
add(&f.FlagValues.DNSRecursors, "recursor", "Address of an upstream DNS server. Can be specified multiple times.")
|
||||
add(&f.FlagValues.PrimaryGateways, "primary-gateway", "Address of a mesh gateway in the primary datacenter to use to bootstrap WAN federation at start time with retries enabled. Can be specified multiple times.")
|
||||
add(&f.FlagValues.RejoinAfterLeave, "rejoin", "Ignores a previous leave and attempts to rejoin the cluster.")
|
||||
add(&f.FlagValues.AutoReloadConfig, "auto-reload-config", "Watches config files for changes and auto reloads the files when modified.")
|
||||
add(&f.FlagValues.RetryJoinIntervalLAN, "retry-interval", "Time to wait between join attempts.")
|
||||
add(&f.FlagValues.RetryJoinIntervalWAN, "retry-interval-wan", "Time to wait between join -wan attempts.")
|
||||
add(&f.FlagValues.RetryJoinLAN, "retry-join", "Address of an agent to join at start time with retries enabled. Can be specified multiple times.")
|
||||
|
|
|
@ -29,6 +29,22 @@ type RuntimeSOAConfig struct {
|
|||
Minttl uint32 // 0,
|
||||
}
|
||||
|
||||
// StaticRuntimeConfig specifies the subset of configuration that the consul agent
// actually uses but that is not reloadable by configuration auto-reload.
|
||||
type StaticRuntimeConfig struct {
|
||||
// EncryptVerifyIncoming enforces incoming gossip encryption and can be
|
||||
// used to upshift to encrypted gossip on a running cluster.
|
||||
//
|
||||
// hcl: encrypt_verify_incoming = (true|false)
|
||||
EncryptVerifyIncoming bool
|
||||
|
||||
// EncryptVerifyOutgoing enforces outgoing gossip encryption and can be
|
||||
// used to upshift to encrypted gossip on a running cluster.
|
||||
//
|
||||
// hcl: encrypt_verify_outgoing = (true|false)
|
||||
EncryptVerifyOutgoing bool
|
||||
}
|
||||
|
||||
// RuntimeConfig specifies the configuration the consul agent actually
|
||||
// uses. It is derived from one or more Config structures which can come
|
||||
// from files, flags and/or environment variables.
|
||||
|
@ -651,18 +667,6 @@ type RuntimeConfig struct {
|
|||
// flag: -encrypt string
|
||||
EncryptKey string
|
||||
|
||||
// EncryptVerifyIncoming enforces incoming gossip encryption and can be
|
||||
// used to upshift to encrypted gossip on a running cluster.
|
||||
//
|
||||
// hcl: encrypt_verify_incoming = (true|false)
|
||||
EncryptVerifyIncoming bool
|
||||
|
||||
// EncryptVerifyOutgoing enforces outgoing gossip encryption and can be
|
||||
// used to upshift to encrypted gossip on a running cluster.
|
||||
//
|
||||
// hcl: encrypt_verify_outgoing = (true|false)
|
||||
EncryptVerifyOutgoing bool
|
||||
|
||||
// GRPCPort is the port the gRPC server listens on. Currently this only
|
||||
// exposes the xDS and ext_authz APIs for Envoy and it is disabled by default.
|
||||
//
|
||||
|
@ -1298,6 +1302,11 @@ type RuntimeConfig struct {
|
|||
// hcl: skip_leave_on_interrupt = (true|false)
|
||||
SkipLeaveOnInt bool
|
||||
|
||||
// AutoReloadConfig indicates whether the config will be
// auto-reloaded based on config file modification
|
||||
// hcl: auto_reload_config = (true|false)
|
||||
AutoReloadConfig bool
|
||||
|
||||
// StartJoinAddrsLAN is a list of addresses to attempt to join -lan when the
|
||||
// agent starts. If Serf is unable to communicate with any of these
|
||||
// addresses, then the agent will error and exit.
|
||||
|
@ -1374,6 +1383,8 @@ type RuntimeConfig struct {
|
|||
// hcl: unix_sockets { user = string }
|
||||
UnixSocketUser string
|
||||
|
||||
StaticRuntimeConfig StaticRuntimeConfig
|
||||
|
||||
// Watches are used to monitor various endpoints and to invoke a
|
||||
// handler to act appropriately. These are managed entirely in the
|
||||
// agent layer using the standard APIs.
|
||||
|
|
|
@ -906,6 +906,18 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
},
|
||||
})
|
||||
|
||||
run(t, testCase{
|
||||
desc: "-datacenter empty",
|
||||
args: []string{
|
||||
`-auto-reload-config`,
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.AutoReloadConfig = true
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------
|
||||
// ports and addresses
|
||||
//
|
||||
|
@ -5906,24 +5918,27 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
EnableRemoteScriptChecks: true,
|
||||
EnableLocalScriptChecks: true,
|
||||
EncryptKey: "A4wELWqH",
|
||||
EncryptVerifyIncoming: true,
|
||||
EncryptVerifyOutgoing: true,
|
||||
GRPCPort: 4881,
|
||||
GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")},
|
||||
HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")},
|
||||
HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"},
|
||||
AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")},
|
||||
HTTPPort: 7999,
|
||||
HTTPResponseHeaders: map[string]string{"M6TKa9NP": "xjuxjOzQ", "JRCrHZed": "rl0mTx81"},
|
||||
HTTPSAddrs: []net.Addr{tcpAddr("95.17.17.19:15127")},
|
||||
HTTPMaxConnsPerClient: 100,
|
||||
HTTPMaxHeaderBytes: 10,
|
||||
HTTPSHandshakeTimeout: 2391 * time.Millisecond,
|
||||
HTTPSPort: 15127,
|
||||
HTTPUseCache: false,
|
||||
KVMaxValueSize: 1234567800,
|
||||
LeaveDrainTime: 8265 * time.Second,
|
||||
LeaveOnTerm: true,
|
||||
StaticRuntimeConfig: StaticRuntimeConfig{
|
||||
EncryptVerifyIncoming: true,
|
||||
EncryptVerifyOutgoing: true,
|
||||
},
|
||||
|
||||
GRPCPort: 4881,
|
||||
GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")},
|
||||
HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")},
|
||||
HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"},
|
||||
AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")},
|
||||
HTTPPort: 7999,
|
||||
HTTPResponseHeaders: map[string]string{"M6TKa9NP": "xjuxjOzQ", "JRCrHZed": "rl0mTx81"},
|
||||
HTTPSAddrs: []net.Addr{tcpAddr("95.17.17.19:15127")},
|
||||
HTTPMaxConnsPerClient: 100,
|
||||
HTTPMaxHeaderBytes: 10,
|
||||
HTTPSHandshakeTimeout: 2391 * time.Millisecond,
|
||||
HTTPSPort: 15127,
|
||||
HTTPUseCache: false,
|
||||
KVMaxValueSize: 1234567800,
|
||||
LeaveDrainTime: 8265 * time.Second,
|
||||
LeaveOnTerm: true,
|
||||
Logging: logging.Config{
|
||||
LogLevel: "k1zo9Spt",
|
||||
LogJSON: true,
|
||||
|
@ -6760,7 +6775,8 @@ func TestRuntime_APIConfigHTTP(t *testing.T) {
|
|||
&net.UnixAddr{Name: "/var/run/foo"},
|
||||
&net.TCPAddr{IP: net.ParseIP("198.18.0.1"), Port: 5678},
|
||||
},
|
||||
Datacenter: "dc-test",
|
||||
Datacenter: "dc-test",
|
||||
StaticRuntimeConfig: StaticRuntimeConfig{},
|
||||
}
|
||||
|
||||
cfg, err := rt.APIConfig(false)
|
||||
|
|
|
@ -63,6 +63,7 @@
|
|||
"AutoEncryptDNSSAN": [],
|
||||
"AutoEncryptIPSAN": [],
|
||||
"AutoEncryptTLS": false,
|
||||
"AutoReloadConfig": false,
|
||||
"AutopilotCleanupDeadServers": false,
|
||||
"AutopilotDisableUpgradeMigration": false,
|
||||
"AutopilotLastContactThreshold": "0s",
|
||||
|
@ -182,8 +183,6 @@
|
|||
"EnableLocalScriptChecks": false,
|
||||
"EnableRemoteScriptChecks": false,
|
||||
"EncryptKey": "hidden",
|
||||
"EncryptVerifyIncoming": false,
|
||||
"EncryptVerifyOutgoing": false,
|
||||
"EnterpriseRuntimeConfig": {},
|
||||
"ExposeMaxPort": 0,
|
||||
"ExposeMinPort": 0,
|
||||
|
@ -348,6 +347,10 @@
|
|||
"SkipLeaveOnInt": false,
|
||||
"StartJoinAddrsLAN": [],
|
||||
"StartJoinAddrsWAN": [],
|
||||
"StaticRuntimeConfig": {
|
||||
"EncryptVerifyIncoming": false,
|
||||
"EncryptVerifyOutgoing": false
|
||||
},
|
||||
"SyncCoordinateIntervalMin": "0s",
|
||||
"SyncCoordinateRateTarget": 0,
|
||||
"TLS": {
|
||||
|
|
|
@ -9,6 +9,8 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -1104,7 +1106,7 @@ func (l *State) updateSyncState() error {
|
|||
// copy so that we don't retain a pointer to any actual state
|
||||
// store info for in-memory RPCs.
|
||||
if nextService.EnableTagOverride {
|
||||
nextService.Tags = structs.CloneStringSlice(rs.Tags)
|
||||
nextService.Tags = stringslice.CloneStringSlice(rs.Tags)
|
||||
changed = true
|
||||
}
|
||||
|
||||
|
|
|
@ -45,6 +45,7 @@ type BaseDeps struct {
|
|||
AutoConfig *autoconf.AutoConfig // TODO: use an interface
|
||||
Cache *cache.Cache
|
||||
ViewStore *submatview.Store
|
||||
WatchedFiles []string
|
||||
}
|
||||
|
||||
// MetricsHandler provides an http.Handler for displaying metrics.
|
||||
|
@ -61,7 +62,7 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
|
|||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
|
||||
d.WatchedFiles = result.WatchedFiles
|
||||
cfg := result.RuntimeConfig
|
||||
logConf := cfg.Logging
|
||||
logConf.Name = logging.Agent
|
||||
|
|
|
@ -11,6 +11,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
|
@ -128,7 +130,7 @@ type ACLServiceIdentity struct {
|
|||
|
||||
func (s *ACLServiceIdentity) Clone() *ACLServiceIdentity {
|
||||
s2 := *s
|
||||
s2.Datacenters = CloneStringSlice(s.Datacenters)
|
||||
s2.Datacenters = stringslice.CloneStringSlice(s.Datacenters)
|
||||
return &s2
|
||||
}
|
||||
|
||||
|
@ -606,7 +608,7 @@ func (t *ACLPolicy) UnmarshalJSON(data []byte) error {
|
|||
|
||||
func (p *ACLPolicy) Clone() *ACLPolicy {
|
||||
p2 := *p
|
||||
p2.Datacenters = CloneStringSlice(p.Datacenters)
|
||||
p2.Datacenters = stringslice.CloneStringSlice(p.Datacenters)
|
||||
return &p2
|
||||
}
|
||||
|
||||
|
@ -1415,15 +1417,6 @@ type ACLPolicyBatchDeleteRequest struct {
|
|||
PolicyIDs []string
|
||||
}
|
||||
|
||||
func CloneStringSlice(s []string) []string {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]string, len(s))
|
||||
copy(out, s)
|
||||
return out
|
||||
}
|
||||
|
||||
// ACLRoleSetRequest is used at the RPC layer for creation and update requests
|
||||
type ACLRoleSetRequest struct {
|
||||
Role ACLRole // The role to upsert
|
||||
|
|
|
@ -6,6 +6,8 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
)
|
||||
|
||||
|
@ -303,7 +305,7 @@ func (p *IntentionHTTPPermission) Clone() *IntentionHTTPPermission {
|
|||
}
|
||||
}
|
||||
|
||||
p2.Methods = CloneStringSlice(p.Methods)
|
||||
p2.Methods = stringslice.CloneStringSlice(p.Methods)
|
||||
|
||||
return &p2
|
||||
}
|
||||
|
|
|
@ -5,6 +5,8 @@ import (
|
|||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
|
||||
"github.com/hashicorp/consul/lib"
|
||||
|
@ -156,7 +158,7 @@ func (c *CARoot) Clone() *CARoot {
|
|||
}
|
||||
|
||||
newCopy := *c
|
||||
newCopy.IntermediateCerts = CloneStringSlice(c.IntermediateCerts)
|
||||
newCopy.IntermediateCerts = stringslice.CloneStringSlice(c.IntermediateCerts)
|
||||
return &newCopy
|
||||
}
|
||||
|
||||
|
|
|
@ -43,7 +43,8 @@ type TestAgent struct {
|
|||
// Name is an optional name of the agent.
|
||||
Name string
|
||||
|
||||
HCL string
|
||||
configFiles []string
|
||||
HCL string
|
||||
|
||||
// Config is the agent configuration. If Config is nil then
|
||||
// TestConfig() is used. If Config.DataDir is set then it is
|
||||
|
@ -94,6 +95,16 @@ func NewTestAgent(t *testing.T, hcl string) *TestAgent {
|
|||
return a
|
||||
}
|
||||
|
||||
// NewTestAgentWithConfigFile returns a started agent with the given configuration and config files. It fails
|
||||
// the test if the Agent could not be started.
|
||||
// The caller is responsible for calling Shutdown() to stop the agent and remove
|
||||
// temporary directories.
|
||||
func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string) *TestAgent {
|
||||
a := StartTestAgent(t, TestAgent{configFiles: configFiles, HCL: hcl})
|
||||
t.Cleanup(func() { a.Shutdown() })
|
||||
return a
|
||||
}
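A short sketch of how a test might use the new constructor; the file contents and the leader wait are illustrative only.

```go
package agent

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/testrpc"
)

func TestSomethingWithConfigFile_Sketch(t *testing.T) {
	configFile := testutil.TempDir(t, "config") + "/config.hcl"
	require.NoError(t, os.WriteFile(configFile, []byte(`auto_reload_config = true`), 0600))

	a := NewTestAgentWithConfigFile(t, "", []string{configFile})
	testrpc.WaitForLeader(t, a.RPC, "dc1")
	// the agent is shut down automatically via t.Cleanup in the constructor
}
```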
|
||||
|
||||
// StartTestAgent and wait for it to become available. If the agent fails to
|
||||
// start the test will be marked failed and execution will stop.
|
||||
//
|
||||
|
@ -186,6 +197,7 @@ func (a *TestAgent) Start(t *testing.T) error {
|
|||
config.DefaultConsulSource(),
|
||||
config.DevConsulSource(),
|
||||
},
|
||||
ConfigFiles: a.configFiles,
|
||||
}
|
||||
result, err := config.Load(opts)
|
||||
if result.RuntimeConfig != nil {
|
||||
|
|
|
@ -172,7 +172,6 @@ func (c *cmd) run(args []string) int {
|
|||
ui.Error(err.Error())
|
||||
return 1
|
||||
}
|
||||
|
||||
c.logger = bd.Logger
|
||||
agent, err := agent.New(bd)
|
||||
if err != nil {
|
||||
|
|
|
@ -68,3 +68,12 @@ func MergeSorted(a, b []string) []string {
|
|||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func CloneStringSlice(s []string) []string {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]string, len(s))
|
||||
copy(out, s)
|
||||
return out
|
||||
}
|
||||
|
|