mirror of https://github.com/status-im/consul.git
remove redundant vault api retry logic (#16143)
We upgraded the Vault API module to a version that has built-in retry logic, so this code is no longer necessary. Also add a mention of re-configuring the provider in the comments.
parent 1e7e52e3ef
commit ed7367b6f4
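The retry behavior now comes from the Vault API client itself rather than from Consul's lib/retry. Below is a minimal sketch of the client-side knobs that make the hand-rolled Waiter redundant; it is illustrative only, not code from this commit, and the values are arbitrary.

// Illustrative sketch only: tuning the vault/api client's built-in retries.
package main

import (
	"fmt"
	"time"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	conf := vaultapi.DefaultConfig()

	// The client (go-retryablehttp under the hood) retries rate-limited and
	// server-error responses, backing off between attempts.
	conf.MaxRetries = 5
	conf.MinRetryWait = 1 * time.Second
	conf.MaxRetryWait = 5 * time.Second

	client, err := vaultapi.NewClient(conf)
	if err != nil {
		panic(err)
	}
	fmt.Println("Vault client configured for", client.Address())
}

Since retries are enabled by default in the upgraded client, the commit only has to delete the retry.Waiter; nothing needs to be configured in its place.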
@@ -21,7 +21,6 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/lib/decode"
-	"github.com/hashicorp/consul/lib/retry"
 )

 const (
@@ -46,14 +45,12 @@ const (
 	VaultAuthMethodTypeUserpass = "userpass"

 	defaultK8SServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-
-	retryMin    = 1 * time.Second
-	retryMax    = 5 * time.Second
-	retryJitter = 20
 )

-var ErrBackendNotMounted = fmt.Errorf("backend not mounted")
-var ErrBackendNotInitialized = fmt.Errorf("backend not initialized")
+var (
+	ErrBackendNotMounted     = fmt.Errorf("backend not mounted")
+	ErrBackendNotInitialized = fmt.Errorf("backend not initialized")
+)

 type VaultProvider struct {
 	config *structs.VaultCAProviderConfig
@@ -100,6 +97,7 @@ func vaultTLSConfig(config *structs.VaultCAProviderConfig) *vaultapi.TLSConfig {
 }

 // Configure sets up the provider using the given configuration.
+// Configure supports being called multiple times to re-configure the provider.
 func (v *VaultProvider) Configure(cfg ProviderConfig) error {
 	config, err := ParseVaultCAConfig(cfg.RawConfig)
 	if err != nil {
@@ -176,6 +174,7 @@ func (v *VaultProvider) Configure(cfg ProviderConfig) error {

 	ctx, cancel := context.WithCancel(context.Background())
 	if v.stopWatcher != nil {
+		// stop the running watcher loop if we are re-configuring
 		v.stopWatcher()
 	}
 	v.stopWatcher = cancel
@@ -225,33 +224,17 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 	go watcher.Start()
 	defer watcher.Stop()

-	// TODO: Once we've upgraded to a later version of protobuf we can upgrade to github.com/hashicorp/vault/api@1.1.1
-	// or later and rip this out.
-	retrier := retry.Waiter{
-		MinFailures: 5,
-		MinWait:     retryMin,
-		MaxWait:     retryMax,
-		Jitter:      retry.NewJitter(retryJitter),
-	}
-
 	for {
 		select {
 		case <-ctx.Done():
 			return

 		case err := <-watcher.DoneCh():
-			// In the event we fail to login to Vault or our token is no longer valid we can overwhelm a Vault instance
-			// with rate limit configured. We would make these requests to Vault as fast as we possibly could and start
-			// causing all client's to receive 429 response codes. To mitigate that we're sleeping 1 second or less
-			// before moving on to login again and restart the lifetime watcher. Once we can upgrade to
-			// github.com/hashicorp/vault/api@v1.1.1 or later the LifetimeWatcher _should_ perform that backoff for us.
+			// Watcher has stopped
 			if err != nil {
 				v.logger.Error("Error renewing token for Vault provider", "error", err)
 			}

-			// wait at least 1 second after returning from the lifetime watcher
-			retrier.Wait(ctx)
-
 			// If the watcher has exited and auth method is enabled,
 			// re-authenticate using the auth method and set up a new watcher.
 			if v.config.AuthMethod != nil {
@@ -259,7 +242,7 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 				loginResp, err := vaultLogin(v.client, v.config.AuthMethod)
 				if err != nil {
 					v.logger.Error("Error login in to Vault with %q auth method", v.config.AuthMethod.Type)
-					// Restart the watcher
+
 					go watcher.Start()
 					continue
 				}
@@ -279,12 +262,10 @@ func (v *VaultProvider) renewToken(ctx context.Context, watcher *vaultapi.Lifeti
 					continue
 				}
 			}
 			// Restart the watcher.
-
 			go watcher.Start()

 		case <-watcher.RenewCh():
-			retrier.Reset()
 			v.logger.Info("Successfully renewed token for Vault provider")
 		}
 	}
@@ -430,7 +411,6 @@ func (v *VaultProvider) setupIntermediatePKIPath() error {
 				"error", err,
 			)
 		}
-
 	}
 }

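With the Waiter removed, renewToken relies on the LifetimeWatcher and the upgraded client's built-in backoff alone. For reference, here is a standalone sketch of that watcher pattern, assuming a renewable client token; the renewLoop helper, its logging, and the SecretAuth values are illustrative and not part of this commit.

// Illustrative sketch only: the bare LifetimeWatcher pattern that remains once
// the hand-rolled retrier is gone.
package main

import (
	"context"
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

// renewLoop keeps the client's token renewed until ctx is cancelled. Re-login
// via an auth method (as renewToken does) is omitted for brevity.
func renewLoop(ctx context.Context, client *vaultapi.Client) error {
	watcher, err := client.NewLifetimeWatcher(&vaultapi.LifetimeWatcherInput{
		Secret: &vaultapi.Secret{
			Auth: &vaultapi.SecretAuth{
				ClientToken:   client.Token(),
				Renewable:     true, // assumes the token is renewable
				LeaseDuration: 60,   // seconds; illustrative
			},
		},
	})
	if err != nil {
		return err
	}

	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case <-ctx.Done():
			return nil
		case err := <-watcher.DoneCh():
			// Watcher has stopped; a vault/api version with built-in retries
			// has already backed off between failed renewal attempts.
			return err
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed token at %s", renewal.RenewedAt)
		}
	}
}

func main() {
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	if err := renewLoop(context.Background(), client); err != nil {
		log.Fatal(err)
	}
}

Consul's actual renewToken additionally re-authenticates with the configured auth method and restarts the watcher instead of returning, as the diff above shows.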
@@ -274,7 +274,7 @@ func TestVaultCAProvider_RenewToken(t *testing.T) {
 	firstRenewal, err := secret.Data["last_renewal_time"].(json.Number).Int64()
 	require.NoError(t, err)

-	// Wait past the TTL and make sure the token has been renewed.
+	// Retry past the TTL and make sure the token has been renewed.
 	retry.Run(t, func(r *retry.R) {
 		secret, err = testVault.client.Auth().Token().Lookup(providerToken)
 		require.NoError(r, err)