// Package cache provides caching features for data from a Consul server.
//
// While this is similar in some ways to the "agent/ae" package, a key
// difference is that with anti-entropy, the agent is the authoritative
// source so it resolves differences the server may have. With caching (this
// package), the server is the authoritative source and we do our best to
// balance performance and correctness, depending on the type of data being
// requested.
//
// The types of data that can be cached are configurable via the Type
// interface. This allows specialized behavior for certain types of data.
// Each type of Consul data (CA roots, leaf certs, intentions, KV, catalog,
// etc.) must be implemented manually. This usually is not much work; see
// the "agent/cache-types" package.
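//
// A minimal Type implementation is sketched below. The Fetch signature is
// inferred from how this package invokes Type.Fetch; MyType, MyData, and
// the returned index are hypothetical:
//
//	type MyType struct{}
//
//	func (t *MyType) Fetch(opts cache.FetchOptions, r cache.Request) (cache.FetchResult, error) {
//		// Issue a (possibly blocking) query using opts.MinIndex and
//		// opts.Timeout, then return the new value with its index.
//		return cache.FetchResult{Value: &MyData{}, Index: 42}, nil
//	}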
package cache
import (
	"container/heap"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
)
//go:generate mockery -all -inpkg
// Constants related to refresh backoff. We probably don't ever need to
// make these configurable knobs since they primarily exist to lower load.
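//
// As a worked example of the backoff computed in (*Cache).refresh below:
// a failed attempt 4 waits 1<<(4-3) = 2 seconds, attempt 5 waits 4 seconds,
// attempt 6 waits 8 seconds, and attempt 9 or later is capped at the
// one-minute maximum.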
const (
	CacheRefreshBackoffMin = 3               // 3 attempts before backing off
	CacheRefreshMaxWait    = 1 * time.Minute // maximum backoff wait time
)
// Cache is an agent-local cache of Consul data. Create a Cache using the
// New function. A zero-value Cache is not ready for usage and will result
// in a panic.
//
// The types of data to be cached must be registered via RegisterType. Then,
// calls to Get specify the type and a Request implementation. The
// implementation of Request is usually done directly on the standard RPC
// struct in agent/structs. This API makes cache usage a mostly drop-in
// replacement for non-cached RPC calls.
//
// The cache is partitioned by ACL token and datacenter. This makes the
// cache safe for multi-DC queries and for queries where the data is
// modified due to ACLs, all without the cache needing any clever logic, at
// the slight expense of a less perfect cache.
//
// The Cache exposes various metrics via go-metrics. Search the source for
// "metrics." to see the metrics exposed. These can be used to explore the
// performance of the cache.
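//
// A minimal usage sketch, where "my-type", myType (a Type implementation),
// and myRequest (a Request implementation) are hypothetical:
//
//	c := cache.New(nil)
//	c.RegisterType("my-type", myType, &cache.RegisterOptions{
//		Refresh: true,
//	})
//	raw, err := c.Get("my-type", myRequest)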
type Cache struct {
	// Keeps track of the cache hits and misses in total. This is used by
	// tests currently to verify cache behavior and is not meant for general
	// analytics; for that, go-metrics emitted values are better.
	hits, misses uint64

	// types stores the list of data types that the cache knows how to service.
	// These can be dynamically registered with RegisterType.
	typesLock sync.RWMutex
	types     map[string]typeEntry

	// entries contains the actual cache data. Access to entries and
	// entriesExpiryHeap must be protected by entriesLock.
	//
	// entriesExpiryHeap is a heap of *cacheEntryExpiry values ordered by
	// expiry, with the soonest to expire being first in the list (index 0).
	//
	// NOTE(mitchellh): The entry map key is currently a string in the format
	// of "<DC>/<ACL token>/<Request key>" in order to properly partition
	// requests to different datacenters and ACL tokens. This format has some
	// big drawbacks: we can't evict by datacenter, ACL token, etc. For an
	// initial implementation this works, and the tests are agnostic to the
	// internal storage format, so changing this later should be safe.
	entriesLock       sync.RWMutex
	entries           map[string]cacheEntry
	entriesExpiryHeap *expiryHeap
}
// typeEntry is a single type that is registered with a Cache.
type typeEntry struct {
	Type Type
	Opts *RegisterOptions
}
// Options are options for the Cache.
type Options struct {
	// Nothing currently, reserved.
}
// New creates a new cache with the given options and reasonable defaults.
// Further settings can be tweaked on the returned value.
func New(*Options) *Cache {
	// Initialize the heap. The buffer of 1 is really important because
	// it's possible for the expiry loop to trigger the heap to update
	// itself, and it'd block forever otherwise.
	h := &expiryHeap{NotifyCh: make(chan struct{}, 1)}
	heap.Init(h)

	c := &Cache{
		types:             make(map[string]typeEntry),
		entries:           make(map[string]cacheEntry),
		entriesExpiryHeap: h,
	}

	// Start the expiry watcher
	go c.runExpiryLoop()

	return c
}
// RegisterOptions are options that can be associated with a type being
// registered for the cache. This changes the behavior of the cache for
// this type.
type RegisterOptions struct {
	// LastGetTTL is the time that the values returned by this type remain
	// in the cache after the last get operation. If a value isn't accessed
	// within this duration, the value is purged from the cache and
	// background refreshing will cease.
	LastGetTTL time.Duration

	// Refresh configures whether the data is actively refreshed or if
	// the data is only refreshed on an explicit Get. The default (false)
	// is to only request data on explicit Get.
	Refresh bool

	// RefreshTimer is the time between attempts to refresh data.
	// If this is zero, then data is refreshed immediately when a fetch
	// is returned.
	//
	// RefreshTimeout determines the maximum query time for a refresh
	// operation. This is specified as part of the query options and is
	// expected to be implemented by the Type itself.
	//
	// Using these values, various "refresh" mechanisms can be implemented:
	//
	//   * With a high timer duration and a low timeout, a timer-based
	//     refresh can be set that minimizes load on the Consul servers.
	//
	//   * With a low timer and a high timeout duration, a blocking-query-based
	//     refresh can be set so that changes in server data are recognized
	//     within the cache very quickly.
	//
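	// For example (illustrative values, not defaults): a timer-based
	// refresh might set RefreshTimer to 30*time.Second with a
	// RefreshTimeout of 5*time.Second, while a blocking-query-based
	// refresh might set RefreshTimer to 0 and RefreshTimeout to
	// 10*time.Minute.
	//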
	RefreshTimer   time.Duration
	RefreshTimeout time.Duration
}
// RegisterType registers a cacheable type.
//
// This makes the type available for Get but does not automatically perform
// any prefetching. In order to populate the cache, Get must be called.
func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) {
	if opts == nil {
		opts = &RegisterOptions{}
	}
	if opts.LastGetTTL == 0 {
		opts.LastGetTTL = 72 * time.Hour // reasonable default is days
	}

	c.typesLock.Lock()
	defer c.typesLock.Unlock()
	c.types[n] = typeEntry{Type: typ, Opts: opts}
}
// Get loads the data for the given type and request. If data satisfying the
// minimum index is present in the cache, it is returned immediately. Otherwise,
// this will block until the data is available or the request timeout is
// reached.
//
// Multiple Get calls for the same Request (matching CacheInfo value) will
// block on a single network request.
//
// The timeout specified by the Request will be the timeout on the cache
// Get, and does not correspond to the timeout of any background data
// fetching. If the timeout is reached before data satisfying the minimum
// index is retrieved, the last known value (possibly nil) is returned. No
// error is returned on timeout. This matches the behavior of Consul blocking
// queries.
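//
// A caller-driven blocking loop might look like the sketch below, assuming
// c is a *Cache with "my-type" registered; req, its MinIndex field, and
// MyResult are hypothetical and depend on the concrete Request and Type
// implementations:
//
//	var index uint64
//	for {
//		req.MinIndex = index // assumed to feed CacheInfo().MinIndex
//		raw, err := c.Get("my-type", req)
//		if err != nil {
//			return err
//		}
//		result := raw.(*MyResult)
//		index = result.Index // assumed index field on the result
//		process(result)
//	}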
func (c *Cache) Get(t string, r Request) (interface{}, error) {
	info := r.CacheInfo()
	if info.Key == "" {
		metrics.IncrCounter([]string{"consul", "cache", "bypass"}, 1)

		// If no key is specified, then we do not cache this request.
		// Pass directly through to the backend.
		return c.fetchDirect(t, r)
	}

	// Get the actual key for our entry
	key := c.entryKey(&info)

	// First time through
	first := true

	// timeoutCh for watching our timeout
	var timeoutCh <-chan time.Time

RETRY_GET:
	// Get the current value
	c.entriesLock.RLock()
	entry, ok := c.entries[key]
	c.entriesLock.RUnlock()

	// If we have a current value and the requested index is zero or below
	// the stored index, then we return the stored value right away.
	if ok && entry.Valid {
		if info.MinIndex == 0 || info.MinIndex < entry.Index {
			if first {
				metrics.IncrCounter([]string{"consul", "cache", t, "hit"}, 1)
				atomic.AddUint64(&c.hits, 1)
			}

			// Touch the expiration and fix the heap.
			c.entriesLock.Lock()
			entry.Expiry.Reset()
			c.entriesExpiryHeap.Fix(entry.Expiry)
			c.entriesLock.Unlock()

			// We purposely do not return an error here since the cache
			// only works with fetching values that either have a value
			// or have an error, but not both. Because of this, the
			// entry's Error may be non-nil, recording errors from
			// later fetches.
			return entry.Value, nil
		}
	}

	// If this isn't our first time through and our last value has an error,
	// then we return the error. This prevents us from sitting in a retry
	// loop, getting the same error for the entire duration of the timeout.
	// Instead, we make one effort to fetch a new value, and if there was an
	// error, we return.
	if !first && entry.Error != nil {
		return entry.Value, entry.Error
	}

	if first {
		// Record the miss if it's our first time through
		atomic.AddUint64(&c.misses, 1)

		// We increment two different counters for cache misses depending on
		// whether we're missing because we didn't have the data at all,
		// or if we're missing because we're blocking on a set index.
		if info.MinIndex == 0 {
			metrics.IncrCounter([]string{"consul", "cache", t, "miss_new"}, 1)
		} else {
			metrics.IncrCounter([]string{"consul", "cache", t, "miss_block"}, 1)
		}
	}

	// No longer our first time through
	first = false

	// Set our timeout channel if we must
	if info.Timeout > 0 && timeoutCh == nil {
		timeoutCh = time.After(info.Timeout)
	}

	// At this point, we know we either don't have a value at all or the
	// value we have is too old. We need to wait for new data.
	waiterCh, err := c.fetch(t, key, r, true, 0)
	if err != nil {
		return nil, err
	}

	select {
	case <-waiterCh:
		// Our fetch returned, retry the get from the cache
		goto RETRY_GET

	case <-timeoutCh:
		// Timeout on the cache read, just return whatever we have.
		return entry.Value, nil
	}
}
// entryKey returns the key for the entry in the cache. See the note
// about the entry key format in the structure docs for Cache.
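//
// For example, a RequestInfo with Datacenter "dc1", Token "abc123", and
// Key "catalog/web" yields the entry key "dc1/abc123/catalog/web".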
func (c *Cache) entryKey(r *RequestInfo) string {
	return fmt.Sprintf("%s/%s/%s", r.Datacenter, r.Token, r.Key)
}
// fetch triggers a new background fetch for the given Request. If a
// background fetch is already running for a matching Request, the waiter
// channel for that request is returned. The effect of this is that there
// is only ever one blocking query for any matching requests.
//
// If allowNew is true then the fetch should create the cache entry
// if it doesn't exist. If this is false, then fetch will do nothing
// if the entry doesn't exist. The latter case supports refreshing:
// the entry may have expired and been evicted while a refresh was
// pending, and in that case we don't want to recreate it.
func (c *Cache) fetch(t, key string, r Request, allowNew bool, attempt uint) (<-chan struct{}, error) {
	// Get the type that we're fetching
	c.typesLock.RLock()
	tEntry, ok := c.types[t]
	c.typesLock.RUnlock()
	if !ok {
		return nil, fmt.Errorf("unknown type in cache: %s", t)
	}

	// We acquire a write lock because we may have to set Fetching to true.
	c.entriesLock.Lock()
	defer c.entriesLock.Unlock()
	entry, ok := c.entries[key]

	// If we aren't allowing new values and we don't have an existing value,
	// return immediately. We return an immediately-closed channel so nothing
	// blocks.
	if !ok && !allowNew {
		ch := make(chan struct{})
		close(ch)
		return ch, nil
	}

	// If we already have an entry and it is actively fetching, then return
	// the currently active waiter.
	if ok && entry.Fetching {
		return entry.Waiter, nil
	}

	// If we don't have an entry, then create it. The entry must be marked
	// as invalid so that it isn't returned as a valid value for a zero index.
	if !ok {
		entry = cacheEntry{Valid: false, Waiter: make(chan struct{})}
	}

	// We always specify an index greater than zero since an index of zero
	// means to always return immediately and we want to block if possible.
	// Index 1 is always safe since Consul's own initialization always results
	// in a higher index (around 10 or above).
	if entry.Index == 0 {
		entry.Index = 1
	}

	// Set that we're fetching to true, which makes it so that future
	// identical calls to fetch will return the same waiter rather than
	// perform multiple fetches.
	entry.Fetching = true
	c.entries[key] = entry
	metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))

	// The actual Fetch must be performed in a goroutine.
	go func() {
		// Start building the new entry by blocking on the fetch.
		result, err := tEntry.Type.Fetch(FetchOptions{
			MinIndex: entry.Index,
			Timeout:  tEntry.Opts.RefreshTimeout,
		}, r)

		// Copy the existing entry to start.
		newEntry := entry
		newEntry.Fetching = false
		if result.Value != nil {
			// A new value was given, so we create a brand new entry.
			newEntry.Value = result.Value
			newEntry.Index = result.Index

			// This is a valid entry with a result
			newEntry.Valid = true
		}

		// Error handling
		if err == nil {
			metrics.IncrCounter([]string{"consul", "cache", "fetch_success"}, 1)
			metrics.IncrCounter([]string{"consul", "cache", t, "fetch_success"}, 1)

			// Reset the attempts counter so we don't have any backoff
			attempt = 0
		} else {
			metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1)
			metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1)

			// Increment attempt counter
			attempt++

			// Always set the error. We don't override the value here because
			// if Valid is true, then we can reuse the Value when no specific
			// index is requested. However, for blocking queries, we want
			// Error to be set so that we can return early with the error.
			newEntry.Error = err
		}

		// Create a new waiter that will be used for the next fetch.
		newEntry.Waiter = make(chan struct{})

		// Set our entry
		c.entriesLock.Lock()

		// If this is a new entry (not in the heap yet), then set up the
		// initial expiry information and insert it. If we're already in
		// the heap we do nothing since we're reusing the same entry.
		if newEntry.Expiry == nil || newEntry.Expiry.HeapIndex == -1 {
			newEntry.Expiry = &cacheEntryExpiry{
				Key: key,
				TTL: tEntry.Opts.LastGetTTL,
			}
			newEntry.Expiry.Reset()
			heap.Push(c.entriesExpiryHeap, newEntry.Expiry)
		}

		c.entries[key] = newEntry
		c.entriesLock.Unlock()

		// Trigger the old waiter
		close(entry.Waiter)

		// If refresh is enabled, run the refresh in due time. The refresh
		// below might block, but saves us from spawning another goroutine.
		if tEntry.Opts.Refresh {
			c.refresh(tEntry.Opts, attempt, t, key, r)
		}
	}()

	return entry.Waiter, nil
}
// fetchDirect fetches the given request with no caching. Because this
// bypasses the caching entirely, multiple matching requests will result
// in multiple actual RPC calls (unlike fetch).
func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) {
	// Get the type that we're fetching
	c.typesLock.RLock()
	tEntry, ok := c.types[t]
	c.typesLock.RUnlock()
	if !ok {
		return nil, fmt.Errorf("unknown type in cache: %s", t)
	}

	// Fetch it with the min index specified directly by the request.
	result, err := tEntry.Type.Fetch(FetchOptions{
		MinIndex: r.CacheInfo().MinIndex,
	}, r)
	if err != nil {
		return nil, err
	}

	// Return the result and ignore the rest
	return result.Value, nil
}
// refresh triggers a fetch for a specific Request according to the
// registration options.
func (c *Cache) refresh(opts *RegisterOptions, attempt uint, t string, key string, r Request) {
	// Sanity check: we should not schedule anything that has refresh disabled.
	if !opts.Refresh {
		return
	}

	// If we're over the attempt minimum, start an exponential backoff.
	if attempt > CacheRefreshBackoffMin {
		waitTime := (1 << (attempt - CacheRefreshBackoffMin)) * time.Second
		if waitTime > CacheRefreshMaxWait {
			waitTime = CacheRefreshMaxWait
		}

		time.Sleep(waitTime)
	}

	// If we have a timer, wait for it
	if opts.RefreshTimer > 0 {
		time.Sleep(opts.RefreshTimer)
	}

	// Trigger. The "allowNew" field is false because in the time we were
	// waiting to refresh, the entry may have expired and been evicted. If
	// that happened, we don't want to create a new entry.
	c.fetch(t, key, r, false, attempt)
}
// runExpiryLoop is a blocking function that watches the expiration
// heap and invalidates entries that have expired.
func (c *Cache) runExpiryLoop() {
	var expiryTimer *time.Timer
	for {
		// If we have a previous timer, stop it.
		if expiryTimer != nil {
			expiryTimer.Stop()
		}

		// Get the entry expiring soonest
		var entry *cacheEntryExpiry
		var expiryCh <-chan time.Time
		c.entriesLock.RLock()
		if len(c.entriesExpiryHeap.Entries) > 0 {
			entry = c.entriesExpiryHeap.Entries[0]
			expiryTimer = time.NewTimer(time.Until(entry.Expires))
			expiryCh = expiryTimer.C
		}
		c.entriesLock.RUnlock()

		select {
		case <-c.entriesExpiryHeap.NotifyCh:
			// Entries changed, so the heap may have changed. Restart loop.

		case <-expiryCh:
			c.entriesLock.Lock()

			// Entry expired! Remove it.
			delete(c.entries, entry.Key)
			heap.Remove(c.entriesExpiryHeap, entry.HeapIndex)

			// This is subtle but important: if we race and simultaneously
			// evict and fetch a new value, then we set this to -1 to
			// have it treated as a new value so that the TTL is extended.
			entry.HeapIndex = -1

			// Set some metrics
			metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1)
			metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries)))

			c.entriesLock.Unlock()
		}
	}
}
// Hits returns the number of cache hits. Safe to call concurrently.
func (c *Cache) Hits() uint64 {
	return atomic.LoadUint64(&c.hits)
}