// Vendored from github.com/allegro/bigcache: shard.go.
package bigcache
import (
"fmt"
"sync"
"sync/atomic"
"github.com/allegro/bigcache/queue"
)
2019-06-09 07:24:20 +00:00
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
2018-12-19 10:02:07 +00:00
type cacheShard struct {
hashmap map[uint64]uint32
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
2019-06-09 07:24:20 +00:00
onRemove onRemoveCallback
2018-12-19 10:02:07 +00:00
isVerbose bool
logger Logger
clock clock
lifeWindow uint64
stats Stats
}
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.miss()
2019-06-09 07:24:20 +00:00
return nil, ErrEntryNotFound
2018-12-19 10:02:07 +00:00
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.miss()
return nil, err
}
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}
s.lock.RUnlock()
s.collision()
2019-06-09 07:24:20 +00:00
return nil, ErrEntryNotFound
2018-12-19 10:02:07 +00:00
}
2019-06-09 07:24:20 +00:00
entry := readEntry(wrappedEntry)
2018-12-19 10:02:07 +00:00
s.lock.RUnlock()
s.hit()
2019-06-09 07:24:20 +00:00
return entry, nil
2018-12-19 10:02:07 +00:00
}
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
currentTimestamp := uint64(s.clock.epoch())
s.lock.Lock()
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
resetKeyFromEntry(previousEntry)
}
}
if oldestEntry, err := s.entries.Peek(); err == nil {
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
}
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.lock.Unlock()
return nil
}
2019-06-09 07:24:20 +00:00
if s.removeOldestEntry(NoSpace) != nil {
2018-12-19 10:02:07 +00:00
s.lock.Unlock()
return fmt.Errorf("entry is bigger than max shard size")
}
}
}
func (s *cacheShard) del(key string, hashedKey uint64) error {
2019-06-09 07:24:20 +00:00
// Optimistic pre-check using only readlock
2018-12-19 10:02:07 +00:00
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.delmiss()
2019-06-09 07:24:20 +00:00
return ErrEntryNotFound
2018-12-19 10:02:07 +00:00
}
2019-06-09 07:24:20 +00:00
if err := s.entries.CheckGet(int(itemIndex)); err != nil {
2018-12-19 10:02:07 +00:00
s.lock.RUnlock()
s.delmiss()
return err
}
s.lock.RUnlock()
s.lock.Lock()
{
2019-06-09 07:24:20 +00:00
// After obtaining the writelock, we need to read the same again,
// since the data delivered earlier may be stale now
itemIndex = s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.Unlock()
s.delmiss()
return ErrEntryNotFound
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.Unlock()
s.delmiss()
return err
}
2018-12-19 10:02:07 +00:00
delete(s.hashmap, hashedKey)
2019-06-09 07:24:20 +00:00
s.onRemove(wrappedEntry, Deleted)
2018-12-19 10:02:07 +00:00
resetKeyFromEntry(wrappedEntry)
}
s.lock.Unlock()
s.delhit()
return nil
}
2019-06-09 07:24:20 +00:00
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
2018-12-19 10:02:07 +00:00
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > s.lifeWindow {
2019-06-09 07:24:20 +00:00
evict(Expired)
2018-12-19 10:02:07 +00:00
return true
}
return false
}
// cleanUp evicts expired entries from the head of the queue, oldest first,
// stopping at the first entry that is still within the life window.
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for {
		oldestEntry, err := s.entries.Peek()
		if err != nil {
			break
		}
		if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
			break
		}
	}
}
func (s *cacheShard) getOldestEntry() ([]byte, error) {
2019-06-09 07:24:20 +00:00
s.lock.RLock()
defer s.lock.RUnlock()
2018-12-19 10:02:07 +00:00
return s.entries.Peek()
}
func (s *cacheShard) getEntry(index int) ([]byte, error) {
2019-06-09 07:24:20 +00:00
s.lock.RLock()
entry, err := s.entries.Get(index)
s.lock.RUnlock()
return entry, err
2018-12-19 10:02:07 +00:00
}
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
s.lock.RLock()
2019-06-09 07:24:20 +00:00
keys = make([]uint32, len(s.hashmap))
2018-12-19 10:02:07 +00:00
for _, index := range s.hashmap {
keys[next] = index
next++
}
s.lock.RUnlock()
return keys, next
}
2019-06-09 07:24:20 +00:00
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
2018-12-19 10:02:07 +00:00
oldest, err := s.entries.Pop()
if err == nil {
hash := readHashFromEntry(oldest)
delete(s.hashmap, hash)
2019-06-09 07:24:20 +00:00
s.onRemove(oldest, reason)
2018-12-19 10:02:07 +00:00
return nil
}
return err
}
// reset discards every entry and reinitializes the shard's storage
// according to config.
func (s *cacheShard) reset(config Config) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.hashmap = make(map[uint64]uint32, config.initialShardSize())
	s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
	s.entries.Reset()
}
// len reports the number of entries currently indexed by the shard.
func (s *cacheShard) len() int {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return len(s.hashmap)
}
2019-06-09 07:24:20 +00:00
func (s *cacheShard) capacity() int {
s.lock.RLock()
res := s.entries.Capacity()
s.lock.RUnlock()
return res
}
2018-12-19 10:02:07 +00:00
func (s *cacheShard) getStats() Stats {
var stats = Stats{
Hits: atomic.LoadInt64(&s.stats.Hits),
Misses: atomic.LoadInt64(&s.stats.Misses),
DelHits: atomic.LoadInt64(&s.stats.DelHits),
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
Collisions: atomic.LoadInt64(&s.stats.Collisions),
}
return stats
}
// hit atomically records a successful lookup.
func (s *cacheShard) hit() {
	atomic.AddInt64(&s.stats.Hits, 1)
}

// miss atomically records a failed lookup.
func (s *cacheShard) miss() {
	atomic.AddInt64(&s.stats.Misses, 1)
}

// delhit atomically records a successful delete.
func (s *cacheShard) delhit() {
	atomic.AddInt64(&s.stats.DelHits, 1)
}

// delmiss atomically records a delete of a missing key.
func (s *cacheShard) delmiss() {
	atomic.AddInt64(&s.stats.DelMisses, 1)
}

// collision atomically records a hash collision detected in get.
func (s *cacheShard) collision() {
	atomic.AddInt64(&s.stats.Collisions, 1)
}
// initNewShard builds a shard sized from config, wiring in the removal
// callback and clock. The life window is stored in whole seconds.
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
	initialSize := config.initialShardSize()
	queueCapacity := initialSize * config.MaxEntrySize

	shard := &cacheShard{
		hashmap:     make(map[uint64]uint32, initialSize),
		entries:     *queue.NewBytesQueue(queueCapacity, config.maximumShardSize(), config.Verbose),
		entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
		onRemove:    callback,
		isVerbose:   config.Verbose,
		logger:      newLogger(config.Logger),
		clock:       clock,
		lifeWindow:  uint64(config.LifeWindow.Seconds()),
	}
	return shard
}