// Package fastcache implements a fast in-memory cache.
//
// The package has been extracted from https://victoriametrics.com/
package fastcache

import (
	"fmt"
	"sync"
	"sync/atomic"

	xxhash "github.com/cespare/xxhash/v2"
)

const bucketsCount = 512

const chunkSize = 64 * 1024

const bucketSizeBits = 40

const genSizeBits = 64 - bucketSizeBits

const maxGen = 1<<genSizeBits - 1

const maxBucketSize uint64 = 1 << bucketSizeBits

// Stats represents cache stats.
//
// Use Cache.UpdateStats for obtaining fresh stats from the cache.
type Stats struct {
	// GetCalls is the number of Get calls.
	GetCalls uint64

	// SetCalls is the number of Set calls.
	SetCalls uint64

	// Misses is the number of cache misses.
	Misses uint64

	// Collisions is the number of cache collisions.
	//
	// Usually the number of collisions must be close to zero.
	// A high number of collisions suggests something is wrong with the cache.
	Collisions uint64

	// Corruptions is the number of detected corruptions of the cache.
	Corruptions uint64

	// EntriesCount is the current number of entries in the cache.
	EntriesCount uint64

	// BytesSize is the current size of the cache in bytes.
	BytesSize uint64
}

// Reset resets s, so it may be re-used (aka re-set) in Cache.UpdateStats.
func (s *Stats) Reset() {
	*s = Stats{}
}

// Cache is a fast thread-safe in-memory cache optimized for a big number
// of entries.
//
// Call New for creating new cache instances.
// Concurrent goroutines may call any Cache methods on the same cache instance.
type Cache struct {
	buckets [bucketsCount]bucket
}

// New returns a new cache with the given maxBytes capacity in bytes.
//
// maxBytes must be smaller than the available RAM size for the app,
// since the cache holds all the data in memory.
func New(maxBytes int) *Cache {
	if maxBytes <= 0 {
		panic(fmt.Errorf("maxBytes must be greater than 0; got %d", maxBytes))
	}
	var c Cache
	maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount)
	for i := range c.buckets[:] {
		c.buckets[i].Init(maxBucketBytes)
	}
	return &c
}

// Set stores (k, v) in the cache.
//
// The stored entry may be evicted at any time either due to cache overflow
// or due to an unlikely hash collision.
func (c *Cache) Set(k, v []byte) {
	h := xxhash.Sum64(k)
	idx := h % bucketsCount
	c.buckets[idx].Set(k, v, h)
}

// Get appends the value for the key k to dst and returns the result.
func (c *Cache) Get(dst, k []byte) []byte {
	h := xxhash.Sum64(k)
	idx := h % bucketsCount
	dst, _ = c.buckets[idx].Get(dst, k, h, true)
	return dst
}

// Del deletes the value for the given k from the cache.
func (c *Cache) Del(k []byte) {
	h := xxhash.Sum64(k)
	idx := h % bucketsCount
	c.buckets[idx].Del(h)
}

// Reset removes all the items from the cache.
func (c *Cache) Reset() {
	for i := range c.buckets[:] {
		c.buckets[i].Reset()
	}
}

// UpdateStats adds cache stats to s.
func (c *Cache) UpdateStats(s *Stats) {
	for i := range c.buckets[:] {
		c.buckets[i].UpdateStats(s)
	}
}

type bucket struct {
	mu sync.RWMutex

	// chunks is a ring buffer with encoded (k, v) pairs.
	// It consists of 64KB chunks.
	chunks [][]byte

	// m maps hash(k) to the index of the (k, v) pair in chunks.
	m map[uint64]uint64

	// idx points to chunks for writing the next (k, v) pair.
	idx uint64

	// gen is the generation of chunks.
	gen uint64

	getCalls    uint64
	setCalls    uint64
	misses      uint64
	collisions  uint64
	corruptions uint64
}

func (b *bucket) Init(maxBytes uint64) {
	if maxBytes == 0 {
		panic(fmt.Errorf("maxBytes cannot be zero"))
	}
	if maxBytes >= maxBucketSize {
		panic(fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize))
	}
	maxChunks := (maxBytes + chunkSize - 1) / chunkSize
	b.chunks = make([][]byte, maxChunks)
	b.m = make(map[uint64]uint64)
	b.Reset()
}

func (b *bucket) Reset() {
	b.mu.Lock()
	chunks := b.chunks
	for i := range chunks {
		putChunk(chunks[i])
		chunks[i] = nil
	}
	bm := b.m
	for k := range bm {
		delete(bm, k)
	}
	b.idx = 0
	b.gen = 1
	atomic.StoreUint64(&b.getCalls, 0)
	atomic.StoreUint64(&b.setCalls, 0)
	atomic.StoreUint64(&b.misses, 0)
	atomic.StoreUint64(&b.collisions, 0)
	atomic.StoreUint64(&b.corruptions, 0)
	b.mu.Unlock()
}

// Clean drops index entries that no longer point to live data, i.e. entries
// whose chunks have been overwritten by newer generations.
func (b *bucket) Clean() {
	b.mu.Lock()
	bGen := b.gen & ((1 << genSizeBits) - 1)
	bIdx := b.idx
	bm := b.m
	for k, v := range bm {
		gen := v >> bucketSizeBits
		idx := v & ((1 << bucketSizeBits) - 1)
		if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx {
			continue
		}
		delete(bm, k)
	}
	b.mu.Unlock()
}

func (b *bucket) UpdateStats(s *Stats) {
	s.GetCalls += atomic.LoadUint64(&b.getCalls)
	s.SetCalls += atomic.LoadUint64(&b.setCalls)
	s.Misses += atomic.LoadUint64(&b.misses)
	s.Collisions += atomic.LoadUint64(&b.collisions)
	s.Corruptions += atomic.LoadUint64(&b.corruptions)

	b.mu.RLock()
	s.EntriesCount += uint64(len(b.m))
	for _, chunk := range b.chunks {
		s.BytesSize += uint64(cap(chunk))
	}
	b.mu.RUnlock()
}

func (b *bucket) Set(k, v []byte, h uint64) {
	setCalls := atomic.AddUint64(&b.setCalls, 1)
	if setCalls%(1<<14) == 0 {
		b.Clean()
	}

	if len(k) >= (1<<16) || len(v) >= (1<<16) {
		// Too big key or value - its length cannot be encoded
		// with 2 bytes (see below). Skip the entry.
		return
	}
	var kvLenBuf [4]byte
	kvLenBuf[0] = byte(uint16(len(k)) >> 8)
	kvLenBuf[1] = byte(len(k))
	kvLenBuf[2] = byte(uint16(len(v)) >> 8)
	kvLenBuf[3] = byte(len(v))
	kvLen := uint64(len(kvLenBuf) + len(k) + len(v))
	if kvLen >= chunkSize {
		// Do not store too big keys and values, since they do not
		// fit a chunk.
		return
	}

	b.mu.Lock()
	idx := b.idx
	idxNew := idx + kvLen
	chunkIdx := idx / chunkSize
	chunkIdxNew := idxNew / chunkSize
	if chunkIdxNew > chunkIdx {
		if chunkIdxNew >= uint64(len(b.chunks)) {
			// The ring buffer wraps around: start overwriting
			// the oldest chunks and bump the generation.
			idx = 0
			idxNew = kvLen
			chunkIdx = 0
			b.gen++
			if b.gen&((1<<genSizeBits)-1) == 0 {
				b.gen++
			}
		} else {
			idx = chunkIdxNew * chunkSize
			idxNew = idx + kvLen
			chunkIdx = chunkIdxNew
		}
		b.chunks[chunkIdx] = b.chunks[chunkIdx][:0]
	}
	chunk := b.chunks[chunkIdx]
	if chunk == nil {
		chunk = getChunk()
		chunk = chunk[:0]
	}
	chunk = append(chunk, kvLenBuf[:]...)
	chunk = append(chunk, k...)
	chunk = append(chunk, v...)
	b.chunks[chunkIdx] = chunk
	b.m[h] = idx | (b.gen << bucketSizeBits)
	b.idx = idxNew
	b.mu.Unlock()
}
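// The index packing above can be read as follows (an illustrative sketch
// derived from Set and Get, not a separate API): each entry in a chunk is a
// 4-byte header holding big-endian uint16 key and value lengths, followed by
// the key bytes and the value bytes, while b.m[h] packs the entry's offset
// and the ring-buffer generation into a single uint64:
//
//	v := idx | (gen << bucketSizeBits)     // what Set stores in b.m[h]
//	gen := v >> bucketSizeBits             // what Get recovers
//	idx := v & ((1 << bucketSizeBits) - 1)
//
// For example, an entry written at offset 100 during generation 3 is indexed
// as 100 | (3 << 40). Get treats the entry as live only while its generation
// is current, or one behind for offsets the write cursor has not yet reached.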
func (b *bucket) Get(dst, k []byte, h uint64, returnDst bool) ([]byte, bool) {
	atomic.AddUint64(&b.getCalls, 1)
	found := false
	b.mu.RLock()
	v := b.m[h]
	bGen := b.gen & ((1 << genSizeBits) - 1)
	if v > 0 {
		gen := v >> bucketSizeBits
		idx := v & ((1 << bucketSizeBits) - 1)
		if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx {
			chunkIdx := idx / chunkSize
			if chunkIdx >= uint64(len(b.chunks)) {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			chunk := b.chunks[chunkIdx]
			idx %= chunkSize
			if idx+4 >= chunkSize {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			kvLenBuf := chunk[idx : idx+4]
			keyLen := (uint64(kvLenBuf[0]) << 8) | uint64(kvLenBuf[1])
			valLen := (uint64(kvLenBuf[2]) << 8) | uint64(kvLenBuf[3])
			idx += 4
			if idx+keyLen+valLen >= chunkSize {
				// Corrupted data during the load from file. Just skip it.
				atomic.AddUint64(&b.corruptions, 1)
				goto end
			}
			if string(k) == string(chunk[idx:idx+keyLen]) {
				idx += keyLen
				if returnDst {
					dst = append(dst, chunk[idx:idx+valLen]...)
				}
				found = true
			} else {
				atomic.AddUint64(&b.collisions, 1)
			}
		}
	}
end:
	b.mu.RUnlock()
	if !found {
		atomic.AddUint64(&b.misses, 1)
	}
	return dst, found
}

func (b *bucket) Del(h uint64) {
	b.mu.Lock()
	delete(b.m, h)
	b.mu.Unlock()
}
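// Example usage (a minimal sketch of the package-level API above; the 32 MiB
// capacity is arbitrary):
//
//	c := fastcache.New(32 * 1024 * 1024)
//	c.Set([]byte("key"), []byte("value"))
//	buf := c.Get(nil, []byte("key")) // buf now holds []byte("value")
//	c.Del([]byte("key"))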