core, ethdb: two tiny fixes (#17183)

* ethdb: fix memory database

* core: fix bloombits checking

* core: minor polish
Authored by gary rong on 2018-07-18 18:41:36 +08:00; committed by Péter Szilágyi
parent 323428865f
commit dcdd57df62
3 changed files with 48 additions and 16 deletions

core/bloombits/generator.go

@@ -22,16 +22,22 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )
 
-// errSectionOutOfBounds is returned if the user tried to add more bloom filters
-// to the batch than available space, or if tries to retrieve above the capacity,
-var errSectionOutOfBounds = errors.New("section out of bounds")
+var (
+	// errSectionOutOfBounds is returned if the user tried to add more bloom filters
+	// to the batch than available space, or if tries to retrieve above the capacity.
+	errSectionOutOfBounds = errors.New("section out of bounds")
+
+	// errBloomBitOutOfBounds is returned if the user tried to retrieve specified
+	// bit bloom above the capacity.
+	errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
+)
 
 // Generator takes a number of bloom filters and generates the rotated bloom bits
 // to be used for batched filtering.
 type Generator struct {
 	blooms   [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
 	sections uint                         // Number of sections to batch together
-	nextBit  uint                         // Next bit to set when adding a bloom
+	nextSec  uint                         // Next section to set when adding a bloom
 }
 
 // NewGenerator creates a rotated bloom generator that can iteratively fill a
@@ -51,15 +57,15 @@ func NewGenerator(sections uint) (*Generator, error) {
 // in memory accordingly.
 func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 	// Make sure we're not adding more bloom filters than our capacity
-	if b.nextBit >= b.sections {
+	if b.nextSec >= b.sections {
 		return errSectionOutOfBounds
 	}
-	if b.nextBit != index {
+	if b.nextSec != index {
 		return errors.New("bloom filter with unexpected index")
 	}
 	// Rotate the bloom and insert into our collection
-	byteIndex := b.nextBit / 8
-	bitMask := byte(1) << byte(7-b.nextBit%8)
+	byteIndex := b.nextSec / 8
+	bitMask := byte(1) << byte(7-b.nextSec%8)
 
 	for i := 0; i < types.BloomBitLength; i++ {
 		bloomByteIndex := types.BloomByteLength - 1 - i/8
@@ -69,7 +75,7 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 			b.blooms[i][byteIndex] |= bitMask
 		}
 	}
-	b.nextBit++
+	b.nextSec++
 
 	return nil
 }
@@ -77,11 +83,11 @@ func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
 // Bitset returns the bit vector belonging to the given bit index after all
 // blooms have been added.
 func (b *Generator) Bitset(idx uint) ([]byte, error) {
-	if b.nextBit != b.sections {
+	if b.nextSec != b.sections {
 		return nil, errors.New("bloom not fully generated yet")
 	}
-	if idx >= b.sections {
-		return nil, errSectionOutOfBounds
+	if idx >= types.BloomBitLength {
+		return nil, errBloomBitOutOfBounds
 	}
 	return b.blooms[idx], nil
 }

ethdb/database_test.go

@@ -59,6 +59,28 @@ func TestMemoryDB_PutGet(t *testing.T) {
 func testPutGet(db ethdb.Database, t *testing.T) {
 	t.Parallel()
 
+	for _, k := range test_values {
+		err := db.Put([]byte(k), nil)
+		if err != nil {
+			t.Fatalf("put failed: %v", err)
+		}
+	}
+
+	for _, k := range test_values {
+		data, err := db.Get([]byte(k))
+		if err != nil {
+			t.Fatalf("get failed: %v", err)
+		}
+		if len(data) != 0 {
+			t.Fatalf("get returned wrong result, got %q expected nil", string(data))
+		}
+	}
+
+	_, err := db.Get([]byte("non-exist-key"))
+	if err == nil {
+		t.Fatalf("expect to return a not found error")
+	}
+
 	for _, v := range test_values {
 		err := db.Put([]byte(v), []byte(v))
 		if err != nil {
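
The contract this new test pins down can also be exercised directly on a memory database: a key written with a nil value remains present and yields an empty result, while a key that was never written yields an error from Get. The sketch below is illustrative only and assumes the ethdb API of this release (NewMemDatabase returning a *MemDatabase with Put, Get and Has as used here); it is not part of the commit.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()

	// Storing a nil value is not the same as never storing the key.
	if err := db.Put([]byte("k"), nil); err != nil {
		panic(err)
	}
	ok, _ := db.Has([]byte("k"))
	val, err := db.Get([]byte("k"))
	fmt.Println(ok, len(val), err) // true 0 <nil>

	// A key that was never written must report an error on Get.
	_, err = db.Get([]byte("non-exist-key"))
	fmt.Println(err != nil) // true
}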

ethdb/memory_database.go

@@ -96,7 +96,10 @@ func (db *MemDatabase) NewBatch() Batch {
 func (db *MemDatabase) Len() int { return len(db.db) }
 
-type kv struct{ k, v []byte }
+type kv struct {
+	k, v []byte
+	del  bool
+}
 
 type memBatch struct {
 	db *MemDatabase
@@ -105,13 +108,14 @@ type memBatch struct {
 }
 
 func (b *memBatch) Put(key, value []byte) error {
-	b.writes = append(b.writes, kv{common.CopyBytes(key), common.CopyBytes(value)})
+	b.writes = append(b.writes, kv{common.CopyBytes(key), common.CopyBytes(value), false})
 	b.size += len(value)
 	return nil
 }
 
 func (b *memBatch) Delete(key []byte) error {
-	b.writes = append(b.writes, kv{common.CopyBytes(key), nil})
+	b.writes = append(b.writes, kv{common.CopyBytes(key), nil, true})
+	b.size += 1
 	return nil
 }
 
@@ -120,7 +124,7 @@ func (b *memBatch) Write() error {
 	defer b.db.lock.Unlock()
 
 	for _, kv := range b.writes {
-		if kv.v == nil {
+		if kv.del {
 			delete(b.db.db, string(kv.k))
 			continue
 		}
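
To see why the del flag matters: previously a batched Put with a nil value produced kv.v == nil, exactly the condition Write used to detect deletions, so the key was dropped on Write instead of being stored with an empty value. The sketch below illustrates the restored semantics; it assumes the ethdb.Batch interface of this release exposes Put, Delete and Write as used here, and is not part of the commit.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db := ethdb.NewMemDatabase()
	db.Put([]byte("a"), []byte("1"))
	db.Put([]byte("b"), []byte("2"))

	batch := db.NewBatch()
	batch.Put([]byte("a"), nil) // overwrite "a" with an empty value
	batch.Delete([]byte("b"))   // genuinely remove "b"
	if err := batch.Write(); err != nil {
		panic(err)
	}

	hasA, _ := db.Has([]byte("a"))
	hasB, _ := db.Has([]byte("b"))
	fmt.Println(hasA, hasB) // true false: only the explicit Delete removed a key
}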