snapshot, trie: fixed typos, mostly in snapshot pkg (#22133)

Melvin Junhee Woo 2021-01-07 15:36:21 +09:00 committed by GitHub
parent 072fd96254
commit d2e1b17f18
10 changed files with 21 additions and 21 deletions

View File

@@ -240,7 +240,7 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato
}
in <- leaf
-// Accumulate the generaation statistic if it's required.
+// Accumulate the generation statistic if it's required.
processed++
if time.Since(logged) > 3*time.Second && stats != nil {
if account == (common.Hash{}) {

View File

@@ -44,7 +44,7 @@ var (
// aggregatorItemLimit is an approximate number of items that will end up
// in the agregator layer before it's flushed out to disk. A plain account
// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
-// 0B (+hash). Slots are mostly set/unset in lockstep, so thet average at
+// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
// smaller number to be on the safe side.
aggregatorItemLimit = aggregatorMemoryLimit / 42
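As a back-of-the-envelope check of the sizing comment above, a hedged sketch in Go; the 4 MB value of aggregatorMemoryLimit is assumed from the surrounding file and is not part of this hunk, and the constant names here are illustrative only:

const (
    aggregatorMemoryLimitSketch = uint64(4 * 1024 * 1024) // assumed value of aggregatorMemoryLimit

    avgPayload = 15                   // ~14B account / ~16B slot, averaged
    hashLen    = 32                   // length of a common.Hash key
    avgEntry   = avgPayload + hashLen // ~47B per aggregated item

    // Dividing by 42 instead of 47 over-counts the items a layer can hold,
    // so the aggregator flushes to disk a little earlier than strictly needed.
    itemLimitSketch = aggregatorMemoryLimitSketch / 42 // 99,864 items
)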
@@ -114,9 +114,9 @@ type diffLayer struct {
// deleted, all data in other set belongs to the "new" A.
destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts
accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
-accountData map[common.Hash][]byte // Keyed accounts for direct retrival (nil means deleted)
+accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
-storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrival. one per account (nil means deleted)
+storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
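For orientation, a minimal sketch of how a single account lookup might consult these fields, assuming the parent layer exposes AccountRLP as elsewhere in the package; the real method additionally handles locking, staleness checks and the bloom-filter shortcut, and the helper name is hypothetical:

func lookupSketch(dl *diffLayer, hash common.Hash) ([]byte, error) {
    if data, ok := dl.accountData[hash]; ok {
        return data, nil // nil payload means the account was deleted in this layer
    }
    if _, ok := dl.destructSet[hash]; ok {
        return nil, nil // the whole account was destructed here, stop descending
    }
    return dl.parent.AccountRLP(hash) // this layer has no opinion, ask the layer below
}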
@@ -482,7 +482,7 @@ func (dl *diffLayer) flatten() snapshot {
}
}
-// AccountList returns a sorted list of all accounts in this difflayer, including
+// AccountList returns a sorted list of all accounts in this diffLayer, including
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
@@ -513,7 +513,7 @@ func (dl *diffLayer) AccountList() []common.Hash {
return dl.accountList
}
-// StorageList returns a sorted list of all storage slot hashes in this difflayer
+// StorageList returns a sorted list of all storage slot hashes in this diffLayer
// for the given account. If the whole storage is destructed in this layer, then
// an additional flag *destructed = true* will be returned, otherwise the flag is
// false. Besides, the returned list will include the hash of deleted storage slot.
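A hedged, package-internal sketch of consuming the two lists as documented above; the helper name is hypothetical and the StorageList signature is inferred from the comment:

func listUsageSketch(dl *diffLayer, account common.Hash) (live int) {
    for _, hash := range dl.AccountList() { // sorted, includes deleted accounts, not a copy
        if dl.accountData[hash] != nil {
            live++ // non-nil payload: the account is present or updated in this layer
        }
    }
    if slots, destructed := dl.StorageList(account); !destructed {
        _ = slots // sorted slot hashes; deleted slots are included with nil payloads
    }
    return live
}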

View File

@@ -314,7 +314,7 @@ func BenchmarkSearchSlot(b *testing.B) {
// With accountList and sorting
// BenchmarkFlatten-6 50 29890856 ns/op
//
-// Without sorting and tracking accountlist
+// Without sorting and tracking accountList
// BenchmarkFlatten-6 300 5511511 ns/op
func BenchmarkFlatten(b *testing.B) {
fill := func(parent snapshot) *diffLayer {

View File

@@ -31,7 +31,7 @@ import (
// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
-triedb *trie.Database // Trie node cache for reconstuction purposes
+triedb *trie.Database // Trie node cache for reconstruction purposes
cache *fastcache.Cache // Cache to avoid hitting the disk for direct access
root common.Hash // Root hash of the base snapshot

View File

@@ -482,7 +482,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
if !bytes.Equal(generator.Marker, genMarker) {
t.Fatalf("Generator marker is not matched")
}
-// Test senario 2, the disk layer is fully generated
+// Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
accThree: accThree.Bytes(),

View File

@@ -133,7 +133,7 @@ func (it *diffAccountIterator) Hash() common.Hash {
// Account returns the RLP encoded slim account the iterator is currently at.
// This method may _fail_, if the underlying layer has been flattened between
-// the call to Next and Acccount. That type of error will set it.Err.
+// the call to Next and Account. That type of error will set it.Err.
// This method assumes that flattening does not delete elements from
// the accountdata mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
@@ -243,7 +243,7 @@ type diffStorageIterator struct {
}
// StorageIterator creates a storage iterator over a single diff layer.
-// Execept the storage iterator is returned, there is an additional flag
+// Except the storage iterator is returned, there is an additional flag
// "destructed" returned. If it's true then it means the whole storage is
// destructed in this layer(maybe recreated too), don't bother deeper layer
// for storage retrieval.
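A hedged sketch of draining such an iterator while honouring the error semantics described above; the function name is hypothetical, and the methods used are the ones this package's Iterator types expose:

func drainSketch(it AccountIterator) error {
    defer it.Release()
    for it.Next() {
        if blob := it.Account(); blob == nil && it.Error() != nil {
            return it.Error() // layer flattened between Next and Account
        }
    }
    return it.Error() // nil if the iterator was simply exhausted
}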

View File

@@ -37,7 +37,7 @@ type binaryIterator struct {
}
// initBinaryAccountIterator creates a simplistic iterator to step over all the
-// accounts in a slow, but eaily verifiable way. Note this function is used for
+// accounts in a slow, but easily verifiable way. Note this function is used for
// initialization, use `newBinaryAccountIterator` as the API.
func (dl *diffLayer) initBinaryAccountIterator() Iterator {
parent, ok := dl.parent.(*diffLayer)
@@ -62,7 +62,7 @@ func (dl *diffLayer) initBinaryAccountIterator() Iterator {
}
// initBinaryStorageIterator creates a simplistic iterator to step over all the
-// storage slots in a slow, but eaily verifiable way. Note this function is used
+// storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator {
parent, ok := dl.parent.(*diffLayer)
@@ -199,14 +199,14 @@ func (it *binaryIterator) Release() {
}
// newBinaryAccountIterator creates a simplistic account iterator to step over
-// all the accounts in a slow, but eaily verifiable way.
+// all the accounts in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
iter := dl.initBinaryAccountIterator()
return iter.(AccountIterator)
}
// newBinaryStorageIterator creates a simplistic account iterator to step over
-// all the storage slots in a slow, but eaily verifiable way.
+// all the storage slots in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
iter := dl.initBinaryStorageIterator(account)
return iter.(StorageIterator)
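Because the binary iterator is meant as a slow reference, a hedged test-style sketch of using it to confirm ascending hash order; the helper name is hypothetical and the standard bytes import is assumed:

func checkOrderSketch(dl *diffLayer) bool {
    it := dl.newBinaryAccountIterator()
    defer it.Release()
    var prev common.Hash
    for it.Next() {
        if bytes.Compare(it.Hash().Bytes(), prev.Bytes()) < 0 {
            return false // out of order, something is wrong with the layer
        }
        prev = it.Hash()
    }
    return it.Error() == nil
}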

View File

@@ -75,7 +75,7 @@ type fastIterator struct {
fail error
}
-// newFastIterator creates a new hierarhical account or storage iterator with one
+// newFastIterator creates a new hierarchical account or storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
@@ -335,14 +335,14 @@ func (fi *fastIterator) Debug() {
fmt.Println()
}
-// newFastAccountIterator creates a new hierarhical account iterator with one
+// newFastAccountIterator creates a new hierarchical account iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) {
return newFastIterator(tree, root, common.Hash{}, seek, true)
}
-// newFastStorageIterator creates a new hierarhical storage iterator with one
+// newFastStorageIterator creates a new hierarchical storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
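A hedged sketch of walking every account reachable from a state root with the fast combo iterator shown above; the wrapper name is hypothetical:

func walkAccountsSketch(tree *Tree, root common.Hash) error {
    it, err := newFastAccountIterator(tree, root, common.Hash{}) // seek from the very first hash
    if err != nil {
        return err
    }
    defer it.Release()
    for it.Next() {
        _ = it.Hash()    // account hash, ascending across all layers
        _ = it.Account() // payload from the topmost layer that has the account
    }
    return it.Error()
}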

View File

@@ -368,7 +368,7 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
-// The method returns the new disk layer if diffs were persistend into it.
+// The method returns the new disk layer if diffs were persisted into it.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
// Dive until we run out of layers or reach the persistent database
for ; layers > 2; layers-- {
@@ -647,7 +647,7 @@ func (t *Tree) Rebuild(root common.Hash) {
panic(fmt.Sprintf("unknown layer type: %T", layer))
}
}
-// Start generating a new snapshot from scratch on a backgroung thread. The
+// Start generating a new snapshot from scratch on a background thread. The
// generator will run a wiper first if there's not one running right now.
log.Info("Rebuilding state snapshot")
t.layers = map[common.Hash]snapshot{
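A hedged sketch of the usual call pattern around Cap, assuming the Tree.Update argument order used elsewhere in this diff; the wrapper name is hypothetical and the 128-layer figure is illustrative, not mandated by this code:

func updateAndCapSketch(tree *Tree, blockRoot, parentRoot common.Hash,
    accounts map[common.Hash][]byte) error {
    // Stack a new diff layer on top of the parent, then flatten anything
    // beyond the permitted depth down towards the disk layer.
    if err := tree.Update(blockRoot, parentRoot, nil, accounts, nil); err != nil {
        return err
    }
    return tree.Cap(blockRoot, 128)
}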

View File

@@ -736,7 +736,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
batch.Replay(uncacher)
batch.Reset()
-// Reset the storage counters and bumpd metrics
+// Reset the storage counters and bumped metrics
if db.preimages != nil {
db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
}