random warning fixes
commit 14712bbbdc
parent fabf8b05ce
@@ -62,8 +62,7 @@ func subkey(root: Eth2Digest, slot: Slot): auto =
   ret
 
 proc init*(T: type BeaconChainDB, backend: TrieDatabaseRef): BeaconChainDB =
-  new result
-  result.backend = backend
+  T(backend: backend)
 
 proc putBlock*(db: BeaconChainDB, key: Eth2Digest, value: BeaconBlock) =
   db.backend.put(subkey(type value, key), SSZ.encode(value))
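The `BeaconChainDB.init` change replaces the two-step `new result` plus field assignment with a single object-construction expression. A minimal standalone sketch of the same pattern, using a made-up `Db` type rather than the project's actual `BeaconChainDB`:

```nim
# Sketch only: `Db` is a hypothetical stand-in for a ref object such as BeaconChainDB.
type
  Db = ref object
    backend: string

proc init(T: type Db, backend: string): Db =
  # One construction expression instead of `new result` followed by
  # `result.backend = backend`.
  T(backend: backend)

when isMainModule:
  let db = Db.init("in-memory")
  echo db.backend   # -> in-memory
```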
@@ -48,9 +48,6 @@ func localValidatorsDir(conf: BeaconNodeConf): string =
 func databaseDir(conf: BeaconNodeConf): string =
   conf.dataDir / "db"
 
-template `//`(url, fragment: string): string =
-  url & "/" & fragment
-
 proc saveValidatorKey(keyName, key: string, conf: BeaconNodeConf) =
   let validatorsDir = conf.dataDir / dataDirValidators
   let outputFile = validatorsDir / keyName
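The non-exported `//` helper had no remaining call sites, and deleting dead private routines is the usual way to quiet Nim's unused-declaration hints (my reading; the commit message only says "warning fixes"). A standalone sketch of the operator for reference, with a call site that would equally keep the hint quiet if the helper were still wanted:

```nim
# Sketch only: same shape as the deleted helper; kept alive here by a call site.
template `//`(url, fragment: string): string =
  url & "/" & fragment

echo "http://host" // "api"   # -> http://host/api
```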
@@ -157,7 +154,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
   result.attachedValidators = ValidatorPool.init
 
-  let trieDB = trieDB newChainDb(string conf.databaseDir)
+  let trieDB = trieDB newChainDb(conf.databaseDir)
   result.db = BeaconChainDB.init(trieDB)
 
   # TODO this is problably not the right place to ensure that db is sane..
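Dropping the `string` conversion suggests `conf.databaseDir` already returns a `string`, so converting the value to its own type was redundant; Nim flags such conversions with a hint (ConvFromXtoItselfNotNeeded, to the best of my knowledge). A standalone illustration with a hypothetical config type:

```nim
# Sketch only: `Conf` and `databaseDir` are stand-ins for the real BeaconNodeConf.
type Conf = object
  dataDir: string

func databaseDir(conf: Conf): string =
  conf.dataDir & "/db"

let conf = Conf(dataDir: "/tmp/nbc")
echo databaseDir(conf)            # already a string, no conversion needed
# echo string databaseDir(conf)   # redundant conversion, triggers the hint
```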
@@ -219,10 +216,10 @@ template withState(
   updateStateData(pool, cache, blockSlot)
 
-  template hashedState(): HashedBeaconState {.inject.} = cache.data
-  template state(): BeaconState {.inject.} = cache.data.data
-  template blck(): BlockRef {.inject.} = cache.blck
-  template root(): Eth2Digest {.inject.} = cache.data.root
+  template hashedState(): HashedBeaconState {.inject, used.} = cache.data
+  template state(): BeaconState {.inject, used.} = cache.data.data
+  template blck(): BlockRef {.inject, used.} = cache.blck
+  template root(): Eth2Digest {.inject, used.} = cache.data.root
 
   body
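`withState` injects these templates into whatever `body` the caller supplies, and a given body typically uses only some of them. Adding `{.used.}` marks the injected symbols as intentionally possibly unused, so the compiler stops emitting XDeclaredButNotUsed hints for the ones a particular body ignores. A self-contained sketch of the same idea with made-up names:

```nim
# Sketch only: a made-up wrapper template showing {.inject, used.} on an
# injected symbol; the block keeps each expansion in its own scope.
template withAnswer(body: untyped) =
  block:
    template answer(): int {.inject, used.} = 42
    body

withAnswer:
  echo "this body never reads `answer`"   # no unused-symbol hint, thanks to {.used.}

withAnswer:
  echo answer()                           # -> 42
```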
@@ -1056,8 +1053,7 @@ when isMainModule:
   of query:
     var
-      trieDB = trieDB newChainDb(string config.databaseDir)
-      db = BeaconChainDB.init(trieDB)
+      trieDB = trieDB newChainDb(config.databaseDir)
 
     case config.queryCmd
     of QueryCmd.nimQuery:
@@ -65,9 +65,6 @@ type
 
   ProtocolInfo* = ptr ProtocolInfoObj
 
-  CompressedMsgId = tuple
-    protocolIdx, methodId: int
-
   ResponseCode* = enum
     Success
     InvalidRequest
@@ -65,9 +65,6 @@ type
 
   ProtocolInfo* = ptr ProtocolInfoObj
 
-  CompressedMsgId = tuple
-    protocolIdx, methodId: int
-
   ResponseCode* = enum
     Success
     InvalidRequest
@@ -315,7 +315,7 @@ func process_registry_updates*(state: var BeaconState) =
     if i.uint64 >= churn_limit:
       break
     let
-      (epoch, index) = epoch_and_index
+      (_, index) = epoch_and_index
       validator = addr state.validators[index]
     if validator.activation_epoch == FAR_FUTURE_EPOCH:
       validator.activation_epoch =
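Binding the unused `epoch` element to `_` discards it during tuple unpacking, so no named-but-never-read variable remains for the compiler to warn about. A standalone illustration:

```nim
# Sketch only: `_` in a tuple unpacking discards that position entirely.
let epoch_and_index = (epoch: 7'u64, index: 3)

let (_, index) = epoch_and_index   # only `index` gets a name
echo index                         # -> 3
```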
@@ -359,7 +359,7 @@ func get_attesting_indices*(state: BeaconState,
                             stateCache: var StateCache):
                             HashSet[ValidatorIndex] =
   # Return the set of attesting indices corresponding to ``data`` and ``bits``.
-  result = initSet[ValidatorIndex]()
+  result = initHashSet[ValidatorIndex]()
   let committee = get_beacon_committee(state, data.slot, data.index, stateCache)
   for i, index in committee:
     if bits[i]:
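`initSet` was deprecated in the Nim standard library (around Nim 0.20) in favour of `initHashSet`, and every use of the old name produces a deprecation warning, so the fix is a one-for-one rename:

```nim
import sets

var indices = initHashSet[int]()   # previously: initSet[int]()
indices.incl 3
indices.incl 11
echo indices.card                  # -> 2
```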
@@ -27,7 +27,7 @@ func shortLog*(x: Checkpoint): string =
 func get_attesting_indices*(
     state: BeaconState, attestations: openarray[PendingAttestation],
     stateCache: var StateCache): HashSet[ValidatorIndex] =
-  result = initSet[ValidatorIndex]()
+  result = initHashSet[ValidatorIndex]()
   for a in attestations:
     result = result.union(get_attesting_indices(
       state, a.data, a.aggregation_bits, stateCache))
@@ -1,5 +1,5 @@
 import
-  stew/objects, stew/ranges/ptr_arith,
+  stew/[ptrops, objects], stew/ranges/ptr_arith,
   ./types, ./bytes_reader
 
 type
@@ -44,11 +44,11 @@ func navigateToField*[T](n: SszNavigator[T],
 
   when isFixedSize(SszFieldType):
     SszNavigator[FieldType](m: MemRange(
-      startAddr: shift(n.m.startAddr, boundingOffsets[0]),
+      startAddr: offset(n.m.startAddr, boundingOffsets[0]),
       length: boundingOffsets[1] - boundingOffsets[0]))
   else:
-    template readOffset(offset): int =
-      int fromSszBytes(uint32, makeOpenArray(shift(n.m.startAddr, offset),
+    template readOffset(off): int =
+      int fromSszBytes(uint32, makeOpenArray(offset(n.m.startAddr, off),
                                              sizeof(uint32)))
     let
       startOffset = readOffset boundingOffsets[0]
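The SSZ navigator hunks replace `shift` from `stew/ranges/ptr_arith` with `offset` from the newly imported `stew/ptrops`; both advance a raw pointer by a byte count, and the rename presumably moves off the older, warning-prone module (my reading of the diff, which only shows the substitution). A dependency-free stand-in for what such a helper does:

```nim
# Sketch only: `byteOffset` is a hypothetical stand-in for a pointer-offset
# helper like stew/ptrops.offset; it is not the library's actual code.
func byteOffset(p: pointer, n: int): pointer =
  cast[pointer](cast[uint](p) + uint(n))

var buf = [byte 10, 20, 30, 40]
let start = addr buf[0]
let third = cast[ptr byte](byteOffset(start, 2))
echo third[]   # -> 30
```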
@@ -59,7 +59,7 @@ func navigateToField*[T](n: SszNavigator[T],
       raise newException(MalformedSszError, "Incorrect offset values")
 
     SszNavigator[FieldType](m: MemRange(
-      startAddr: shift(n.m.startAddr, startOffset),
+      startAddr: offset(n.m.startAddr, startOffset),
       length: endOffset - startOffset))
 
 template `.`*[T](n: SszNavigator[T], field: untyped): auto =
@@ -69,7 +69,7 @@ template `.`*[T](n: SszNavigator[T], field: untyped): auto =
 
 func indexVarSizeList(m: MemRange, idx: int): MemRange =
   template readOffset(pos): int =
-    int fromSszBytes(uint32, makeOpenArray(shift(m.startAddr, pos), offsetSize))
+    int fromSszBytes(uint32, makeOpenArray(offset(m.startAddr, pos), offsetSize))
 
   let offsetPos = offsetSize * idx
   checkBounds(m, offsetPos + offsetSize)
@@ -93,7 +93,7 @@ func indexVarSizeList(m: MemRange, idx: int): MemRange =
     else:
       m.length
 
-  MemRange(startAddr: m.startAddr.shift(elemPos), length: endPos - elemPos)
+  MemRange(startAddr: m.startAddr.offset(elemPos), length: endPos - elemPos)
 
 template indexList(n, idx, T: untyped): untyped =
   type R = T
@@ -103,7 +103,7 @@ template indexList(n, idx, T: untyped): untyped =
     const elemSize = fixedPortionSize(ElemType)
     let elemPos = idx * elemSize
     checkBounds(n.m, elemPos + elemSize)
-    SszNavigator[R](m: MemRange(startAddr: shift(n.m.startAddr, elemPos),
+    SszNavigator[R](m: MemRange(startAddr: offset(n.m.startAddr, elemPos),
                                 length: elemSize))
   else:
     SszNavigator[R](m: indexVarSizeList(n.m, idx))