rm unused Nim modules (#2270)

tersec 2024-06-01 10:49:46 +00:00 committed by GitHub
parent bda760f41d
commit cfbbcda4f7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 0 additions and 1776 deletions

View File

@@ -1,59 +0,0 @@
# Nimbus
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
./hardforks
#[
* - [EIP-1153](https://eips.ethereum.org/EIPS/eip-1153) - Transient Storage Opcodes (`experimental`)
* - [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) - EIP-1559 Fee Market
* - [EIP-2315](https://eips.ethereum.org/EIPS/eip-2315) - VM simple subroutines (`experimental`)
* - [EIP-2537](https://eips.ethereum.org/EIPS/eip-2537) - BLS12-381 precompiles (`experimental`)
* - [EIP-2565](https://eips.ethereum.org/EIPS/eip-2565) - ModExp Gas Cost
* - [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718) - Typed Transactions
* - [EIP-2929](https://eips.ethereum.org/EIPS/eip-2929) - Gas cost increases for state access opcodes
* - [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930) - Access List Transaction Type
* - [EIP-3198](https://eips.ethereum.org/EIPS/eip-3198) - BASEFEE opcode
* - [EIP-3529](https://eips.ethereum.org/EIPS/eip-3529) - Reduction in refunds
* - [EIP-3540](https://eips.ethereum.org/EIPS/eip-3540) - EVM Object Format (EOF) v1 (`experimental`)
* - [EIP-3541](https://eips.ethereum.org/EIPS/eip-3541) - Reject new contracts starting with the 0xEF byte
* - [EIP-3651](https://eips.ethereum.org/EIPS/eip-3651) - Warm COINBASE (`experimental`)
* - [EIP-3670](https://eips.ethereum.org/EIPS/eip-3670) - EOF - Code Validation (`experimental`)
* - [EIP-3855](https://eips.ethereum.org/EIPS/eip-3855) - PUSH0 instruction (`experimental`)
* - [EIP-3860](https://eips.ethereum.org/EIPS/eip-3860) - Limit and meter initcode (`experimental`)
* - [EIP-4399](https://eips.ethereum.org/EIPS/eip-4399) - Supplant DIFFICULTY opcode with PREVRANDAO (Merge)
* - [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) - Beacon chain push withdrawals as operations (`experimental`)
* - [EIP-5133](https://eips.ethereum.org/EIPS/eip-5133) - Delaying Difficulty Bomb to mid-September 2022
]#
type
EIP* = enum
EIP3541
EIP3670
EIP1559
EIP2537
EIP4895
ForkToEIP* = array[HardFork, set[EIP]]
func makeForkToEIP(): ForkToEIP {.compileTime.} =
var map: ForkToEIP
# example:
# map[London] = {EIP1559}
# map[Shanghai] = {EIP3541,EIP3670}
# the latest fork will accumulate most EIPs
for fork in HardFork:
result[fork] = map[fork]
if fork > Frontier:
result[fork].incl result[pred(fork)] # accumulate the EIPs of all earlier forks
const
ForkToEipList* = makeForkToEIP()
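For context, a minimal same-module usage sketch (fork names such as `Shanghai` come from `./hardforks` and are an assumption here):

func isActiveEip(eip: EIP, fork: HardFork): bool =
  # Membership test on the accumulated per-fork EIP set.
  eip in ForkToEipList[fork]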

View File

@@ -1,28 +0,0 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# This code was duplicated enough times around the codebase
# that it seemed worth factoring it out.
import
stint,
eth/[common, rlp]
proc accountFromBytes*(accountBytes: seq[byte]): Account =
if accountBytes.len > 0:
rlp.decode(accountBytes, Account)
else:
newAccount()
proc slotValueFromBytes*(rec: seq[byte]): UInt256 =
if rec.len > 0:
rlp.decode(rec, UInt256)
else:
UInt256.zero()
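A same-module usage sketch (a hypothetical check; `newAccount` is the same `eth/common` helper the fallback above already uses):

let acc = newAccount()
# A freshly encoded account round-trips through the decoder.
doAssert accountFromBytes(rlp.encode(acc)).nonce == acc.nonce
# An empty record falls back to the zero value.
doAssert slotValueFromBytes(newSeq[byte]()) == UInt256.zero()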

View File

@@ -1,231 +0,0 @@
# Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import blscurve/miracl/[common, milagro]
# IETF Standard Draft: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-10
# The Hash-To-Curve v7 is binary compatible with Hash-To-Curve v9, v10
# Constants for the 11-isogeny map for BLS12-381 G1. Appendix E.2
const
xNumHex = [
"0x11a05f2b1e833340b809101dd99815856b303e88a2d7005ff2627b56cdb4e2c85610c2d5f2e62d6eaeac1662734649b7",
"0x17294ed3e943ab2f0588bab22147a81c7c17e75b2f6a8417f565e33c70d1e86b4838f2a6f318c356e834eef1b3cb83bb",
"0x0d54005db97678ec1d1048c5d10a9a1bce032473295983e56878e501ec68e25c958c3e3d2a09729fe0179f9dac9edcb0",
"0x1778e7166fcc6db74e0609d307e55412d7f5e4656a8dbf25f1b33289f1b330835336e25ce3107193c5b388641d9b6861",
"0x0e99726a3199f4436642b4b3e4118e5499db995a1257fb3f086eeb65982fac18985a286f301e77c451154ce9ac8895d9",
"0x1630c3250d7313ff01d1201bf7a74ab5db3cb17dd952799b9ed3ab9097e68f90a0870d2dcae73d19cd13c1c66f652983",
"0x0d6ed6553fe44d296a3726c38ae652bfb11586264f0f8ce19008e218f9c86b2a8da25128c1052ecaddd7f225a139ed84",
"0x17b81e7701abdbe2e8743884d1117e53356de5ab275b4db1a682c62ef0f2753339b7c8f8c8f475af9ccb5618e3f0c88e",
"0x080d3cf1f9a78fc47b90b33563be990dc43b756ce79f5574a2c596c928c5d1de4fa295f296b74e956d71986a8497e317",
"0x169b1f8e1bcfa7c42e0c37515d138f22dd2ecb803a0c5c99676314baf4bb1b7fa3190b2edc0327797f241067be390c9e",
"0x10321da079ce07e272d8ec09d2565b0dfa7dccdde6787f96d50af36003b14866f69b771f8c285decca67df3f1605fb7b",
"0x06e08c248e260e70bd1e962381edee3d31d79d7e22c837bc23c0bf1bc24c6b68c24b1b80b64d391fa9c8ba2e8ba2d229"
]
xDenHex = [
"0x08ca8d548cff19ae18b2e62f4bd3fa6f01d5ef4ba35b48ba9c9588617fc8ac62b558d681be343df8993cf9fa40d21b1c",
"0x12561a5deb559c4348b4711298e536367041e8ca0cf0800c0126c2588c48bf5713daa8846cb026e9e5c8276ec82b3bff",
"0x0b2962fe57a3225e8137e629bff2991f6f89416f5a718cd1fca64e00b11aceacd6a3d0967c94fedcfcc239ba5cb83e19",
"0x03425581a58ae2fec83aafef7c40eb545b08243f16b1655154cca8abc28d6fd04976d5243eecf5c4130de8938dc62cd8",
"0x13a8e162022914a80a6f1d5f43e7a07dffdfc759a12062bb8d6b44e833b306da9bd29ba81f35781d539d395b3532a21e",
"0x0e7355f8e4e667b955390f7f0506c6e9395735e9ce9cad4d0a43bcef24b8982f7400d24bc4228f11c02df9a29f6304a5",
"0x0772caacf16936190f3e0c63e0596721570f5799af53a1894e2e073062aede9cea73b3538f0de06cec2574496ee84a3a",
"0x14a7ac2a9d64a8b230b3f5b074cf01996e7f63c21bca68a81996e1cdf9822c580fa5b9489d11e2d311f7d99bbdcc5a5e",
"0x0a10ecf6ada54f825e920b3dafc7a3cce07f8d1d7161366b74100da67f39883503826692abba43704776ec3a79a1d641",
"0x095fc13ab9e92ad4476d6e3eb3a56680f682b4ee96f7d03776df533978f31c1593174e4b4b7865002d6384d168ecdd0a",
"0x01"
]
yNumHex = [
"0x090d97c81ba24ee0259d1f094980dcfa11ad138e48a869522b52af6c956543d3cd0c7aee9b3ba3c2be9845719707bb33",
"0x134996a104ee5811d51036d776fb46831223e96c254f383d0f906343eb67ad34d6c56711962fa8bfe097e75a2e41c696",
"0x00cc786baa966e66f4a384c86a3b49942552e2d658a31ce2c344be4b91400da7d26d521628b00523b8dfe240c72de1f6",
"0x01f86376e8981c217898751ad8746757d42aa7b90eeb791c09e4a3ec03251cf9de405aba9ec61deca6355c77b0e5f4cb",
"0x08cc03fdefe0ff135caf4fe2a21529c4195536fbe3ce50b879833fd221351adc2ee7f8dc099040a841b6daecf2e8fedb",
"0x16603fca40634b6a2211e11db8f0a6a074a7d0d4afadb7bd76505c3d3ad5544e203f6326c95a807299b23ab13633a5f0",
"0x04ab0b9bcfac1bbcb2c977d027796b3ce75bb8ca2be184cb5231413c4d634f3747a87ac2460f415ec961f8855fe9d6f2",
"0x0987c8d5333ab86fde9926bd2ca6c674170a05bfe3bdd81ffd038da6c26c842642f64550fedfe935a15e4ca31870fb29",
"0x09fc4018bd96684be88c9e221e4da1bb8f3abd16679dc26c1e8b6e6a1f20cabe69d65201c78607a360370e577bdba587",
"0x0e1bba7a1186bdb5223abde7ada14a23c42a0ca7915af6fe06985e7ed1e4d43b9b3f7055dd4eba6f2bafaaebca731c30",
"0x19713e47937cd1be0dfd0b8f1d43fb93cd2fcbcb6caf493fd1183e416389e61031bf3a5cce3fbafce813711ad011c132",
"0x18b46a908f36f6deb918c143fed2edcc523559b8aaf0c2462e6bfe7f911f643249d9cdf41b44d606ce07c8a4d0074d8e",
"0x0b182cac101b9399d155096004f53f447aa7b12a3426b08ec02710e807b4633f06c851c1919211f20d4c04f00b971ef8",
"0x0245a394ad1eca9b72fc00ae7be315dc757b3b080d4c158013e6632d3c40659cc6cf90ad1c232a6442d9d3f5db980133",
"0x05c129645e44cf1102a159f748c4a3fc5e673d81d7e86568d9ab0f5d396a7ce46ba1049b6579afb7866b1e715475224b",
"0x15e6be4e990f03ce4ea50b3b42df2eb5cb181d8f84965a3957add4fa95af01b2b665027efec01c7704b456be69c8b604"
]
yDenHex = [
"0x16112c4c3a9c98b252181140fad0eae9601a6de578980be6eec3232b5be72e7a07f3688ef60c206d01479253b03663c1",
"0x1962d75c2381201e1a0cbd6c43c348b885c84ff731c4d59ca4a10356f453e01f78a4260763529e3532f6102c2e49a03d",
"0x058df3306640da276faaae7d6e8eb15778c4855551ae7f310c35a5dd279cd2eca6757cd636f96f891e2538b53dbf67f2",
"0x16b7d288798e5395f20d23bf89edb4d1d115c5dbddbcd30e123da489e726af41727364f2c28297ada8d26d98445f5416",
"0x0be0e079545f43e4b00cc912f8228ddcc6d19c9f0f69bbb0542eda0fc9dec916a20b15dc0fd2ededda39142311a5001d",
"0x08d9e5297186db2d9fb266eaac783182b70152c65550d881c5ecd87b6f0f5a6449f38db9dfa9cce202c6477faaf9b7ac",
"0x166007c08a99db2fc3ba8734ace9824b5eecfdfa8d0cf8ef5dd365bc400a0051d5fa9c01a58b1fb93d1a1399126a775c",
"0x16a3ef08be3ea7ea03bcddfabba6ff6ee5a4375efa1f4fd7feb34fd206357132b920f5b00801dee460ee415a15812ed9",
"0x1866c8ed336c61231a1be54fd1d74cc4f9fb0ce4c6af5920abc5750c4bf39b4852cfe2f7bb9248836b233d9d55535d4a",
"0x167a55cda70a6e1cea820597d94a84903216f763e13d87bb5308592e7ea7d4fbc7385ea3d529b35e346ef48bb8913f55",
"0x04d2f259eea405bd48f010a01ad2911d9c6dd039bb61a6290e591b36e636a5c871a5c29f4f83060400f8b49cba8f6aa8",
"0x0accbb67481d033ff5852c1e48c50c477f94ff8aefce42d28c0f9a88cea7913516f968986f7ebbea9684b529e2561092",
"0x0ad6b9514c767fe3c3613144b45f1496543346d98adf02267d5ceef9a00d9b8693000763e3b90ac11e99b138573345cc",
"0x02660400eb2e4f3b628bdd0d53cd76f2bf565b94e72927c1cb748df27942480e420517bd8714cc80d1fadc1326ed06f7",
"0x0e0fa1d816ddc03e6b24255e0d7819c171c40f65e273b853324efcd6356caa205ca2f570f13497804415473a1d634b8f",
"0x01"
]
func hexToFP(hex: string): FP_BLS12381 =
var big: BIG_384
discard big.fromHex(hex)
big.nres()
func hexToBig(hex: string): BIG_384 {.inline.} =
discard result.fromHex(hex)
# syntactic sugars
proc `*=`(a: var FP_BLS12381, b: FP_BLS12381) {.inline.} =
FP_BLS12381_mul(a.addr, a.addr, b.unsafeAddr)
proc `*`(a: FP_BLS12381, b: FP_BLS12381): FP_BLS12381 {.inline.} =
FP_BLS12381_mul(result.addr, a.unsafeAddr, b.unsafeAddr)
proc `+`(a: FP_BLS12381, b: FP_BLS12381): FP_BLS12381 {.inline.} =
FP_BLS12381_add(result.addr, a.unsafeAddr, b.unsafeAddr)
proc `+=`(a: var FP_BLS12381, b: FP_BLS12381) {.inline.} =
FP_BLS12381_add(a.addr, a.addr, b.unsafeAddr)
proc inv(a: FP_BLS12381): FP_BLS12381 {.inline.} =
FP_BLS12381_inv(result.addr, a.unsafeAddr, nil)
proc `/`(a, b: FP_BLS12381): FP_BLS12381 {.inline.} =
result = a * inv(b)
proc inc(a: var FP_BLS12381) {.inline.} =
var one: FP_BLS12381
FP_BLS12381_one(addr one)
FP_BLS12381_add(addr a, addr a, addr one)
proc cmov(a: var FP_BLS12381, b: FP_BLS12381, c: bool) {.inline.} =
# branchless conditional move
FP_BLS12381_cmove(addr a, unsafeAddr b, cint(c))
proc cmov(a: FP_BLS12381, b: FP_BLS12381, c: bool): FP_BLS12381 {.inline.} =
# branchless conditional move
result = a
FP_BLS12381_cmove(addr result, unsafeAddr b, cint(c))
func isSquare(a: FP_BLS12381): bool {.inline.} =
# returns true if `a` is a quadratic residue
FP_BLS12381_qr(unsafeAddr a, nil) == 1
proc sqrt(a: FP_BLS12381): FP_BLS12381 {.inline.} =
FP_BLS12381_sqrt(addr result, unsafeAddr a, nil)
func sign0(x: FP_BLS12381): bool {.inline.} =
# The sgn0 function. Section 4.1
when false:
const
sign_0 = 0
zero_0 = 1
let sign_1 = x.parity()
# hope the compiler can optimize this
bool(sign_0 or (zero_0 and sign_1))
else:
bool x.parity
func initArray[N: static[int]](hex: array[N, string]): array[N, FP_BLS12381] =
for i in 0..<N:
result[i] = hex[i].hexToFP
func evalPoly(x: FP_BLS12381, c: openArray[FP_BLS12381]): FP_BLS12381 =
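# Horner's rule: evaluates c[0] + c[1]*x + ... + c[n]*x^n starting from the
# highest coefficient, i.e. (((c[n])*x + c[n-1])*x + ...)*x + c[0].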
# Note: 32-bit builds use 29-bit limbs, so at most 3 additions can be done
# before normalizing; in the tests this has not been a problem.
result = c[^1]
let NN = c.len - 1
for i in 1..<c.len:
result *= x
result += c[NN - i]
result.norm
func init(z: var ECP_BLS12381, x, y: FP_BLS12381) =
var xx, yy: BIG_384
xx.FP_BLS12381_redc(unsafeAddr x)
yy.FP_BLS12381_redc(unsafeAddr y)
discard ECP_BLS12381_set(addr z, xx, yy)
func isogenyMapG1(xp, yp: FP_BLS12381): ECP_BLS12381 =
# 11-isogeny map for BLS12-381 G1. Appendix E.2
# we use globals to ensure they are computed only once.
{.noSideEffect.}:
let
g1xnum {.global.} = initArray(xNumHex)
g1xden {.global.} = initArray(xDenHex)
g1ynum {.global.} = initArray(yNumHex)
g1yden {.global.} = initArray(yDenHex)
let
xn = evalPoly(xp, g1xnum)
xd = evalPoly(xp, g1xden)
yn = evalPoly(xp, g1ynum)
yd = evalPoly(xp, g1yden)
x = xn / xd
y = yp * yn / yd
result.init(x, y)
func mapToIsoCurveSSWU(u: FP_BLS12381): tuple[x, y: FP_BLS12381] =
# BLS12-381 G1 Suite. Section 8.8.1
{.noSideEffect.}:
let
A {.global.} = hexToFP "0x00144698a3b8e9433d693a02c96d4982b0ea985383ee66a8d8e8981aefd881ac98936f8da0e0f97f5cf428082d584c1d"
B {.global.} = hexToFP "0x12e2908d11688030018b12e8753eee3b2016c1f0f24f4070a0b9c14fcef35ef55a23215a316ceaa5d1cc48e98e172be0"
Z {.global.} = hexToFP "0x0B" # 11
c1 {.global.} = neg B/A # -B/A
c2 {.global.} = neg inv(Z) # -1/Z
# Simplified Shallue-van de Woestijne-Ulas method. Appendix F.2.
let tv1 = Z * sqr(u)
var tv2 = sqr(tv1)
var x1 = tv1 + tv2
x1 = inv(x1) # TODO: Spec defines inv0(0) == 0; inv0(x) == x^(q-2)
let e1 = x1.isZilch()
inc x1 # no norm needed when adding one
x1.cmov(c2, e1) # If (tv1 + tv2) == 0, set x1 = -1 / Z
x1 = x1 * c1 # x1 = (-B / A) * (1 + (1 / (Z² * u^4 + Z * u²)))
var gx1 = sqr(x1)
gx1 = gx1 + A; gx1.norm()
gx1 = gx1 * x1
gx1 = gx1 + B; gx1.norm() # gx1 = g(x1) = x1³ + A * x1 + B
let x2 = tv1 * x1 # x2 = Z * u² * x1
tv2 = tv1 * tv2
let gx2 = gx1 * tv2 # gx2 = (Z * u²)³ * gx1
let e2 = gx1.isSquare()
let x = cmov(x2, x1, e2) # If is_square(gx1), x = x1, else x = x2
let y2 = cmov(gx2, gx1, e2) # If is_square(gx1), y2 = gx1, else y2 = gx2
var y = sqrt(y2)
let e3 = u.sign0() == y.sign0() # Fix sign of y
y = cmov(neg y, y, e3)
result.x = x
result.y = y
func mapToCurveG1*(u: FP_BLS12381): ECP_BLS12381 =
when false:
{.noSideEffect.}:
let cofactor {.global.} = hexToBig("d201000000010001")
let p = mapToIsoCurveSSWU(u)
result = isogenyMapG1(p.x, p.y)
result.mul cofactor
else:
let p = mapToIsoCurveSSWU(u)
result = isogenyMapG1(p.x, p.y)
ECP_BLS12381_cfp(addr result)
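To make the control flow above concrete, here is a self-contained toy sketch of the simplified SWU map over a tiny prime field. All parameters (`P`, `A`, `B`, `Z`) are made-up demo values rather than the BLS12-381 constants, and the constant-time MIRACL field operations are replaced by plain integer arithmetic:

const
  P = 103   # toy prime with P mod 4 == 3, so sqrt(a) = a^((P+1) div 4)
  A = 1     # toy curve y^2 = x^3 + A*x + B over GF(P)
  B = 3
  Z = 5     # a quadratic non-residue mod P

proc powMod(b0, e0: int): int =
  # square-and-multiply modular exponentiation
  var (b, e, r) = (b0 mod P, e0, 1)
  while e > 0:
    if (e and 1) == 1: r = r * b mod P
    b = b * b mod P
    e = e shr 1
  r

proc inv0(a: int): int = powMod(a, P - 2)   # spec convention: inv0(0) == 0
proc isSquareMod(a: int): bool = a == 0 or powMod(a, (P - 1) div 2) == 1
proc sqrtMod(a: int): int = powMod(a, (P + 1) div 4)
proc g(x: int): int = (powMod(x, 3) + A * x + B) mod P

proc mapToCurveToy(u: int): (int, int) =
  let tv1 = Z * u * u mod P                             # Z * u^2
  var x1 = (P - B) * inv0(A) mod P                      # -B/A ...
  x1 = x1 * (1 + inv0((tv1 * tv1 + tv1) mod P)) mod P   # ... * (1 + 1/(Z^2*u^4 + Z*u^2))
  var x = x1
  if not isSquareMod(g(x1)):
    x = tv1 * x1 mod P    # x2 = Z*u^2*x1; g(x2) is square when g(x1) is not
  (x, sqrtMod(g(x)))

let (x, y) = mapToCurveToy(2)
doAssert y * y mod P == g(x)   # the result lies on the toy curve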

View File

@@ -1,8 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# not implemented

View File

@@ -1,577 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/sequtils,
chronicles,
chronos,
eth/[common, p2p, trie/db, trie/nibbles],
stew/[byteutils, interval_set],
../../core/chain,
../../db/core_db,
../snap/[constants, range_desc],
../snap/worker/db/[hexary_desc, hexary_error, hexary_paths,
snapdb_persistent, hexary_range],
../protocol,
../protocol/snap/snap_types
logScope:
topics = "snap-wire"
type
SnapWireRef* = ref object of SnapWireBase
chain: ChainRef
elaFetchMax: chronos.Duration
dataSizeMax: int
peerPool: PeerPool
SlotsSpecs = object
slotFn: HexaryGetFn # For accessing storage slots
stoRoot: NodeKey # Storage root
const
extraTraceMessages = false # or true
## Enables additional logging noise
estimatedProofSize = hexaryRangeRlpNodesListSizeMax(10)
## Some expected upper limit, typically not more than 10 proof nodes
emptySnapStorageList = seq[SnapStorage].default
## Dummy list for empty slots
defaultElaFetchMax = 990.milliseconds
## Fetching accounts or slots can be extensive, stop in the middle if
## it takes too long
defaultDataSizeMax = fetchRequestBytesLimit
## Truncate maximum data size
when false:
const
estimatedNodeSize = hexaryRangeRlpNodesListSizeMax(1)
## Some expected upper limit for a single node
# ------------------------------------------------------------------------------
# Private functions: helpers
# ------------------------------------------------------------------------------
template logTxt(info: static[string]): static[string] =
"handlers.snap." & info
proc notImplemented(name: string) {.used.} =
debug "Wire handler method not implemented", meth=name
# ----------------------------------
proc getAccountFn(
ctx: SnapWireRef;
): HexaryGetFn
{.gcsafe.} =
# The snap sync implementation provides a function `persistentAccountGetFn()`
# similar to this one. But it is not safe to use it at the moment as the
# storage table might (or might not) differ.
let db = ctx.chain.com.db
return proc(key: openArray[byte]): Blob =
if db.isLegacy:
return db.newKvt.backend.toLegacy.get(key)
proc getStoSlotFn(
ctx: SnapWireRef;
accKey: NodeKey;
): HexaryGetFn
{.gcsafe.} =
# The snap sync implementation provides a function
# `persistentStorageSlotsGetFn()` similar to this one. But it is not safe to
# use it at the moment as the storage table might (or might not) differ.
let db = ctx.chain.com.db
return proc(key: openArray[byte]): Blob =
if db.isLegacy:
return db.newKvt.backend.toLegacy.get(key)
proc getCodeFn(
ctx: SnapWireRef;
): HexaryGetFn
{.gcsafe.} =
# It is safe to borrow this function from the snap sync implementation.
ctx.chain.com.db.persistentContractsGetFn
# ----------------------------------
proc to(
rl: RangeLeaf;
T: type SnapAccount;
): T
{.gcsafe, raises: [RlpError].} =
## Convert the generic `RangeLeaf` argument to payload type.
T(accHash: rl.key.to(Hash256),
accBody: rl.data.decode(Account))
proc to(
rl: RangeLeaf;
T: type SnapStorage;
): T
{.gcsafe.} =
## Convert the generic `RangeLeaf` argument to payload type.
T(slotHash: rl.key.to(Hash256),
slotData: rl.data)
# ------------------------------------------------------------------------------
# Private functions: fetch leaf range
# ------------------------------------------------------------------------------
proc getSlotsSpecs(
ctx: SnapWireRef; # Handler descriptor
rootKey: NodeKey; # State root
accGetFn: HexaryGetFn; # Database abstraction
accKey: NodeKey; # Current account
): Result[SlotsSpecs,void]
{.gcsafe, raises: [CatchableError].} =
## Retrieve storage slots specs from account data
let accData = accKey.hexaryPath(rootKey, accGetFn).leafData
# Ignore missing account entry
if accData.len == 0:
when extraTraceMessages:
trace logTxt "getSlotsSpecs: no such account", accKey, rootKey
return err()
# Ignore empty storage list
let stoRoot = rlp.decode(accData,Account).storageRoot
if stoRoot == EMPTY_ROOT_HASH:
when extraTraceMessages:
trace logTxt "getSlotsSpecs: no slots", accKey
return err()
ok(SlotsSpecs(
slotFn: ctx.getStoSlotFn(accKey),
stoRoot: stoRoot.to(NodeKey)))
iterator doTrieNodeSpecs(
ctx: SnapWireRef; # Handler descriptor
rootKey: NodeKey; # State root
pGroups: openArray[SnapTriePaths]; # Group of partial paths
): (NodeKey, HexaryGetFn, Blob, int)
{.gcsafe, raises: [CatchableError].} =
## Helper for `getTrieNodes()` to cycle over `pathGroups`
let accGetFn = ctx.getAccountFn
for w in pGroups:
# Special case: fetch account node
if w.slotPaths.len == 0:
yield (rootKey, accGetFn, w.accPath, 0)
continue
# Compile account key
var accKey: NodeKey
if accKey.init(w.accPath):
# Derive slot specs from accounts
let rc = ctx.getSlotsSpecs(rootKey, accGetFn, accKey)
if rc.isOk:
# Loop over slot paths
for path in w.slotPaths:
when extraTraceMessages:
trace logTxt "doTrieNodeSpecs",
rootKey=rc.value.stoRoot, slotPath=path.toHex
yield (rc.value.stoRoot, rc.value.slotFn, path, w.slotPaths.len)
continue
# Fail on this group
when extraTraceMessages:
trace logTxt "doTrieNodeSpecs (blind)", accPath=w.accPath.toHex,
nBlind=w.slotPaths.len, nBlind0=w.slotPaths[0].toHex
yield (NodeKey.default, nil, EmptyBlob, w.slotPaths.len)
proc mkNodeTagRange(
origin: openArray[byte];
limit: openArray[byte];
nAccounts = 1;
): Result[NodeTagRange,void] =
## Verify and convert range arguments to interval
var (minPt, maxPt) = (low(NodeTag), high(NodeTag))
if 0 < origin.len or 0 < limit.len:
# Range applies only if there is exactly one account. A number of accounts
# different from 1 may be used by `getStorageRanges()`
if nAccounts == 0:
return err() # oops: no account
# Verify range arguments
if not minPt.init(origin) or not maxPt.init(limit) or maxPt < minPt:
when extraTraceMessages:
trace logTxt "mkNodeTagRange: malformed range",
origin=origin.toHex, limit=limit.toHex
return err()
if 1 < nAccounts:
return ok(NodeTagRange.new(low(NodeTag), high(NodeTag)))
ok(NodeTagRange.new(minPt, maxPt))
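For illustration, empty `origin`/`limit` arguments fall through to the full address range (a same-module sketch; `EmptyBlob` comes from `../snap/constants`, imported above):

doAssert mkNodeTagRange(EmptyBlob, EmptyBlob).isOk   # whole [low .. high] range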
proc fetchLeafRange(
ctx: SnapWireRef; # Handler descriptor
getFn: HexaryGetFn; # Database abstraction
rootKey: NodeKey; # State root
iv: NodeTagRange; # Proofed range of leaf paths
replySizeMax: int; # Updated size counter for the raw list
stopAt: Moment; # Implies timeout
): Result[RangeProof,HexaryError]
{.gcsafe, raises: [CatchableError].} =
## Generic leaf fetcher
let
sizeMax = replySizeMax - estimatedProofSize
now = Moment.now()
timeout = if now < stopAt: stopAt - now else: 1.milliseconds
rc = getFn.hexaryRangeLeafsProof(rootKey, iv, sizeMax, timeout)
if rc.isErr:
error logTxt "fetchLeafRange: database problem",
iv, replySizeMax, error=rc.error
return rc # database error
let sizeOnWire = rc.value.leafsSize + rc.value.proofSize
if sizeOnWire <= replySizeMax:
return rc
# Estimate the overhead size on wire needed for a single leaf tail item
const leafExtraSize = (sizeof RangeLeaf()) - (sizeof newSeq[Blob](0))
let nLeafs = rc.value.leafs.len
when extraTraceMessages:
trace logTxt "fetchLeafRange: reducing reply sample",
iv, sizeOnWire, replySizeMax, nLeafs
# Strip parts of leafs result and amend remainder by adding proof nodes
var (tailSize, tailItems, reduceBy) = (0, 0, replySizeMax - sizeOnWire)
while tailSize <= reduceBy:
tailItems.inc
if nLeafs <= tailItems:
when extraTraceMessages:
trace logTxt "fetchLeafRange: stripping leaf list failed",
iv, replySizeMax, nLeafs, tailItems
return err(DataSizeError) # empty tail (package size too small)
tailSize += rc.value.leafs[^tailItems].data.len + leafExtraSize
# Provide truncated leafs list
let
leafProof = getFn.hexaryRangeLeafsProof(
rootKey, RangeProof(leafs: rc.value.leafs[0 ..< nLeafs - tailItems]))
strippedSizeOnWire = leafProof.leafsSize + leafProof.proofSize
if strippedSizeOnWire <= replySizeMax:
return ok(leafProof)
when extraTraceMessages:
trace logTxt "fetchLeafRange: data size problem",
iv, replySizeMax, nLeafs, tailItems, strippedSizeOnWire
err(DataSizeError)
# ------------------------------------------------------------------------------
# Private functions: peer observer
# ------------------------------------------------------------------------------
#proc onPeerConnected(ctx: SnapWireRef, peer: Peer) =
# debug "snapWire: add peer", peer
# discard
#
#proc onPeerDisconnected(ctx: SnapWireRef, peer: Peer) =
# debug "snapWire: remove peer", peer
# discard
#
#proc setupPeerObserver(ctx: SnapWireRef) =
# var po = PeerObserver(
# onPeerConnected:
# proc(p: Peer) {.gcsafe.} =
# ctx.onPeerConnected(p),
# onPeerDisconnected:
# proc(p: Peer) {.gcsafe.} =
# ctx.onPeerDisconnected(p))
# po.setProtocol protocol.snap
# ctx.peerPool.addObserver(ctx, po)
# ------------------------------------------------------------------------------
# Public constructor/destructor
# ------------------------------------------------------------------------------
proc init*(
T: type SnapWireRef;
chain: ChainRef;
peerPool: PeerPool;
): T =
## Constructor (uses `init()` as suggested in the style guide).
let ctx = T(
chain: chain,
elaFetchMax: defaultElaFetchMax,
dataSizeMax: defaultDataSizeMax,
peerPool: peerPool)
#ctx.setupPeerObserver()
ctx
# ------------------------------------------------------------------------------
# Public functions: helpers
# ------------------------------------------------------------------------------
proc proofEncode*(proof: seq[SnapProof]): Blob =
var writer = initRlpWriter()
writer.snapAppend SnapProofNodes(nodes: proof)
writer.finish
proc proofDecode*(data: Blob): seq[SnapProof] {.gcsafe, raises: [RlpError].} =
var reader = data.rlpFromBytes
reader.snapRead(SnapProofNodes).nodes
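A hypothetical sanity check, assuming the `snapAppend`/`snapRead` pair encodes an empty node list as an empty RLP list:

doAssert proofDecode(proofEncode(@[])).len == 0   # empty proof list round-trips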
# ------------------------------------------------------------------------------
# Public functions: snap wire protocol handlers
# ------------------------------------------------------------------------------
method getAccountRange*(
ctx: SnapWireRef;
root: Hash256;
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): Result[(seq[SnapAccount], SnapProofNodes), string]
{.gcsafe.} =
## Fetch accounts list from database
let sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
if sizeMax <= estimatedProofSize:
when extraTraceMessages:
trace logTxt "getAccountRange: max data size too small",
origin=origin.toHex, limit=limit.toHex, sizeMax
return ok((@[], SnapProofNodes())) # package size too small
try:
let
rootKey = root.to(NodeKey)
iv = block: # Calculate effective accounts range (if any)
let rc = origin.mkNodeTagRange limit
if rc.isErr:
return ok((@[], SnapProofNodes())) # malformed interval
rc.value
stopAt = Moment.now() + ctx.elaFetchMax
rc = ctx.fetchLeafRange(ctx.getAccountFn, rootKey, iv, sizeMax, stopAt)
if rc.isErr:
return ok((@[], SnapProofNodes())) # extraction failed
let
accounts = rc.value.leafs.mapIt(it.to(SnapAccount))
proof = rc.value.proof
#when extraTraceMessages:
# trace logTxt "getAccountRange: done", iv, replySizeMax,
# nAccounts=accounts.len, nProof=proof.len
return ok((accounts, SnapProofNodes(nodes: proof)))
except CatchableError as exc:
return err(exc.msg)
method getStorageRanges*(
ctx: SnapWireRef;
root: Hash256;
accounts: openArray[Hash256];
origin: openArray[byte];
limit: openArray[byte];
replySizeMax: uint64;
): Result[(seq[seq[SnapStorage]], SnapProofNodes), string]
{.gcsafe.} =
## Fetch storage slots list from database
let sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
if sizeMax <= estimatedProofSize:
when extraTraceMessages:
trace logTxt "getStorageRanges: max data size too small",
origin=origin.toHex, limit=limit.toHex, sizeMax
return ok((@[], SnapProofNodes())) # package size too small
let
iv = block: # Calculate effective slots range (if any)
let rc = origin.mkNodeTagRange(limit, accounts.len)
if rc.isErr:
return ok((@[], SnapProofNodes())) # malformed interval
rc.value
rootKey = root.to(NodeKey)
accGetFn = ctx.getAccountFn
stopAt = Moment.now() + ctx.elaFetchMax
# Loop over accounts
var
dataAllocated = 0
timeExceeded = false
slotLists: seq[seq[SnapStorage]]
proof: seq[SnapProof]
try:
for accHash in accounts:
let sp = block:
let rc = ctx.getSlotsSpecs(rootKey, accGetFn, accHash.to(NodeKey))
if rc.isErr:
slotLists.add emptySnapStorageList
dataAllocated.inc # empty list
continue
rc.value
# Collect data slots for this account => `rangeProof`
let
sizeLeft = sizeMax - dataAllocated
rangeProof = block:
let rc = ctx.fetchLeafRange(sp.slotFn, sp.stoRoot, iv, sizeLeft, stopAt)
if rc.isErr:
when extraTraceMessages:
trace logTxt "getStorageRanges: failed", iv, sizeMax, sizeLeft,
accKey=accHash.to(NodeKey), stoRoot=sp.stoRoot, error=rc.error
return ok((@[], SnapProofNodes())) # extraction failed
rc.value
# Process data slots for this account
dataAllocated += rangeProof.leafsSize
when extraTraceMessages:
trace logTxt "getStorageRanges: data slots", iv, sizeMax, dataAllocated,
nAccounts=accounts.len, accKey=accHash.to(NodeKey), stoRoot=sp.stoRoot,
nSlots=rangeProof.leafs.len, nProof=rangeProof.proof.len
slotLists.add rangeProof.leafs.mapIt(it.to(SnapStorage))
if 0 < rangeProof.proof.len:
proof = rangeProof.proof
break # only last entry has a proof
# Stop unless there is enough space left
if sizeMax - dataAllocated <= estimatedProofSize:
break
if stopAt <= Moment.now():
timeExceeded = true
break
when extraTraceMessages:
trace logTxt "getStorageRanges: done", iv, sizeMax, dataAllocated,
nAccounts=accounts.len, nLeafLists=slotLists.len, nProof=proof.len,
timeExceeded
return ok((slotLists, SnapProofNodes(nodes: proof)))
except CatchableError as exc:
return err(exc.msg)
method getByteCodes*(
ctx: SnapWireRef;
nodes: openArray[Hash256];
replySizeMax: uint64;
): Result[seq[Blob], string]
{.gcsafe.} =
## Fetch contract codes from the database
let
sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
pfxMax = (hexaryRangeRlpSize sizeMax) - sizeMax # RLP list/blob pfx max
effSizeMax = sizeMax - pfxMax
stopAt = Moment.now() + ctx.elaFetchMax
getFn = ctx.getCodeFn
var
dataAllocated = 0
timeExceeded = false
list: seq[Blob]
when extraTraceMessages:
trace logTxt "getByteCodes", sizeMax, nNodes=nodes.len
try:
for w in nodes:
let data = w.data.toSeq.getFn
if 0 < data.len:
let effDataLen = hexaryRangeRlpSize data.len
if effSizeMax - effDataLen < dataAllocated:
break
dataAllocated += effDataLen
list.add data
else:
when extraTraceMessages:
trace logTxt "getByteCodes: empty record", sizeMax, nNodes=nodes.len,
key=w
if stopAt <= Moment.now():
timeExceeded = true
break
when extraTraceMessages:
trace logTxt "getByteCodes: done", sizeMax, dataAllocated,
nNodes=nodes.len, nResult=list.len, timeExceeded
return ok(list)
except CatchableError as exc:
return err(exc.msg)
method getTrieNodes*(
ctx: SnapWireRef;
root: Hash256;
pathGroups: openArray[SnapTriePaths];
replySizeMax: uint64;
): Result[seq[Blob], string]
{.gcsafe.} =
## Fetch nodes from the database
let
sizeMax = min(replySizeMax, ctx.dataSizeMax.uint64).int
someSlack = sizeMax.hexaryRangeRlpSize() - sizeMax
if sizeMax <= someSlack:
when extraTraceMessages:
trace logTxt "getTrieNodes: max data size too small",
root=root.to(NodeKey), nPathGroups=pathGroups.len, sizeMax, someSlack
return ok(newSeq[Blob]()) # package size too small
let
rootKey = root.to(NodeKey)
effSizeMax = sizeMax - someSlack
stopAt = Moment.now() + ctx.elaFetchMax
var
dataAllocated = 0
timeExceeded = false
list: seq[Blob]
try:
for (stateKey,getFn,partPath,n) in ctx.doTrieNodeSpecs(rootKey, pathGroups):
# Special case: no data available
if getFn.isNil:
if effSizeMax < dataAllocated + n:
break # no need to add trailing empty nodes
list &= EmptyBlob.repeat(n)
dataAllocated += n
continue
# Fetch node blob
let node = block:
let steps = partPath.hexPrefixDecode[1].hexaryPath(stateKey, getFn)
if 0 < steps.path.len and
steps.tail.len == 0 and steps.path[^1].nibble < 0:
steps.path[^1].node.convertTo(Blob)
else:
EmptyBlob
if effSizeMax < dataAllocated + node.len:
break
if stopAt <= Moment.now():
timeExceeded = true
break
list &= node
when extraTraceMessages:
trace logTxt "getTrieNodes: done", sizeMax, dataAllocated,
nGroups=pathGroups.mapIt(max(1,it.slotPaths.len)).foldl(a+b,0),
nPaths=pathGroups.len, nResult=list.len, timeExceeded
return ok(list)
except CatchableError as exc:
return err(exc.msg)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@@ -1,667 +0,0 @@
# Nimbus - Rapidly converge on and track the canonical chain head of each peer
#
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Note: This module is currently unused.
## This module fetches and tracks the canonical chain head of each connected
## peer. (Or in future, each peer we care about; we won't poll them all so
## often.)
##
## This is for when we aren't sure of the block number of a peer's canonical
## chain head. Most of the time, after finding which block, it quietly polls
## to track small updates to the "best" block number and hash of each peer.
##
## But sometimes tracking can get out of step: there may have been a reorg
## deeper than our tracking window, a burst of more than a few new blocks,
## network delays, downtime, or the peer may itself be syncing. Perhaps we
## stopped Nimbus and restarted it a while later, e.g. after suspending a
## laptop or a Control-Z. In all these cases this module catches up. It is
## even possible that the best hash the peer gave us
## in the `Status` handshake has disappeared by the time we query for the
## corresponding block number, so we start at zero.
##
## The steps here perform a robust and efficient O(log N) search to rapidly
## converge on the new best block if it's moved out of the polling window no
## matter where it starts, confirm the peer's canonical chain head boundary,
## then track the peer's chain head in real-time by polling. The method is
## robust to peer state changes at any time.
##
## The purpose is to:
##
## - Help with finding a peer common chain prefix ("fast sync pivot") in a
## consistent, fast and explicit way.
##
## - Catch up quickly after any long pauses of network downtime, program not
## running, or deep chain reorgs.
##
## - Be able to display real-time peer states, so they are less mysterious.
##
## - Tell the beam/snap/trie sync processes when to start and what blocks to
## fetch, and keep those fetchers in the head-adjacent window of the
## ever-changing chain.
##
## - Help the sync process bootstrap usefully when we only have one peer,
## speculatively fetching and validating what data we can before we have more
## peers to corroborate the consensus.
##
## - Help detect consensus failures in the network.
##
## We cannot assume a peer's canonical chain stays the same or only gains new
## blocks from one query to the next. There can be reorgs, including deep
## reorgs. When a reorg happens, the best block number can decrease if the new
## canonical chain is shorter than the old one, and the best block hash we
## previously knew can become unavailable on the peer. So we must detect when
## the current best block disappears and be able to reduce block number.
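##
## The sketch below illustrates the core search pattern in isolation
## (illustration only; the real worker also tracks reorgs, hashes and peer
## state): exponential expansion until the head is bracketed, then binary
## convergence, giving O(log N) queries.

proc findHead(present: proc(n: uint64): bool): uint64 =
  # `present` is a hypothetical oracle standing in for a header query;
  # returns the highest block number for which it holds, assuming block 0
  # is always present (the genesis assumption above).
  var hi = 1'u64
  while present(hi):                  # exponential expansion (HuntForward)
    hi = hi shl 4                     # cf. huntForwardExpandShift below
  var lo = hi shr 4                   # last probe known to be present
  while lo + 1 < hi:                  # quasi-binary convergence (HuntRange)
    let mid = lo + (hi - lo) div 2
    if present(mid): lo = mid else: hi = mid
  lo

doAssert findHead(proc(n: uint64): bool = n <= 1000'u64) == 1000'u64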
import
std/[bitops, sequtils, strutils],
chronicles,
chronos,
eth/[common, p2p, p2p/private/p2p_types],
"../.."/[constants, genesis, p2p/chain/chain_desc],
".."/[protocol, sync_desc, types],
../snap/worker_desc
{.push raises: [].}
logScope:
topics = "snap-pivot"
const
syncLockedMinimumReply = 8
## Minimum number of headers we assume any peer will send if they have
## them in contiguous ascending queries. Fewer than this confirms we have
## found the peer's canonical chain head boundary. Must be at least 2, and
## at least `syncLockedQueryOverlap+2` to stay `SyncLocked` when the chain
## extends. Should not be large as that would be stretching assumptions
## about peer implementations. 8 is chosen as it allows 3-deep extensions
## and 3-deep reorgs to be followed in a single round trip.
syncLockedQueryOverlap = 4
## Number of headers to re-query on each poll when `SyncLocked` so that we
## get small reorg updates in one round trip. Must be no more than
## `syncLockedMinimumReply-1`, no more than `syncLockedMinimumReply-2` to
## stay `SyncLocked` when the chain extends, and not too large to avoid
## excessive duplicate fetching. 4 is chosen as it allows 3-deep reorgs
## to be followed in a single round trip.
syncLockedQuerySize = 192
## Query size when polling `SyncLocked`. Must be at least
## `syncLockedMinimumReply`. Large is fine, if we get a large reply the
## values are almost always useful.
huntQuerySize = 16
## Query size when hunting for canonical head boundary. Small is good
## because we don't want to keep most of the headers at hunt time.
huntForwardExpandShift = 4
## Expansion factor during `HuntForward` exponential search.
## 16 is chosen for rapid convergence when bootstrapping or catching up.
huntBackwardExpandShift = 1
## Expansion factor during `HuntBackward` exponential search.
## 2 is chosen for better convergence when tracking a chain reorg.
type
WorkerMode = enum
## The current state of tracking the peer's canonical chain head.
## `bestBlockNumber` is only valid when this is `SyncLocked`.
SyncLocked
SyncOnlyHash
HuntForward
HuntBackward
HuntRange
HuntRangeFinal
SnapWorkerStats* = tuple
## Statistics counters for events associated with this peer.
## These may be used to recognise errors and select good peers.
ok: tuple[
reorgDetected: uint,
getBlockHeaders: uint,
getNodeData: uint]
minor: tuple[
timeoutBlockHeaders: uint,
unexpectedBlockHash: uint]
major: tuple[
networkErrors: uint,
excessBlockHeaders: uint,
wrongBlockHeader: uint]
SnapPivotCtxRef* = ref object of RootRef
stats*: SnapWorkerStats ## Statistics counters
ctx: SnapCtxRef ## For debugging
chain: Chain ## Block chain database
SnapPivotWorkerRef* = ref object of RootRef
## Peer canonical chain head ("best block") search state.
header: Option[BlockHeader] ## Pivot header (if any)
syncMode: WorkerMode ## Action mode
lowNumber: BlockNumber ## Recent lowest known block number.
highNumber: BlockNumber ## Recent highest known block number.
bestNumber: BlockNumber
bestHash: BlockHash
step: uint
global: SnapPivotCtxRef
peer: Peer ## Current network peer
ctrl: BuddyCtrlRef ## Worker control start/stop
static:
doAssert syncLockedMinimumReply >= 2
doAssert syncLockedMinimumReply >= syncLockedQueryOverlap + 2
doAssert syncLockedQuerySize <= maxHeadersFetch
doAssert huntQuerySize >= 1 and huntQuerySize <= maxHeadersFetch
doAssert huntForwardExpandShift >= 1 and huntForwardExpandShift <= 8
doAssert huntBackwardExpandShift >= 1 and huntBackwardExpandShift <= 8
# Make sure that request/response wire protocol messages are id-tracked and
# would not overlap (no multi-protocol legacy support)
doAssert 66 <= protocol.ethVersion
# ------------------------------------------------------------------------------
# Private logging helpers
# ------------------------------------------------------------------------------
proc pp(a: MDigest[256]; collapse = true): string =
if not collapse:
a.data.mapIt(it.toHex(2)).join.toLowerAscii
elif a == EMPTY_ROOT_HASH:
"EMPTY_ROOT_HASH"
elif a == EMPTY_UNCLE_HASH:
"EMPTY_UNCLE_HASH"
elif a == EMPTY_SHA3:
"EMPTY_SHA3"
elif a == ZERO_HASH256:
"ZERO_HASH256"
else:
a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
proc pp(bh: BlockHash): string =
"%" & $bh.Hash256.pp
proc pp(bn: BlockNumber): string =
"#" & $bn
proc pp(bhn: HashOrNum): string =
if bhn.isHash: bhn.hash.pp else: bhn.number.pp
proc traceSyncLocked(
sp: SnapPivotWorkerRef;
num: BlockNumber;
hash: BlockHash;
) =
## Trace messages when peer canonical head is confirmed or updated.
let
peer = sp.peer
bestBlock = num.pp
if sp.syncMode != SyncLocked:
debug "Now tracking chain head of peer", peer,
bestBlock
elif num > sp.bestNumber:
if num == sp.bestNumber + 1:
debug "Peer chain head advanced one block", peer,
advance=1, bestBlock
else:
debug "Peer chain head advanced some blocks", peer,
advance=(num - sp.bestNumber), bestBlock
elif num < sp.bestNumber or hash != sp.bestHash:
debug "Peer chain head reorg detected", peer,
advance=(sp.bestNumber - num), bestBlock
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc clearSyncStateRoot(sp: SnapPivotWorkerRef) =
if sp.header.isSome:
debug "Stopping state sync from this peer", peer=sp.peer
sp.header = none(BlockHeader)
proc lockSyncStateAndFetch(sp: SnapPivotWorkerRef; header: BlockHeader) =
let
peer = sp.peer
stateRoot = header.stateRoot
hash = header.blockHash.BlockHash
thisBlock = header.blockNumber.pp
sp.traceSyncLocked(header.blockNumber, hash)
sp.bestNumber = header.blockNumber
sp.bestHash = hash
sp.syncMode = SyncLocked
if sp.header.isNone:
debug "Starting state sync from this peer", peer, thisBlock, stateRoot
elif sp.header.unsafeGet.stateRoot != stateRoot:
trace "Adjusting state sync root from this peer", peer, thisBlock, stateRoot
sp.header = some(header)
proc setHuntBackward(sp: SnapPivotWorkerRef, lowestAbsent: BlockNumber) =
## Start exponential search mode backward due to new uncertainty.
sp.syncMode = HuntBackward
sp.step = 0
# Block zero is always present.
sp.lowNumber = 0.toBlockNumber
# Zero `lowestAbsent` is never correct, but an incorrect peer could send it.
sp.highNumber =
if lowestAbsent > 0: lowestAbsent
else: 1.toBlockNumber
sp.clearSyncStateRoot()
proc setHuntForward(sp: SnapPivotWorkerRef, highestPresent: BlockNumber) =
## Start exponential search mode forward due to new uncertainty.
sp.syncMode = HuntForward
sp.step = 0
sp.lowNumber = highestPresent
sp.highNumber = high(BlockNumber)
sp.clearSyncStateRoot()
proc updateHuntAbsent(sp: SnapPivotWorkerRef, lowestAbsent: BlockNumber) =
## Converge uncertainty range backward.
if lowestAbsent < sp.highNumber:
sp.highNumber = lowestAbsent
# If uncertainty range has moved outside the search window, change to hunt
# backward to block zero. Note that empty uncertainty range is allowed
# (empty range is `hunt.lowNumber + 1 == hunt.highNumber`).
if sp.highNumber <= sp.lowNumber:
sp.setHuntBackward(lowestAbsent)
sp.clearSyncStateRoot()
proc updateHuntPresent(sp: SnapPivotWorkerRef, highestPresent: BlockNumber) =
## Converge uncertainty range forward.
if highestPresent > sp.lowNumber:
sp.lowNumber = highestPresent
# If uncertainty range has moved outside the search window, change to hunt
# forward to no upper limit. Note that empty uncertainty range is allowed
# (empty range is `hunt.lowNumber + 1 == hunt.highNumber`).
if sp.lowNumber >= sp.highNumber:
sp.setHuntForward(highestPresent)
sp.clearSyncStateRoot()
# ------------------------------------------------------------------------------
# Private functions, assemble request
# ------------------------------------------------------------------------------
proc peerSyncChainRequest(sp: SnapPivotWorkerRef): BlocksRequest =
## Choose `GetBlockHeaders` parameters when hunting or following the canonical
## chain of a peer.
if sp.syncMode == SyncLocked:
# Stable and locked. This is just checking for changes including reorgs.
# `sp.bestNumber` was recently the head of the peer's canonical
# chain. We must include this block number to detect when the canonical
# chain gets shorter versus no change.
result.startBlock.number =
if sp.bestNumber <= syncLockedQueryOverlap:
# Every peer should send genesis for block 0, so don't ask for it.
# `peerSyncChainEmptyReply` has logic to handle this reply as if it
# was for block 0. Aside from saving bytes, this is more robust if
# some client doesn't do genesis reply correctly.
1.toBlockNumber
else:
min(sp.bestNumber - syncLockedQueryOverlap.toBlockNumber,
high(BlockNumber) - (syncLockedQuerySize - 1).toBlockNumber)
result.maxResults = syncLockedQuerySize
return
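# Worked example for the `SyncLocked` branch above: with `bestNumber` 1000,
# the query starts at block 996 (`syncLockedQueryOverlap` 4) and asks for
# `syncLockedQuerySize` 192 headers, so a small reorg or a few new blocks
# are picked up in a single round trip.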
if sp.syncMode == SyncOnlyHash:
# We only have the hash of the recent head of the peer's canonical chain.
# Like `SyncLocked`, query more than one item to detect when the
# canonical chain gets shorter, no change or longer.
result.startBlock = sp.bestHash.to(HashOrNum)
result.maxResults = syncLockedQuerySize
return
# Searching for the peer's canonical head. An ascending query is always
# used, regardless of search direction. This is because a descending query
# (`reverse = true` and `maxResults > 1`) is useless for searching: Either
# `startBlock` is present, in which case the extra descending results
# contribute no more information about the canonical head boundary, or
# `startBlock` is absent in which case there are zero results. It's not
# defined in the `eth` specification that there must be zero results (in
# principle peers could return the lower numbered blocks), but in practice
# peers stop at the first absent block in the sequence from `startBlock`.
#
# Guaranteeing O(log N) time convergence in all scenarios requires some
# properties to be true in both exponential search (expanding) and
# quasi-binary search (converging in a range). The most important is that
# the gap to `startBlock` after `hunt.lowNumber` and also before
# `hunt.highNumber` are proportional to the query step, where the query step
# is `hunt.step` exponentially expanding each round, or `maxStep`
# approximately evenly distributed in the range.
#
# `hunt.lowNumber+1` must not be used consistently as the start, even with a
# large enough query step size, as that will sometimes take O(N) to converge
# in both the exponential and quasi-binary searches. (Ending at
# `hunt.highNumber-1` is fine if `huntQuerySize > 1`. This asymmetry is
# due to ascending queries (see earlier comment), and non-empty truncated
# query reply being proof of presence before the truncation point, but not
# proof of absence after it. A reply can be truncated just because the peer
# decides to.)
#
# The proportional gap requirement is why we divide by query size here,
# instead of stretching to fit more strictly with `(range-1)/(size-1)`.
const huntFinalSize = max(2, huntQuerySize)
var maxStep = 0u
let fullRangeClamped =
if sp.highNumber <= sp.lowNumber: 0u
else: min(high(uint).toBlockNumber,
sp.highNumber - sp.lowNumber).truncate(uint) - 1
if fullRangeClamped >= huntFinalSize: # `HuntRangeFinal` condition.
maxStep = if huntQuerySize == 1:
fullRangeClamped
elif (huntQuerySize and (huntQuerySize-1)) == 0:
fullRangeClamped shr fastLog2(huntQuerySize)
else:
fullRangeClamped div huntQuerySize
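# Worked example: with `fullRangeClamped` 1000 and `huntQuerySize` 16 (a
# power of two), `maxStep` becomes 1000 shr fastLog2(16) = 62, spreading
# the 16 probes roughly evenly across the uncertainty range.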
doAssert huntFinalSize >= huntQuerySize
doAssert maxStep >= 1 # Ensured by the above assertion.
# Check for exponential search (expanding). Iterate `hunt.step`. O(log N)
# requires `startBlock` to be offset from `hunt.lowNumber`/`hunt.highNumber`.
if sp.syncMode in {HuntForward, HuntBackward} and
fullRangeClamped >= huntFinalSize:
let forward = sp.syncMode == HuntForward
let expandShift = if forward: huntForwardExpandShift
else: huntBackwardExpandShift
# Switches to range search when this condition is no longer true.
if sp.step < maxStep shr expandShift:
# The `if` above means the next line cannot overflow.
sp.step = if sp.step > 0: sp.step shl expandShift
else: 1
# Satisfy the O(log N) convergence conditions.
result.startBlock.number =
if forward: sp.lowNumber + sp.step.toBlockNumber
else: sp.highNumber -
(sp.step * huntQuerySize).toBlockNumber
result.maxResults = huntQuerySize
result.skip = sp.step - 1
return
# For tracing/display.
sp.step = maxStep
sp.syncMode = HuntRange
if maxStep > 0:
# Quasi-binary search (converging in a range). O(log N) requires
# `startBlock` to satisfy the constraints described above, with the
# proportionality from both ends of the range. The optimal information
# gathering position is tricky and doesn't make much difference, so don't
# bother. We'll centre the query in the range.
var offset = fullRangeClamped - maxStep * (huntQuerySize-1)
# Rounding must bias towards end to ensure `offset >= 1` after this.
offset -= offset shr 1
result.startBlock.number = sp.lowNumber + offset.toBlockNumber
result.maxResults = huntQuerySize
result.skip = maxStep - 1
else:
# Small range, final step. At `fullRange == 0` we must query at least one
# block before and after the range to confirm the canonical head boundary,
# or find it has moved. This ensures progress without getting stuck. When
# `fullRange` is small this is also beneficial, to get `SyncLocked` in one
# round trip from here, and it simplifies the other search branches below.
# Ideally the query is similar to `SyncLocked`, enough to get `SyncLocked`
# in one round trip, and accommodate a small reorg or extension.
const afterSoftMax = syncLockedMinimumReply - syncLockedQueryOverlap
const beforeHardMax = syncLockedQueryOverlap
let extra = huntFinalSize - fullRangeClamped
var before = (extra + 1) shr 1
before = max(before + afterSoftMax, extra) - afterSoftMax
before = min(before, beforeHardMax)
# See `SyncLocked` case.
result.startBlock.number =
if sp.bestNumber <= before.toBlockNumber: 1.toBlockNumber
else: min(sp.bestNumber - before.toBlockNumber,
high(BlockNumber) - (huntFinalSize - 1).toBlockNumber)
result.maxResults = huntFinalSize
sp.syncMode = HuntRangeFinal
# ------------------------------------------------------------------------------
# Private functions, reply handling
# ------------------------------------------------------------------------------
proc peerSyncChainEmptyReply(sp: SnapPivotWorkerRef; request: BlocksRequest) =
## Handle empty `GetBlockHeaders` reply. This means `request.startBlock` is
## absent on the peer. If it was `SyncLocked` there must have been a reorg
## and the previous canonical chain head has disappeared. If hunting, this
## updates the range of uncertainty.
let peer = sp.peer
# Treat empty response to a request starting from block 1 as equivalent to
# length 1 starting from block 0 in `peerSyncChainNonEmptyReply`. We treat
# every peer as if it would send genesis for block 0, without asking for it.
if request.skip == 0 and
not request.reverse and
not request.startBlock.isHash and
request.startBlock.number == 1.toBlockNumber:
try:
sp.lockSyncStateAndFetch(sp.global.chain.db.toGenesisHeader)
except RlpError as e:
raiseAssert "Gensis/chain problem (" & $e.name & "): " & e.msg
return
if sp.syncMode in {SyncLocked, SyncOnlyHash}:
inc sp.global.stats.ok.reorgDetected
trace "Peer reorg detected, best block disappeared", peer,
startBlock=request.startBlock
let lowestAbsent = request.startBlock.number
case sp.syncMode:
of SyncLocked:
# If this message doesn't change our knowledge, ignore it.
if lowestAbsent > sp.bestNumber:
return
# Due to a reorg, peer's canonical head has lower block number, outside
# our tracking window. Sync lock is no longer valid. Switch to hunt
# backward to find the new canonical head.
sp.setHuntBackward(lowestAbsent)
of SyncOnlyHash:
# Due to a reorg, peer doesn't have the block hash it originally gave us.
# Switch to hunt forward from block zero to find the canonical head.
sp.setHuntForward(0.toBlockNumber)
of HuntForward, HuntBackward, HuntRange, HuntRangeFinal:
# Update the hunt range.
sp.updateHuntAbsent(lowestAbsent)
# Update best block number. It is invalid except when `SyncLocked`, but
# still useful as a hint of what we knew recently, for example in displays.
if lowestAbsent <= sp.bestNumber:
sp.bestNumber =
if lowestAbsent == 0.toBlockNumber: lowestAbsent
else: lowestAbsent - 1.toBlockNumber
sp.bestHash = default(typeof(sp.bestHash))
proc peerSyncChainNonEmptyReply(
sp: SnapPivotWorkerRef;
request: BlocksRequest;
headers: openArray[BlockHeader]) =
## Handle non-empty `GetBlockHeaders` reply. This means `request.startBlock`
## is present on the peer and in its canonical chain (unless the request was
## made with a hash). If it's a short, contiguous, ascending order reply, it
## reveals the abrupt transition at the end of the chain and we have learned
## or reconfirmed the real-time head block. If hunting, this updates the
## range of uncertainty.
let
len = headers.len
highestIndex = if request.reverse: 0 else: len - 1
# We assume a short enough reply means we've learned the peer's canonical
# head, because it would have replied with another header if not at the head.
# This is not justified when the request used a general hash, because the
# peer doesn't have to reply with its canonical chain in that case, except it
# is still justified if the hash was the known canonical head, which is
# the case in a `SyncOnlyHash` request.
if len < syncLockedMinimumReply and
request.skip == 0 and not request.reverse and
len.uint < request.maxResults:
sp.lockSyncStateAndFetch(headers[highestIndex])
return
# Be careful, this number is from externally supplied data and arithmetic
# in the upward direction could overflow.
let highestPresent = headers[highestIndex].blockNumber
# A reply that isn't short enough for the canonical head criterion above
# tells us headers up to some number, but it doesn't tell us if there are
# more after it in the peer's canonical chain. We have to request more
# headers to find out.
case sp.syncMode:
of SyncLocked:
# If this message doesn't change our knowledge, ignore it.
if highestPresent <= sp.bestNumber:
return
# Sync lock is no longer valid as we don't have confirmed canonical head.
# Switch to hunt forward to find the new canonical head.
sp.setHuntForward(highestPresent)
of SyncOnlyHash:
# As `SyncLocked` but without the block number check.
sp.setHuntForward(highestPresent)
of HuntForward, HuntBackward, HuntRange, HuntRangeFinal:
# Update the hunt range.
sp.updateHuntPresent(highestPresent)
# Update best block number. It is invalid except when `SyncLocked`, but
# still useful as a hint of what we knew recently, for example in displays.
if highestPresent > sp.bestNumber:
sp.bestNumber = highestPresent
sp.bestHash = headers[highestIndex].blockHash.BlockHash
# ------------------------------------------------------------------------------
# Public functions, constructor
# ------------------------------------------------------------------------------
proc init*(
T: type SnapPivotCtxRef;
ctx: SnapCtxRef; ## For debugging
chain: Chain; ## Block chain database
): T =
T(ctx: ctx,
chain: chain)
proc clear*(sp: SnapPivotWorkerRef) =
sp.syncMode = HuntForward
sp.lowNumber = 0.toBlockNumber.BlockNumber
sp.highNumber = high(BlockNumber).BlockNumber
sp.bestNumber = 0.toBlockNumber.BlockNumber
sp.bestHash = sp.peer.state(protocol.eth).bestBlockHash.BlockHash
sp.step = 0u
proc init*(
T: type SnapPivotWorkerRef;
ctx: SnapPivotCtxRef; ## Global descriptor
ctrl: BuddyCtrlRef; ## Worker control start/stop
peer: Peer; ## Current network peer
): T =
result = T(global: ctx,
peer: peer,
ctrl: ctrl)
result.clear()
# TODO: Temporarily disabled because it's useful to test the worker.
# result.syncMode = SyncOnlyHash
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc pivotHeader*(sp: SnapPivotWorkerRef): Result[BlockHeader,void] =
## Returns cached block header if available
if sp.header.isSome:
let header = sp.header.unsafeGet
if header.blockNumber != 0:
return ok(header)
err()
proc pivotNegotiate*(
sp: SnapPivotWorkerRef;
ign: Option[BlockNumber]; ## Minimum block number to expect, ignored for now
): Future[bool]
{.async.} =
## Query a peer to update our knowledge of its canonical chain and its best
## block, which is its canonical chain head. This can be called at any time
## after a peer has negotiated the connection.
##
## This function is called in an exponential then binary search style
## during initial sync to find the canonical head, real-time polling
## afterwards to check for updates.
##
## All replies to this query are part of the peer's canonical chain at the
## time the peer sends them.
##
## This function can run in *multi mode*.
let peer = sp.peer
trace "Starting pivotExec()", peer
let request = sp.peerSyncChainRequest
trace trEthSendSendingGetBlockHeaders, peer,
count=request.maxResults,
startBlock=request.startBlock.pp, step=request.traceStep
inc sp.global.stats.ok.getBlockHeaders
var reply: Option[protocol.blockHeadersObj]
try:
reply = await peer.getBlockHeaders(request)
except CatchableError as e:
trace trEthRecvError & "waiting for GetBlockHeaders reply", peer,
error=e.msg
inc sp.global.stats.major.networkErrors
# Just try another peer
sp.ctrl.zombie = true
return false
if reply.isNone:
trace trEthRecvTimeoutWaiting & "for GetBlockHeaders reply", peer
# TODO: Should disconnect?
inc sp.global.stats.minor.timeoutBlockHeaders
return false
let nHeaders = reply.get.headers.len
if nHeaders == 0:
trace trEthRecvReceivedBlockHeaders, peer,
got=0, requested=request.maxResults
else:
trace trEthRecvReceivedBlockHeaders, peer,
got=nHeaders, requested=request.maxResults,
firstBlock=reply.get.headers[0].blockNumber,
lastBlock=reply.get.headers[^1].blockNumber
if request.maxResults.int < nHeaders:
trace trEthRecvProtocolViolation & "excess headers in BlockHeaders message",
peer, got=nHeaders, requested=request.maxResults
# TODO: Should disconnect.
inc sp.global.stats.major.excessBlockHeaders
return false
if 0 < nHeaders:
# TODO: Check this is not copying the `headers`.
sp.peerSyncChainNonEmptyReply(request, reply.get.headers)
else:
sp.peerSyncChainEmptyReply(request)
trace "Done pivotExec()", peer
return sp.header.isSome
# ------------------------------------------------------------------------------
# Debugging
# ------------------------------------------------------------------------------
proc pp*(sp: SnapPivotWorkerRef): string =
result &= "(mode=" & $sp.syncMode
result &= ",num=(" & sp.lowNumber.pp & "," & sp.highNumber.pp & ")"
result &= ",best=(" & sp.bestNumber.pp & "," & sp.bestHash.pp & ")"
result &= ",step=" & $sp.step
result &= ")"
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,206 +0,0 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/sets,
  eth/[common, trie/nibbles]

const
EmptyBlob* = seq[byte].default
## Useful shortcut
EmptyBlobSet* = HashSet[Blob].default
## Useful shortcut
EmptyBlobSeq* = seq[Blob].default
## Useful shortcut
EmptyNibbleSeq* = EmptyBlob.initNibbleRange
## Useful shortcut
# ---------
pivotTableLruEntriesMax* = 50
    ## Max depth of the pivot table. On overflow, the oldest entry is removed.
pivotBlockDistanceMin* = 128
    ## The minimal block number distance between two block headers needed to
    ## activate a new state root pivot.
##
## Effects on assembling the state via `snap/1` protocol:
##
    ## * A small value of this constant increases the propensity to update the
    ##   pivot header more often. This is so because each new peer negotiates a
    ##   pivot block number at least as large as the current one.
##
## * A large value keeps the current pivot more stable but some experiments
## suggest that the `snap/1` protocol is answered only for later block
## numbers (aka pivot blocks.) So a large value tends to keep the pivot
## farther away from the chain head.
##
## Note that 128 is the magic distance for snapshots used by *Geth*.
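    ##
    ## Illustrative condition (assumed semantics, for orientation only): a
    ## candidate header `h` would replace the current pivot `p` when
    ## ::
    ##   p.blockNumber + pivotBlockDistanceMin <= h.blockNumber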
# --------------
fetchRequestBytesLimit* = 2 * 1024 * 1024
## Soft bytes limit to request in `snap/1` protocol calls.
fetchRequestTrieNodesMax* = 1024
## Informal maximal number of trie nodes to fetch at once in `snap/1`
## protocol calls. This is not an official limit but found with several
## implementations (e.g. Geth.)
##
    ## Restricting the fetch list length early allows better parallelisation
    ## of healing.
fetchRequestStorageSlotsMax* = 2 * 1024
## Maximal number of storage tries to fetch with a single request message.
# --------------
fetchRequestContractsMax* = 1024
    ## Maximal number of contract codes to fetch with a single request message.
# --------------
saveAccountsProcessedChunksMax* = 1000
## Recovery data are stored if the processed ranges list contains no more
## than this many range *chunks*.
##
    ## If the range set is too fragmented, no data will be saved and the
    ## restart has to be performed from scratch or from an earlier checkpoint.
saveStorageSlotsMax* = 20_000
    ## Recovery data are stored if the outstanding storage slots to process do
## not amount to more than this many entries.
##
    ## If there are too many dangling nodes, no data will be saved and the
    ## restart has to be performed from scratch or from an earlier checkpoint.
saveContactsMax* = 10_000
## Similar to `saveStorageSlotsMax`
# --------------
storageSlotsFetchFailedFullMax* = fetchRequestStorageSlotsMax + 100
## Maximal number of failures when fetching full range storage slots.
    ## These failed slot ranges are requested only once within the same cycle.
storageSlotsFetchFailedPartialMax* = 300
## Ditto for partial range storage slots.
storageSlotsTrieInheritPerusalMax* = 30_000
## Maximal number of nodes to visit in order to find out whether this
    ## storage slots trie is complete. This allows *inheriting* the full trie
## for an existing root node if the trie is small enough.
storageSlotsQuPrioThresh* = 5_000
## For a new worker, prioritise processing the storage slots queue over
## processing accounts if the queue has more than this many items.
##
## For a running worker processing accounts, stop processing accounts
## and switch to processing the storage slots queue if the queue has
## more than this many items.
# --------------
contractsQuPrioThresh* = 2_000
## Similar to `storageSlotsQuPrioThresh`
# --------------
healAccountsCoverageTrigger* = 1.01
## Apply accounts healing if the global snap download coverage factor
## exceeds this setting. The global coverage factor is derived by merging
## all account ranges retrieved for all pivot state roots (see
## `coveredAccounts` in the object `CtxData`.) Note that a coverage factor
## greater than 100% is not exact but rather a lower bound estimate.
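    ##
    ## Worked example (for orientation, assuming retrieved ranges accumulate
    ## across pivots): three pivots that each fetched 40% of the account key
    ## space give a factor of 1.2, which exceeds 1.01 and triggers healing.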
healAccountsInspectionPlanBLevel* = 4
## Search this level deep for missing nodes if `hexaryEnvelopeDecompose()`
## only produces existing nodes.
healAccountsInspectionPlanBRetryMax* = 2
## Retry inspection with depth level argument starting at
## `healAccountsInspectionPlanBLevel-1` and counting down at most this
    ## many times, until at least one dangling node is found and while the
    ## depth level argument remains positive. The cumulative depth of the
    ## iterated search is
    ## ::
    ##      b
    ##      Σ  ν = (b - a + 1) * (a + b) / 2
    ##     ν=a
## for
## ::
## b = healAccountsInspectionPlanBLevel
## a = b - healAccountsInspectionPlanBRetryMax
##
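    ## With the default settings b = 4 and a = 4 - 2 = 2 this evaluates to
    ## (4 - 2 + 1) * (2 + 4) / 2 = 9, i.e. inspection depths 4, 3, and 2 are
    ## visited.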
healAccountsInspectionPlanBRetryNapMSecs* = 2
    ## Sleep between inspection retries to allow a thread switch. If this
    ## constant is set to `0`, a `1` ns wait is used.
# --------------
healStorageSlotsInspectionPlanBLevel* = 5
## Similar to `healAccountsInspectionPlanBLevel`
  healStorageSlotsInspectionPlanBRetryMax* = 99 # effectively unlimited; the positive level argument caps it: 5 + 4 + .. + 1 => 15
## Similar to `healAccountsInspectionPlanBRetryMax`
healStorageSlotsInspectionPlanBRetryNapMSecs* = 2
## Similar to `healAccountsInspectionPlanBRetryNapMSecs`
healStorageSlotsBatchMax* = 32
    ## Maximal number of storage tries to heal in a single batch run. Only
## this many items will be removed from the batch queue. These items will
## then be processed one by one.
healStorageSlotsFailedMax* = 300
    ## Maximal number of failures accepted when healing storage slots.
# --------------
comErrorsTimeoutMax* = 3
    ## Maximal number of non-responses accepted in a row. If there are more
    ## than `comErrorsTimeoutMax` consecutive errors, the worker will be
    ## degraded to a zombie.
comErrorsTimeoutSleepMSecs* = 5000
    ## Wait/suspend for this many milliseconds after a timeout error if there
    ## are no more than `comErrorsTimeoutMax` errors in a row (maybe with some
    ## other network or no-data errors mixed in.) Set to 0 to disable.
comErrorsNetworkMax* = 5
## Similar to `comErrorsTimeoutMax` but for network errors.
comErrorsNetworkSleepMSecs* = 5000
    ## Similar to `comErrorsTimeoutSleepMSecs` but for network errors.
    ## Set to 0 to disable.
comErrorsNoDataMax* = 3
## Similar to `comErrorsTimeoutMax` but for missing data errors.
comErrorsNoDataSleepMSecs* = 0
    ## Similar to `comErrorsTimeoutSleepMSecs` but for missing data errors.
    ## Set to 0 to disable.

static:
doAssert storageSlotsQuPrioThresh < saveStorageSlotsMax
doAssert contractsQuPrioThresh < saveContactsMax
doAssert 0 <= storageSlotsFetchFailedFullMax
doAssert 0 <= storageSlotsFetchFailedPartialMax
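
# A hedged extension of the sanity checks above (not in the original module):
# verify the cumulative Plan-B inspection depth formula quoted for
# `healAccountsInspectionPlanBRetryMax` against a direct summation.
static:
  let b = healAccountsInspectionPlanBLevel              # = 4
  let a = b - healAccountsInspectionPlanBRetryMax       # = 2
  var cumDepth = 0
  for nu in a .. b:
    cumDepth += nu
  doAssert cumDepth == (b - a + 1) * (a + b) div 2      # both sides are 9
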
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------