// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"crypto/ecdsa"
	"encoding/binary"
	"encoding/json"
	"errors"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

	ethVersion = 64 // equivalent eth version for the downloader

	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
	MaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request
	MaxTxSend                = 64  // Amount of transactions to be sent per request
	MaxTxStatus              = 256 // Amount of transactions to be queried per request
)
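
// Note: softResponseLimit caps the byte size of each individual reply, while
// the Max*Fetch constants above cap the number of items a single request may
// ask for; the two limits are enforced independently in handleMsg below.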

var (
	errTooManyInvalidRequest = errors.New("too many invalid requests made")
	errFullClientPool        = errors.New("client pool is full")
)

// serverHandler is responsible for serving light clients and processing
// all incoming light requests.
type serverHandler struct {
	forkFilter forkid.Filter
	blockchain *core.BlockChain
	chainDb    ethdb.Database
	txpool     *core.TxPool
	server     *LesServer

	closeCh chan struct{}  // Channel used to exit all background routines of handler.
	wg      sync.WaitGroup // WaitGroup used to track all background routines of handler.
	synced  func() bool    // Callback function used to determine whether the local node is synced.

	// Testing fields
	addTxsSync bool
}

func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler {
	handler := &serverHandler{
		forkFilter: forkid.NewFilter(blockchain),
		server:     server,
		blockchain: blockchain,
		chainDb:    chainDb,
		txpool:     txpool,
		closeCh:    make(chan struct{}),
		synced:     synced,
	}
	return handler
}

// start starts the server handler.
func (h *serverHandler) start() {
	h.wg.Add(1)
	go h.broadcastLoop()
}

// stop stops the server handler.
func (h *serverHandler) stop() {
	close(h.closeCh)
	h.wg.Wait()
}

// runPeer is the p2p protocol run function for the given version.
func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
	peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
	defer peer.close()
	h.wg.Add(1)
	defer h.wg.Done()
	return h.handle(peer)
}

func (h *serverHandler) handle(p *clientPeer) error {
	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())

	// Execute the LES handshake
	var (
		head   = h.blockchain.CurrentHeader()
		hash   = head.Hash()
		number = head.Number.Uint64()
		td     = h.blockchain.GetTd(hash, number)
		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())
	)
	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
		p.Log().Debug("Light Ethereum handshake failed", "err", err)
		return err
	}
	// Reject duplicated peers, otherwise register the peer to the peer set.
	var registered bool
	if err := h.server.ns.Operation(func() {
		if h.server.ns.GetField(p.Node(), clientPeerField) != nil {
			registered = true
		} else {
			h.server.ns.SetFieldSub(p.Node(), clientPeerField, p)
		}
	}); err != nil {
		return err
	}
	if registered {
		return errAlreadyRegistered
	}

	defer func() {
		h.server.ns.SetField(p.Node(), clientPeerField, nil)
		if p.fcClient != nil { // is nil when connecting another server
			p.fcClient.Disconnect()
		}
	}()
	if p.server {
		// connected to another server, no messages expected, just wait for disconnection
		_, err := p.rw.ReadMsg()
		return err
	}
	// Reject light clients if the server is not synced.
	//
	// Perform this check here so that "non-synced" les-server peers are
	// still allowed to keep the connection.
	if !h.synced() {
		p.Log().Debug("Light server not synced, rejecting peer")
		return p2p.DiscRequested
	}
	// Disconnect the inbound peer if it's rejected by clientPool
	if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {
		p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool)
		return errFullClientPool
	}
	p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance)
	if p.balance == nil {
		return p2p.DiscRequested
	}
	activeCount, _ := h.server.clientPool.pp.Active()
	clientConnectionGauge.Update(int64(activeCount))

	var wg sync.WaitGroup // Wait group used to track all in-flight task routines.

	connectedAt := mclock.Now()
	defer func() {
		wg.Wait() // Ensure all background task routines have exited.
		h.server.clientPool.disconnect(p)
		p.balance = nil
		activeCount, _ := h.server.clientPool.pp.Active()
		clientConnectionGauge.Update(int64(activeCount))
		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
	}()

	// Mark the peer as being served.
	atomic.StoreUint32(&p.serving, 1)
	defer atomic.StoreUint32(&p.serving, 0)

	// Spawn a main loop to handle all incoming messages.
	for {
		select {
		case err := <-p.errCh:
			p.Log().Debug("Failed to send light ethereum response", "err", err)
			return err
		default:
		}
		if err := h.handleMsg(p, &wg); err != nil {
			p.Log().Debug("Light Ethereum message handling failed", "err", err)
			return err
		}
	}
}

// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
	// Read the next message from the remote peer, and ensure it's fully consumed
	msg, err := p.rw.ReadMsg()
	if err != nil {
		return err
	}
	p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)

	// Discard large messages which exceed the limitation.
	if msg.Size > ProtocolMaxMsgSize {
		clientErrorMeter.Mark(1)
		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	defer msg.Discard()

	var (
		maxCost uint64
		task    *servingTask
	)
	p.responseCount++
	responseCount := p.responseCount

	// accept returns an indicator of whether the request can be served.
	// If so, deduct the max cost from the flow control buffer.
	accept := func(reqID, reqCnt, maxCnt uint64) bool {
		// Short circuit if the peer is already frozen or the request is invalid.
		inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
		if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt {
			p.fcClient.OneTimeCost(inSizeCost)
			return false
		}
		// Prepay max cost units before the request is served.
		maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt)
		accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
		if !accepted {
			p.freeze()
			p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
			p.fcClient.OneTimeCost(inSizeCost)
			return false
		}
		// Create a multi-stage task, estimate the time it takes for the task to
		// execute, and cache it in the request service queue.
		factor := h.server.costTracker.globalFactor()
		if factor < 0.001 {
			factor = 1
			p.Log().Error("Invalid global cost factor", "factor", factor)
		}
		maxTime := uint64(float64(maxCost) / factor)
		task = h.server.servingQueue.newTask(p, maxTime, priority)
		if task.start() {
			return true
		}
		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
		return false
	}
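
	// Worked example of the freeze duration logged above (values are
	// illustrative, not taken from any live network): bufShort is expressed in
	// cost units and MinRecharge in cost units per millisecond, so a client
	// whose buffer is short by 150000 units with MinRecharge = 100 must wait
	// roughly 150000/100 = 1500 ms (bufShort*1000000/MinRecharge nanoseconds)
	// before its buffer has recharged enough for the request to be accepted.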

	// sendResponse sends back the response and updates the flow control statistic.
	sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
		p.responseLock.Lock()
		defer p.responseLock.Unlock()

		// Short circuit if the client is already frozen.
		if p.isFrozen() {
			realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0)
			p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
			return
		}
		// Positive correction buffer value with real cost.
		var replySize uint32
		if reply != nil {
			replySize = reply.size()
		}
		var realCost uint64
		if h.server.costTracker.testing {
			realCost = maxCost // Assign a fake cost for testing purpose
		} else {
			realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize)
			if realCost > maxCost {
				realCost = maxCost
			}
		}
		bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
		if amount != 0 {
			// Feed cost tracker request serving statistic.
			h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
			// Reduce priority "balance" for the specific peer.
			p.balance.RequestServed(realCost)
		}
		if reply != nil {
			p.queueSend(func() {
				if err := reply.send(bv); err != nil {
					select {
					case p.errCh <- err:
					default:
					}
				}
			})
		}
	}
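
	// Note on the accounting above: the client pre-paid maxCost when the
	// request was accepted, and RequestProcessed credits back the difference
	// between maxCost and realCost, so capping realCost at maxCost ensures the
	// client is never charged more than it agreed to up front.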
	switch msg.Code {
	case GetBlockHeadersMsg:
		p.Log().Trace("Received block header request")
		if metrics.EnabledExpensive {
			miscInHeaderPacketsMeter.Mark(1)
			miscInHeaderTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Query getBlockHeadersData
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "%v: %v", msg, err)
		}
		query := req.Query
		if accept(req.ReqID, query.Amount, MaxHeaderFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				hashMode := query.Origin.Hash != (common.Hash{})
				first := true
				maxNonCanonical := uint64(100)

				// Gather headers until the fetch or network limits are reached
				var (
					bytes   common.StorageSize
					headers []*types.Header
					unknown bool
				)
				for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
					if !first && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Retrieve the next header satisfying the query
					var origin *types.Header
					if hashMode {
						if first {
							origin = h.blockchain.GetHeaderByHash(query.Origin.Hash)
							if origin != nil {
								query.Origin.Number = origin.Number.Uint64()
							}
						} else {
							origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
						}
					} else {
						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number)
					}
					if origin == nil {
						break
					}
					headers = append(headers, origin)
					bytes += estHeaderRlpSize

					// Advance to the next header of the query
					switch {
					case hashMode && query.Reverse:
						// Hash based traversal towards the genesis block
						ancestor := query.Skip + 1
						if ancestor == 0 {
							unknown = true
						} else {
							query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
							unknown = query.Origin.Hash == common.Hash{}
						}
					case hashMode && !query.Reverse:
						// Hash based traversal towards the leaf block
						var (
							current = origin.Number.Uint64()
							next    = current + query.Skip + 1
						)
						if next <= current {
							infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
							p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
							unknown = true
						} else {
							if header := h.blockchain.GetHeaderByNumber(next); header != nil {
								nextHash := header.Hash()
								expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
								if expOldHash == query.Origin.Hash {
									query.Origin.Hash, query.Origin.Number = nextHash, next
								} else {
									unknown = true
								}
							} else {
								unknown = true
							}
						}
					case query.Reverse:
						// Number based traversal towards the genesis block
						if query.Origin.Number >= query.Skip+1 {
							query.Origin.Number -= query.Skip + 1
						} else {
							unknown = true
						}
					case !query.Reverse:
						// Number based traversal towards the leaf block
						query.Origin.Number += query.Skip + 1
					}
					first = false
				}
				reply := p.replyBlockHeaders(req.ReqID, headers)
				sendResponse(req.ReqID, query.Amount, reply, task.done())
				if metrics.EnabledExpensive {
					miscOutHeaderPacketsMeter.Mark(1)
					miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
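
		// As a concrete illustration of the query shape handled above, a
		// client fetching a full batch of headers forward from genesis would
		// send the following request (illustrative values; getBlockHeadersData
		// and hashOrNumber are the request types decoded above):
		//
		//	query := getBlockHeadersData{
		//		Origin:  hashOrNumber{Number: 0},
		//		Amount:  MaxHeaderFetch,
		//		Skip:    0,
		//		Reverse: false,
		//	}
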
	case GetBlockBodiesMsg:
		p.Log().Trace("Received block bodies request")
		if metrics.EnabledExpensive {
			miscInBodyPacketsMeter.Mark(1)
			miscInBodyTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes  int
			bodies []rlp.RawValue
		)
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if bytes >= softResponseLimit {
						break
					}
					body := h.blockchain.GetBodyRLP(hash)
					if body == nil {
						p.bumpInvalid()
						continue
					}
					bodies = append(bodies, body)
					bytes += len(body)
				}
				reply := p.replyBlockBodiesRLP(req.ReqID, bodies)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutBodyPacketsMeter.Mark(1)
					miscOutBodyTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeBodyTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	case GetCodeMsg:
		p.Log().Trace("Received code request")
		if metrics.EnabledExpensive {
			miscInCodePacketsMeter.Mark(1)
			miscInCodeTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []CodeReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes int
			data  [][]byte
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Look up the root hash belonging to the request
					header := h.blockchain.GetHeaderByHash(request.BHash)
					if header == nil {
						p.Log().Warn("Failed to retrieve associated header for code", "hash", request.BHash)
						p.bumpInvalid()
						continue
					}
					// Refuse to search stale state data in the database since looking for
					// a non-existent key is rather expensive.
					local := h.blockchain.CurrentHeader().Number.Uint64()
					if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
						p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
						p.bumpInvalid()
						continue
					}
					triedb := h.blockchain.StateCache().TrieDB()

					account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))
					if err != nil {
						p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
						p.bumpInvalid()
						continue
					}
					code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))
					if err != nil {
						p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
						continue
					}
					// Accumulate the code and abort if enough data was retrieved
					data = append(data, code)
					if bytes += len(code); bytes >= softResponseLimit {
						break
					}
				}
				reply := p.replyCode(req.ReqID, data)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutCodePacketsMeter.Mark(1)
					miscOutCodeTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeCodeTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	case GetReceiptsMsg:
		p.Log().Trace("Received receipts request")
		if metrics.EnabledExpensive {
			miscInReceiptPacketsMeter.Mark(1)
			miscInReceiptTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		var (
			bytes    int
			receipts []rlp.RawValue
		)
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if bytes >= softResponseLimit {
						break
					}
					// Retrieve the requested block's receipts, skipping if unknown to us
					results := h.blockchain.GetReceiptsByHash(hash)
					if results == nil {
						if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
							p.bumpInvalid()
							continue
						}
					}
					// If known, encode and queue for response packet
					if encoded, err := rlp.EncodeToBytes(results); err != nil {
						log.Error("Failed to encode receipt", "err", err)
					} else {
						receipts = append(receipts, encoded)
						bytes += len(encoded)
					}
				}
				reply := p.replyReceiptsRLP(req.ReqID, receipts)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutReceiptPacketsMeter.Mark(1)
					miscOutReceiptTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	case GetProofsV2Msg:
		p.Log().Trace("Received les/2 proofs request")
		if metrics.EnabledExpensive {
			miscInTrieProofPacketsMeter.Mark(1)
			miscInTrieProofTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []ProofReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Gather state data until the fetch or network limits are reached
		var (
			lastBHash common.Hash
			root      common.Hash
			header    *types.Header
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				nodes := light.NewNodeSet()

				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// Look up the root hash belonging to the request
					if request.BHash != lastBHash {
						root, lastBHash = common.Hash{}, request.BHash

						if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil {
							p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
							p.bumpInvalid()
							continue
						}
						// Refuse to search stale state data in the database since looking for
						// a non-existent key is rather expensive.
						local := h.blockchain.CurrentHeader().Number.Uint64()
						if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local {
							p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
							p.bumpInvalid()
							continue
						}
						root = header.Root
					}
					// If a header lookup failed (non-existent), ignore subsequent requests for the same header
					if root == (common.Hash{}) {
						p.bumpInvalid()
						continue
					}
					// Open the account or storage trie for the request
					statedb := h.blockchain.StateCache()

					var trie state.Trie
					switch len(request.AccKey) {
					case 0:
						// No account key specified, open an account trie
						trie, err = statedb.OpenTrie(root)
						if trie == nil || err != nil {
							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
							continue
						}
					default:
						// Account key specified, open a storage trie
						account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))
						if err != nil {
							p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err)
							p.bumpInvalid()
							continue
						}
						trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
						if trie == nil || err != nil {
							p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
							continue
						}
					}
					// Prove the user's request from the account or storage trie
					if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil {
						p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
						continue
					}
					if nodes.DataSize() >= softResponseLimit {
						break
					}
				}
				reply := p.replyProofsV2(req.ReqID, nodes.NodeList())
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTrieProofPacketsMeter.Mark(1)
					miscOutTrieProofTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
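
		// For reference, each ProofReq decoded above names a block (BHash), an
		// optional account (AccKey, empty when proving against the account
		// trie) and the key to prove (Key). An account proof request could
		// look like this (illustrative; head and addr are assumed variables,
		// and secure-trie keys are the keccak256 hash of the address):
		//
		//	ProofReq{BHash: head.Hash(), Key: crypto.Keccak256(addr.Bytes())}
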
	case GetHelperTrieProofsMsg:
		p.Log().Trace("Received helper trie proof request")
		if metrics.EnabledExpensive {
			miscInHelperTriePacketsMeter.Mark(1)
			miscInHelperTrieTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Reqs  []HelperTrieReq
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		// Gather state data until the fetch or network limits are reached
		var (
			auxBytes int
			auxData  [][]byte
		)
		reqCnt := len(req.Reqs)
		if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				var (
					lastIdx  uint64
					lastType uint
					root     common.Hash
					auxTrie  *trie.Trie
				)
				nodes := light.NewNodeSet()
				for i, request := range req.Reqs {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {
						auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx

						var prefix string
						if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) {
							auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
						}
					}
					if auxTrie == nil {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					// TODO(rjl493456442) short circuit if the proving is failed.
					// The original client side code has a dirty hack to retrieve
					// the headers with no valid proof. Keep the compatibility for
					// legacy les protocol and drop this hack when the les2/3 are
					// not supported.
					err := auxTrie.Prove(request.Key, request.FromLevel, nodes)
					if p.version >= lpv4 && err != nil {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					if request.AuxReq == htAuxHeader {
						data := h.getAuxiliaryHeaders(request)
						auxData = append(auxData, data)
						auxBytes += len(data)
					}
					if nodes.DataSize()+auxBytes >= softResponseLimit {
						break
					}
				}
				reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutHelperTriePacketsMeter.Mark(1)
					miscOutHelperTrieTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	case SendTxV2Msg:
		p.Log().Trace("Received new transactions")
		if metrics.EnabledExpensive {
			miscInTxsPacketsMeter.Mark(1)
			miscInTxsTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID uint64
			Txs   []*types.Transaction
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		reqCnt := len(req.Txs)
		if accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				stats := make([]light.TxStatus, len(req.Txs))
				for i, tx := range req.Txs {
					if i != 0 && !task.waitOrStop() {
						return
					}
					hash := tx.Hash()
					stats[i] = h.txStatus(hash)
					if stats[i].Status == core.TxStatusUnknown {
						addFn := h.txpool.AddRemotes
						// Add txs synchronously for testing purposes
						if h.addTxsSync {
							addFn = h.txpool.AddRemotesSync
						}
						if errs := addFn([]*types.Transaction{tx}); errs[0] != nil {
							stats[i].Error = errs[0].Error()
							continue
						}
						stats[i] = h.txStatus(hash)
					}
				}
				reply := p.replyTxStatus(req.ReqID, stats)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTxsPacketsMeter.Mark(1)
					miscOutTxsTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTxTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	case GetTxStatusMsg:
		p.Log().Trace("Received transaction status query request")
		if metrics.EnabledExpensive {
			miscInTxStatusPacketsMeter.Mark(1)
			miscInTxStatusTrafficMeter.Mark(int64(msg.Size))
		}
		var req struct {
			ReqID  uint64
			Hashes []common.Hash
		}
		if err := msg.Decode(&req); err != nil {
			clientErrorMeter.Mark(1)
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		reqCnt := len(req.Hashes)
		if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
			wg.Add(1)
			go func() {
				defer wg.Done()
				stats := make([]light.TxStatus, len(req.Hashes))
				for i, hash := range req.Hashes {
					if i != 0 && !task.waitOrStop() {
						sendResponse(req.ReqID, 0, nil, task.servingTime)
						return
					}
					stats[i] = h.txStatus(hash)
				}
				reply := p.replyTxStatus(req.ReqID, stats)
				sendResponse(req.ReqID, uint64(reqCnt), reply, task.done())
				if metrics.EnabledExpensive {
					miscOutTxStatusPacketsMeter.Mark(1)
					miscOutTxStatusTrafficMeter.Mark(int64(reply.size()))
					miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime))
				}
			}()
		}
	default:
		p.Log().Trace("Received invalid message", "code", msg.Code)
		clientErrorMeter.Mark(1)
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	// If the client has made too many invalid requests (e.g. requesting
	// non-existent data), reject it to prevent spam attacks.
	if p.getInvalid() > maxRequestErrors {
		clientErrorMeter.Mark(1)
		return errTooManyInvalidRequest
	}
	return nil
}

// getAccount retrieves an account from the state based on root.
func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
	trie, err := trie.New(root, triedb)
	if err != nil {
		return state.Account{}, err
	}
	blob, err := trie.TryGet(hash[:])
	if err != nil {
		return state.Account{}, err
	}
	var account state.Account
	if err = rlp.DecodeBytes(blob, &account); err != nil {
		return state.Account{}, err
	}
	return account, nil
}

// getHelperTrie returns the post-processed trie root for the given trie ID and section index
func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) {
	switch typ {
	case htCanonical:
		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)
		return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix
	case htBloomBits:
		sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)
		return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix
	}
	return common.Hash{}, ""
}
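
// As a worked example of the section arithmetic in getHelperTrie (illustrative
// numbers, assuming the default section size of 32768 blocks): a request for
// the canonical helper trie with index 1 resolves the canonical hash of block
// (1+1)*32768-1 = 65535 as the section head and returns the CHT root stored
// for that section head.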

// getAuxiliaryHeaders returns requested auxiliary headers for the CHT request.
func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte {
	if req.Type == htCanonical && req.AuxReq == htAuxHeader && len(req.Key) == 8 {
		blockNum := binary.BigEndian.Uint64(req.Key)
		hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum)
		return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum)
	}
	return nil
}

// txStatus returns the status of a specified transaction.
func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus {
	var stat light.TxStatus
	// Look up the transaction in the txpool first.
	stat.Status = h.txpool.Status([]common.Hash{hash})[0]

	// If the transaction is unknown to the pool, try looking it up locally.
	if stat.Status == core.TxStatusUnknown {
		lookup := h.blockchain.GetTransactionLookup(hash)
		if lookup != nil {
			stat.Status = core.TxStatusIncluded
			stat.Lookup = lookup
		}
	}
	return stat
}
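
// Note that stat.Lookup is only populated for included transactions; it
// carries the block hash, block number and transaction index that a light
// client needs in order to request the enclosing block or a receipt proof.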

// broadcastLoop broadcasts new block information to all connected light
// clients. According to the agreement between client and server, the server
// should only broadcast a new announcement if its total difficulty is higher
// than the last one. In addition, the server will sign the announcement if
// the client requires it.
func (h *serverHandler) broadcastLoop() {
	defer h.wg.Done()

	headCh := make(chan core.ChainHeadEvent, 10)
	headSub := h.blockchain.SubscribeChainHeadEvent(headCh)
	defer headSub.Unsubscribe()

	var (
		lastHead *types.Header
		lastTd   = common.Big0
	)
	for {
		select {
		case ev := <-headCh:
			header := ev.Block.Header()
			hash, number := header.Hash(), header.Number.Uint64()
			td := h.blockchain.GetTd(hash, number)
			if td == nil || td.Cmp(lastTd) <= 0 {
				continue
			}
			// Measure the reorg depth as the distance from the previous head
			// to the common ancestor of the old and new chains.
			var reorg uint64
			if lastHead != nil {
				reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()
			}
			lastHead, lastTd = header, td
			log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
			h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
		case <-h.closeCh:
			return
		}
	}
}

// broadcaster sends new header announcements to active client peers
type broadcaster struct {
	ns                           *nodestate.NodeStateMachine
	privateKey                   *ecdsa.PrivateKey
	lastAnnounce, signedAnnounce announceData
}

// newBroadcaster creates a new broadcaster
func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster {
	b := &broadcaster{ns: ns}
	ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(priorityPoolSetup.ActiveFlag) {
			// send last announcement to activated peers
			b.sendTo(node)
		}
	})
	return b
}

// setSignerKey sets the signer key for signed announcements. Should be called before
// starting the protocol handler.
func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) {
	b.privateKey = privateKey
}

// broadcast sends the given announcements to all active peers
func (b *broadcaster) broadcast(announce announceData) {
	b.ns.Operation(func() {
		// iterate in an Operation to ensure that the active set does not change while iterating
		b.lastAnnounce = announce
		b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			b.sendTo(node)
		})
	})
}

// sendTo sends the most recent announcement to the given node unless the same or higher Td
// announcement has already been sent.
func (b *broadcaster) sendTo(node *enode.Node) {
	if b.lastAnnounce.Td == nil {
		return
	}
	if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil {
		if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {
			// Copy the announcement into the closure below so that a later
			// broadcast cannot swap it out before the queued send executes.
			announce := b.lastAnnounce
			switch p.announceType {
			case announceTypeSimple:
				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
				} else {
					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
				}
			case announceTypeSigned:
				if b.signedAnnounce.Hash != b.lastAnnounce.Hash {
					b.signedAnnounce = b.lastAnnounce
					b.signedAnnounce.sign(b.privateKey)
				}
				announce := b.signedAnnounce
				if !p.queueSend(func() { p.sendAnnounce(announce) }) {
					log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash)
				} else {
					log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash)
				}
			}
			p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td}
		}
	}
}