eth, les: add sanity checks for unbounded block fields (#19573)

This PR adds some hardening to the lower levels of the protocol stack, so that invalid data is rejected early. The attacks it primarily protects against are at the "annoyance" level: without these checks, a malicious peer could cause a couple of megabytes of junk data to be written to the log output, which is somewhat resource intensive.
Martin Holst Swende 2019-07-08 11:42:22 +02:00 committed by Felix Lange
parent 5bc9ccfa0a
commit cdfe9a3a2a
6 changed files with 57 additions and 5 deletions

core/types/block.go

@@ -19,6 +19,7 @@ package types
 import (
 	"encoding/binary"
+	"fmt"
 	"io"
 	"math/big"
 	"reflect"
@@ -110,6 +111,25 @@ func (h *Header) Size() common.StorageSize {
 	return headerSize + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen())/8)
 }
 
+// SanityCheck checks a few basic things -- these checks are way beyond what
+// any 'sane' production values should hold, and can mainly be used to prevent
+// that the unbounded fields are stuffed with junk data to add processing
+// overhead
+func (h *Header) SanityCheck() error {
+	if h.Number != nil && !h.Number.IsUint64() {
+		return fmt.Errorf("too large block number: bitlen %d", h.Number.BitLen())
+	}
+	if h.Difficulty != nil {
+		if diffLen := h.Difficulty.BitLen(); diffLen > 80 {
+			return fmt.Errorf("too large block difficulty: bitlen %d", diffLen)
+		}
+	}
+	if eLen := len(h.Extra); eLen > 100*1024 {
+		return fmt.Errorf("too large block extradata: size %d", eLen)
+	}
+	return nil
+}
+
 func rlpHash(x interface{}) (h common.Hash) {
 	hw := sha3.NewLegacyKeccak256()
 	rlp.Encode(hw, x)
@@ -316,6 +336,12 @@ func (b *Block) Size() common.StorageSize {
 	return common.StorageSize(c)
 }
 
+// SanityCheck can be used to prevent that unbounded fields are
+// stuffed with junk data to add processing overhead
+func (b *Block) SanityCheck() error {
+	return b.header.SanityCheck()
+}
+
 type writeCounter common.StorageSize
 
 func (c *writeCounter) Write(b []byte) (int, error) {

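As an illustration (not part of the commit), here is a minimal sketch of the new check in use: a header whose unbounded fields are stuffed with junk is rejected up front, before any further processing. It assumes only the Header fields and the SanityCheck method added above, plus the existing types.NewBlockWithHeader constructor.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// A header stuffed with junk in its unbounded fields: an absurdly wide
	// difficulty (bitlen 201 > 80) and 2 MB of extradata (> 100*1024 bytes).
	junk := &types.Header{
		Number:     big.NewInt(1),
		Difficulty: new(big.Int).Lsh(big.NewInt(1), 200),
		Extra:      make([]byte, 2*1024*1024),
	}
	if err := junk.SanityCheck(); err != nil {
		fmt.Println("rejected:", err) // rejected: too large block difficulty: bitlen 201
	}

	// Block.SanityCheck simply delegates to the header check.
	if err := types.NewBlockWithHeader(junk).SanityCheck(); err != nil {
		fmt.Println("rejected:", err)
	}
}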
eth/fetcher/fetcher.go

@@ -685,7 +685,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 	// Remove all pending announces and decrement DOS counters
 	for _, announce := range f.announced[hash] {
 		f.announces[announce.origin]--
-		if f.announces[announce.origin] == 0 {
+		if f.announces[announce.origin] <= 0 {
 			delete(f.announces, announce.origin)
 		}
 	}
@@ -696,7 +696,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 	// Remove any pending fetches and decrement the DOS counters
 	if announce := f.fetching[hash]; announce != nil {
 		f.announces[announce.origin]--
-		if f.announces[announce.origin] == 0 {
+		if f.announces[announce.origin] <= 0 {
 			delete(f.announces, announce.origin)
 		}
 		delete(f.fetching, hash)
@@ -705,7 +705,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 	// Remove any pending completion requests and decrement the DOS counters
 	for _, announce := range f.fetched[hash] {
 		f.announces[announce.origin]--
-		if f.announces[announce.origin] == 0 {
+		if f.announces[announce.origin] <= 0 {
 			delete(f.announces, announce.origin)
 		}
 	}
@@ -714,7 +714,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
 	// Remove any pending completions and decrement the DOS counters
 	if announce := f.completing[hash]; announce != nil {
 		f.announces[announce.origin]--
-		if f.announces[announce.origin] == 0 {
+		if f.announces[announce.origin] <= 0 {
 			delete(f.announces, announce.origin)
 		}
 		delete(f.completing, hash)

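The four comparison changes above are defensive: deleting on <= 0 rather than == 0 means that if a per-origin counter ever slipped below zero, the entry would still be removed instead of lingering in the announces map with a negative count. A generic sketch of the pattern (illustrative only, not code from the commit):

// forget decrements a per-origin counter and cleans up the entry once it is
// exhausted. Using <= 0 instead of == 0 ensures that an entry whose counter
// somehow went negative is still deleted rather than leaked.
func forget(counters map[string]int, origin string) {
	counters[origin]--
	if counters[origin] <= 0 {
		delete(counters, origin)
	}
}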
eth/handler.go

@@ -697,6 +697,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		if err := msg.Decode(&request); err != nil {
 			return errResp(ErrDecode, "%v: %v", msg, err)
 		}
+		if err := request.sanityCheck(); err != nil {
+			return err
+		}
 		request.Block.ReceivedAt = msg.ReceivedAt
 		request.Block.ReceivedFrom = p

eth/protocol.go

@@ -173,6 +173,19 @@ type newBlockData struct {
 	TD    *big.Int
 }
 
+// sanityCheck verifies that the values are reasonable, as a DoS protection
+func (request *newBlockData) sanityCheck() error {
+	if err := request.Block.SanityCheck(); err != nil {
+		return err
+	}
+	//TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
+	// larger, it will still fit within 100 bits
+	if tdlen := request.TD.BitLen(); tdlen > 100 {
+		return fmt.Errorf("too large block TD: bitlen %d", tdlen)
+	}
+	return nil
+}
+
 // blockBody represents the data content of a single block.
 type blockBody struct {
 	Transactions []*types.Transaction // Transactions contained within a block

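The TD bound added here, and applied again to les announcements below, is a cheap big.Int.BitLen() test. Since newBlockData is unexported, the stand-alone sketch below reproduces the check on a hypothetical helper (checkTD is not a name from the commit) just to show the behaviour:

package main

import (
	"fmt"
	"math/big"
)

// checkTD mirrors the bound used by the eth/les sanity checks: a total
// difficulty wider than 100 bits is treated as junk.
func checkTD(td *big.Int) error {
	if tdlen := td.BitLen(); tdlen > 100 {
		return fmt.Errorf("too large block TD: bitlen %d", tdlen)
	}
	return nil
}

func main() {
	plausible := new(big.Int).Lsh(big.NewInt(1), 80) // 2^80, bitlen 81: within bounds
	hostile := new(big.Int).Lsh(big.NewInt(1), 4096) // 2^4096, bitlen 4097: junk
	fmt.Println(checkTD(plausible)) // <nil>
	fmt.Println(checkTD(hostile))   // too large block TD: bitlen 4097
}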
les/handler.go

@@ -442,7 +442,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 		if err := msg.Decode(&req); err != nil {
 			return errResp(ErrDecode, "%v: %v", msg, err)
 		}
+		if err := req.sanityCheck(); err != nil {
+			return err
+		}
 		update, size := req.Update.decode()
 		if p.rejectUpdate(size) {
 			return errResp(ErrRequestRejected, "")

les/protocol.go

@@ -149,6 +149,14 @@ type announceData struct {
 	Update     keyValueList
 }
 
+// sanityCheck verifies that the values are reasonable, as a DoS protection
+func (a *announceData) sanityCheck() error {
+	if tdlen := a.Td.BitLen(); tdlen > 100 {
+		return fmt.Errorf("too large block TD: bitlen %d", tdlen)
+	}
+	return nil
+}
+
 // sign adds a signature to the block announcement by the given privKey
 func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
 	rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})