mirror of https://github.com/status-im/op-geth.git
Merge pull request #1064 from karalabe/downloader-attacks
Fix two additional download vulnerabilities
commit af28736bd0
@@ -15,8 +15,10 @@ import (
 )

 const (
-    maxHashFetch  = 512 // Amount of hashes to be fetched per chunk
-    maxBlockFetch = 128 // Amount of blocks to be fetched per chunk
+    MinHashFetch  = 512  // Minimum amount of hashes to not consider a peer stalling
+    MaxHashFetch  = 2048 // Amount of hashes to be fetched per retrieval request
+    MaxBlockFetch = 128  // Amount of blocks to be fetched per retrieval request
+
     peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
     hashTTL          = 5 * time.Second  // Time it takes for a hash request to time out
 )
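As an aside (not part of the diff): the single internal maxHashFetch constant is split into two exported roles here, a floor used to detect stalling peers and a ceiling used when requesting hashes. A minimal sketch of how the three limits relate, with the values taken from the hunk above:

package sketch

const (
    MinHashFetch  = 512  // a non-final hash response shorter than this flags the peer as stalling
    MaxHashFetch  = 2048 // how many hashes a single GetBlockHashes request now asks for
    MaxBlockFetch = 128  // how many blocks a single block retrieval request asks for
)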
@@ -28,10 +30,11 @@ var (
 )

 var (
-    errLowTd        = errors.New("peer's TD is too low")
+    errLowTd        = errors.New("peers TD is too low")
     ErrBusy         = errors.New("busy")
-    errUnknownPeer  = errors.New("peer's unknown or unhealthy")
+    errUnknownPeer  = errors.New("peer is unknown or unhealthy")
     ErrBadPeer      = errors.New("action from bad peer ignored")
+    ErrStallingPeer = errors.New("peer is stalling")
     errNoPeers      = errors.New("no peers to keep download active")
     ErrPendingQueue = errors.New("pending items in queue")
     ErrTimeout      = errors.New("timeout")
@@ -60,13 +63,18 @@ type hashPack struct {
     hashes []common.Hash
 }

+type crossCheck struct {
+    expire time.Time
+    parent common.Hash
+}
+
 type Downloader struct {
     mux *event.TypeMux

     mu     sync.RWMutex
     queue  *queue                      // Scheduler for selecting the hashes to download
     peers  *peerSet                    // Set of active peers from which download can proceed
-    checks map[common.Hash]time.Time   // Pending cross checks to verify a hash chain
+    checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain

     // Callbacks
     hasBlock hashCheckFn
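An aside on the data-shape change above: checks used to map a hash to nothing more than a deadline (map[common.Hash]time.Time), so on delivery the downloader could only confirm that some block arrived in time. The new record also pins the parent the cross-checked block must report. A minimal sketch of one pending entry (field names as in the diff, the surrounding package is illustrative):

package sketch

import (
    "time"

    "github.com/ethereum/go-ethereum/common"
)

// crossCheck describes one block picked at random from an advertised hash
// batch that the peer must be able to produce:
//   - expire: when an unanswered check fails the whole hash chain
//   - parent: the hash that must show up as the block's parent on delivery
type crossCheck struct {
    expire time.Time
    parent common.Hash
}

// checks plays the role of Downloader.checks: origin hash -> pending check.
var checks = make(map[common.Hash]*crossCheck)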
@@ -157,7 +165,7 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
     // Reset the queue and peer set to clean any internal leftover state
     d.queue.Reset()
     d.peers.Reset()
-    d.checks = make(map[common.Hash]time.Time)
+    d.checks = make(map[common.Hash]*crossCheck)

     // Retrieve the origin peer and initiate the downloading process
     p := d.peers.Peer(id)
@@ -283,15 +291,22 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
             return ErrBadPeer
         }
         if !done {
+            // Check that the peer is not stalling the sync
+            if len(inserts) < MinHashFetch {
+                return ErrStallingPeer
+            }
             // Try and fetch a random block to verify the hash batch
             // Skip the last hash as the cross check races with the next hash fetch
-            if len(inserts) > 1 {
-                cross := inserts[rand.Intn(len(inserts)-1)]
-                glog.V(logger.Detail).Infof("Cross checking (%s) with %x", active.id, cross)
-
-                d.checks[cross] = time.Now().Add(blockTTL)
-                active.getBlocks([]common.Hash{cross})
-            }
+            cross := rand.Intn(len(inserts) - 1)
+            origin, parent := inserts[cross], inserts[cross+1]
+            glog.V(logger.Detail).Infof("Cross checking (%s) with %x/%x", active.id, origin, parent)
+
+            d.checks[origin] = &crossCheck{
+                expire: time.Now().Add(blockTTL),
+                parent: parent,
+            }
+            active.getBlocks([]common.Hash{origin})
+
             // Also fetch a fresh
             active.getHashes(head)
             continue
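A condensed, standalone sketch of the two guards this hunk adds on the hash-fetch path, applied to a batch that arrives while the sync is still in progress (the !done branch). MinHashFetch, ErrStallingPeer and the crossCheck fields match the diff; the function name and signature are illustrative:

package sketch

import (
    "errors"
    "math/rand"
    "time"

    "github.com/ethereum/go-ethereum/common"
)

const MinHashFetch = 512 // as introduced above

var ErrStallingPeer = errors.New("peer is stalling")

type crossCheck struct {
    expire time.Time
    parent common.Hash
}

// scheduleCrossCheck enforces the two new rules on a delivered hash batch:
// reject peers that trickle fewer hashes than MinHashFetch, then pick a random
// hash (never the last one) and record which parent its block must report.
// The returned origin is the hash whose block the caller should fetch.
func scheduleCrossCheck(inserts []common.Hash, checks map[common.Hash]*crossCheck, blockTTL time.Duration) (common.Hash, error) {
    if len(inserts) < MinHashFetch {
        return common.Hash{}, ErrStallingPeer
    }
    cross := rand.Intn(len(inserts) - 1)
    origin, parent := inserts[cross], inserts[cross+1]

    checks[origin] = &crossCheck{
        expire: time.Now().Add(blockTTL),
        parent: parent,
    }
    return origin, nil
}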
@@ -310,8 +325,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
             continue
         }
         block := blockPack.blocks[0]
-        if _, ok := d.checks[block.Hash()]; ok {
-            if !d.queue.Has(block.ParentHash()) {
+        if check, ok := d.checks[block.Hash()]; ok {
+            if block.ParentHash() != check.parent {
                 return ErrCrossCheckFailed
             }
             delete(d.checks, block.Hash())
@@ -319,8 +334,8 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {

     case <-crossTicker.C:
         // Iterate over all the cross checks and fail the hash chain if they're not verified
-        for hash, deadline := range d.checks {
-            if time.Now().After(deadline) {
+        for hash, check := range d.checks {
+            if time.Now().After(check.expire) {
                 glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
                 return ErrCrossCheckFailed
             }
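And the matching verification side, condensing the two hunks above into standalone helpers. Blocks are represented here only by their own hash and parent hash; the crossCheck fields match the diff, everything else (names, the error message) is illustrative:

package sketch

import (
    "errors"
    "time"

    "github.com/ethereum/go-ethereum/common"
)

// errCrossCheckFailed stands in for the downloader's ErrCrossCheckFailed.
var errCrossCheckFailed = errors.New("block cross-check failed")

type crossCheck struct {
    expire time.Time
    parent common.Hash
}

// onBlock is the delivery path: a cross-checked block must now report exactly
// the parent recorded when the check was scheduled, instead of merely having
// some parent that happens to be queued already.
func onBlock(checks map[common.Hash]*crossCheck, hash, parentHash common.Hash) error {
    if check, ok := checks[hash]; ok {
        if parentHash != check.parent {
            return errCrossCheckFailed
        }
        delete(checks, hash)
    }
    return nil
}

// onTick is the timeout path: any cross check left unanswered past its TTL
// fails the whole hash chain.
func onTick(checks map[common.Hash]*crossCheck) error {
    for _, check := range checks {
        if time.Now().After(check.expire) {
            return errCrossCheckFailed
        }
    }
    return nil
}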
@@ -438,7 +453,7 @@ out:
         }
         // Get a possible chunk. If nil is returned no chunk
         // could be returned due to no hashes available.
-        request := d.queue.Reserve(peer, maxBlockFetch)
+        request := d.queue.Reserve(peer, MaxBlockFetch)
         if request == nil {
             continue
         }
@@ -53,6 +53,8 @@ type downloadTester struct {
     blocks map[common.Hash]*types.Block // Blocks associated with the hashes
     chain  []common.Hash                // Block-chain being constructed

+    maxHashFetch int // Overrides the maximum number of retrieved hashes
+
     t      *testing.T
     pcount int
     done   chan bool
@@ -133,8 +135,12 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {

 // getHashes retrieves a batch of hashes for reconstructing the chain.
 func (dl *downloadTester) getHashes(head common.Hash) error {
+    limit := MaxHashFetch
+    if dl.maxHashFetch > 0 {
+        limit = dl.maxHashFetch
+    }
     // Gather the next batch of hashes
-    hashes := make([]common.Hash, 0, maxHashFetch)
+    hashes := make([]common.Hash, 0, limit)
     for i, hash := range dl.hashes {
         if hash == head {
             i++
@@ -469,6 +475,23 @@ func TestMadeupHashChainAttack(t *testing.T) {
     }
 }

+// Tests that if a malicious peer makes up a random hash chain, and tries to push
+// indefinitely, one hash at a time, it actually gets caught with it. The reason
+// this is separate from the classical made up chain attack is that sending hashes
+// one by one prevents reliable block/parent verification.
+func TestMadeupHashChainDrippingAttack(t *testing.T) {
+    // Create a random chain of hashes to drip
+    hashes := createHashes(0, 16*blockCacheLimit)
+    tester := newTester(t, hashes, nil)
+
+    // Try and sync with the attacker, one hash at a time
+    tester.maxHashFetch = 1
+    tester.newPeer("attack", big.NewInt(10000), hashes[0])
+    if _, err := tester.syncTake("attack", hashes[0]); err != ErrStallingPeer {
+        t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrStallingPeer)
+    }
+}
+
 // Tests that if a malicious peer makes up a random block chain, and tried to
 // push indefinitely, it actually gets caught with it.
 func TestMadeupBlockChainAttack(t *testing.T) {
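Why dripping one hash at a time used to work: the block/parent cross check needs at least two hashes from the same batch, so a peer answering every request with a single made-up hash was never verified and could keep the downloader busy indefinitely. With the new floor, the first non-final batch shorter than MinHashFetch already aborts the sync with ErrStallingPeer. A rough sketch of the peer behaviour that the tester.maxHashFetch = 1 override simulates above (the helper is illustrative, not the tester's actual code):

package sketch

import "github.com/ethereum/go-ethereum/common"

// dripHashes answers every hash request with at most one hash following the
// requested head, mimicking an attacker that feeds a made-up chain one link
// at a time.
func dripHashes(chain []common.Hash, head common.Hash) []common.Hash {
    for i, hash := range chain {
        if hash == head && i+1 < len(chain) {
            return chain[i+1 : i+2] // a single hash per response
        }
    }
    return nil
}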
|
@ -479,7 +502,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
||||||
crossCheckCycle = 25 * time.Millisecond
|
crossCheckCycle = 25 * time.Millisecond
|
||||||
|
|
||||||
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
||||||
hashes := createHashes(0, 32*blockCacheLimit)
|
hashes := createHashes(0, 16*blockCacheLimit)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
gapped := make([]common.Hash, len(hashes)/2)
|
gapped := make([]common.Hash, len(hashes)/2)
|
||||||
|
@@ -502,3 +525,37 @@ func TestMadeupBlockChainAttack(t *testing.T) {
         t.Fatalf("failed to synchronise blocks: %v", err)
     }
 }
+
+// Advanced form of the above forged blockchain attack, where not only does the
+// attacker make up a valid hashes for random blocks, but also forges the block
+// parents to point to existing hashes.
+func TestMadeupParentBlockChainAttack(t *testing.T) {
+    defaultBlockTTL := blockTTL
+    defaultCrossCheckCycle := crossCheckCycle
+
+    blockTTL = 100 * time.Millisecond
+    crossCheckCycle = 25 * time.Millisecond
+
+    // Create a long chain of blocks and simulate an invalid chain by dropping every second
+    hashes := createHashes(0, 16*blockCacheLimit)
+    blocks := createBlocksFromHashes(hashes)
+    forges := createBlocksFromHashes(hashes)
+    for hash, block := range forges {
+        block.ParentHeaderHash = hash // Simulate pointing to already known hash
+    }
+    // Try and sync with the malicious node and check that it fails
+    tester := newTester(t, hashes, forges)
+    tester.newPeer("attack", big.NewInt(10000), hashes[0])
+    if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
+        t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
+    }
+    // Ensure that a valid chain can still pass sync
+    blockTTL = defaultBlockTTL
+    crossCheckCycle = defaultCrossCheckCycle
+
+    tester.blocks = blocks
+    tester.newPeer("valid", big.NewInt(20000), hashes[0])
+    if _, err := tester.syncTake("valid", hashes[0]); err != nil {
+        t.Fatalf("failed to synchronise blocks: %v", err)
+    }
+}
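The parent recorded in crossCheck is what makes this new test pass: the old delivery check only asked whether the block's parent was some hash already in the download queue, so forged blocks whose ParentHeaderHash points at an already-known hash (here, the block's own hash, as set in the loop above) sailed through. Comparing against the exact parent advertised in the batch closes that hole. A minimal illustration of the difference (names and the simplified queue are illustrative):

package sketch

import "github.com/ethereum/go-ethereum/common"

// oldCheck accepted the cross-checked block if its parent was *any* queued
// hash, which a forged parent pointing at a known hash satisfies trivially.
func oldCheck(queued map[common.Hash]bool, parentHash common.Hash) bool {
    return queued[parentHash]
}

// newCheck accepts the block only if its parent is exactly the hash that
// followed the origin in the advertised batch (crossCheck.parent).
func newCheck(expectedParent, parentHash common.Hash) bool {
    return parentHash == expectedParent
}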
@@ -17,7 +17,7 @@ import (
 )

 const (
-    blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+    blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
 )

 // fetchRequest is a currently running block retrieval operation.
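A side note on the value: with MaxBlockFetch = 128 from the downloader hunk above, 8 * MaxBlockFetch is still 1024, so the cache size does not actually change; the limit is simply derived from the exported constant now instead of being a second hard-coded copy that could drift. In sketch form:

package sketch

const (
    MaxBlockFetch   = 128               // per-request block limit exported by the downloader
    blockCacheLimit = 8 * MaxBlockFetch // == 1024, numerically the same as before
)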
@@ -206,8 +206,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
         return errResp(ErrDecode, "->msg %v: %v", msg, err)
     }

-    if request.Amount > maxHashes {
-        request.Amount = maxHashes
+    if request.Amount > downloader.MaxHashFetch {
+        request.Amount = downloader.MaxHashFetch
     }

     hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
@@ -254,7 +254,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
         if block != nil {
             blocks = append(blocks, block)
         }
-        if i == maxBlocks {
+        if i == downloader.MaxBlockFetch {
             break
         }
     }
@@ -8,6 +8,7 @@ import (

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/eth/downloader"
     "github.com/ethereum/go-ethereum/logger"
     "github.com/ethereum/go-ethereum/logger/glog"
     "github.com/ethereum/go-ethereum/p2p"
|
@ -100,8 +101,8 @@ func (p *peer) sendTransaction(tx *types.Transaction) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *peer) requestHashes(from common.Hash) error {
|
func (p *peer) requestHashes(from common.Hash) error {
|
||||||
glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, maxHashes, from[:4])
|
glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4])
|
||||||
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes})
|
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, downloader.MaxHashFetch})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *peer) requestBlocks(hashes []common.Hash) error {
|
func (p *peer) requestBlocks(hashes []common.Hash) error {
|
||||||
|
|
|
@@ -12,8 +12,6 @@ const (
     NetworkId          = 0
     ProtocolLength     = uint64(8)
     ProtocolMaxMsgSize = 10 * 1024 * 1024
-    maxHashes          = 512
-    maxBlocks          = 128
 )

 // eth protocol message codes