mirror of https://github.com/status-im/op-geth.git
p2p/discover: fix out-of-bounds slicing for chunked neighbors packets
The code assumed that Table.closest always returns at least 13 nodes. This is not true for small tables (e.g. during bootstrap).
commit 251846d65a
parent b2119d8931
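The crash is easy to reproduce in isolation: slicing a Go slice past its capacity panics at runtime. A minimal sketch of the failure mode, using a hypothetical stand-in type (the hard-coded 13 is the same constant the old handler used):

package main

// rpcNode is a stand-in for the discovery packet entry type;
// only the slice bounds matter here.
type rpcNode struct{}

func main() {
	// A small table, e.g. during bootstrap, yields fewer than 13 entries.
	closestrpc := make([]rpcNode, 5)
	_ = closestrpc[:13] // panics: slice bounds out of range [:13] with capacity 5
}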
p2p/discover/udp.go

@@ -363,7 +363,31 @@ const (
 	headSize = macSize + sigSize // space of packet frame data
 )
 
-var headSpace = make([]byte, headSize)
+var (
+	headSpace = make([]byte, headSize)
+
+	// Neighbors responses are sent across multiple packets to
+	// stay below the 1280 byte limit. We compute the maximum number
+	// of entries by stuffing a packet until it grows too large.
+	maxNeighbors int
+)
+
+func init() {
+	p := neighbors{Expiration: ^uint64(0)}
+	maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
+	for n := 0; ; n++ {
+		p.Nodes = append(p.Nodes, maxSizeNode)
+		size, _, err := rlp.EncodeToReader(p)
+		if err != nil {
+			// If this ever happens, it will be caught by the unit tests.
+			panic("cannot encode: " + err.Error())
+		}
+		if headSize+size+1 >= 1280 {
+			maxNeighbors = n
+			break
+		}
+	}
+}
 
 func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
 	packet, err := encodePacket(t.priv, ptype, req)
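The new init function avoids any hard-coded count: it fills a worst-case neighbors packet one entry at a time and RLP-encodes it until the 1280-byte frame budget would be exceeded. A self-contained sketch of the same technique, using simplified stand-ins for the unexported discovery types (the real structs may carry more fields, so the printed value need not equal the real maxNeighbors):

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/rlp"
)

// Toy stand-ins for p2p/discover's unexported packet types.
type rpcNode struct {
	IP  net.IP
	UDP uint16
	TCP uint16
}

type neighbors struct {
	Nodes      []rpcNode
	Expiration uint64
}

const headSize = 32 + 65 // macSize + sigSize, as in udp.go

func main() {
	// Worst case: every field at its maximum encoded width.
	p := neighbors{Expiration: ^uint64(0)}
	maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)}
	for n := 0; ; n++ {
		p.Nodes = append(p.Nodes, maxSizeNode)
		size, _, err := rlp.EncodeToReader(p)
		if err != nil {
			panic(err)
		}
		if headSize+size+1 >= 1280 { // +1 for the packet type byte
			fmt.Println("max entries per packet:", n)
			break
		}
	}
}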
@@ -402,6 +426,9 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
 // readLoop runs in its own goroutine. it handles incoming UDP packets.
 func (t *udp) readLoop() {
 	defer t.conn.Close()
+	// Discovery packets are defined to be no larger than 1280 bytes.
+	// Packets larger than this size will be cut at the end and treated
+	// as invalid because their hash won't match.
 	buf := make([]byte, 1280)
 	for {
 		nbytes, from, err := t.conn.ReadFromUDP(buf)
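The added comment relies on how the kernel handles oversized datagrams: the read returns at most len(buf) bytes and drops the rest, so a too-large packet arrives truncated and fails the hash check. A minimal sketch demonstrating the truncation (assuming a Unix-like system; Windows instead reports an error for a short read):

package main

import (
	"fmt"
	"net"
)

func main() {
	conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	go func() {
		c, _ := net.DialUDP("udp", nil, conn.LocalAddr().(*net.UDPAddr))
		c.Write(make([]byte, 2000)) // datagram larger than the read buffer
		c.Close()
	}()

	buf := make([]byte, 1280)
	n, _, err := conn.ReadFromUDP(buf)
	fmt.Println(n, err) // at most 1280 bytes arrive; the excess is dropped
}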
@@ -504,20 +531,15 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
 	closest := t.closest(target, bucketSize).entries
 	t.mutex.Unlock()
 
-	// TODO: this conversion could use a cached version of the slice
-	closestrpc := make([]rpcNode, len(closest))
+	p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
+	// Send neighbors in chunks with at most maxNeighbors per packet
+	// to stay below the 1280 byte limit.
 	for i, n := range closest {
-		closestrpc[i] = nodeToRPC(n)
+		p.Nodes = append(p.Nodes, nodeToRPC(n))
+		if len(p.Nodes) == maxNeighbors || i == len(closest)-1 {
+			t.send(from, neighborsPacket, p)
+			p.Nodes = p.Nodes[:0]
+		}
 	}
-	t.send(from, neighborsPacket, neighbors{
-		Nodes:      closestrpc[:13],
-		Expiration: uint64(time.Now().Add(expiration).Unix()),
-	})
-	if len(closestrpc) > 13 {
-		t.send(from, neighborsPacket, neighbors{
-			Nodes:      closestrpc[13:],
-			Expiration: uint64(time.Now().Add(expiration).Unix()),
-		})
-	}
 	return nil
 }
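The rewritten loop maintains a simple invariant: every entry is sent exactly once, no packet carries more than maxNeighbors entries, and the final partial chunk is flushed by the i == len(closest)-1 test, so short tables no longer crash and an empty one simply sends nothing. A sketch of the same chunking logic, with a hypothetical helper chunk extracted for clarity:

package main

import "fmt"

// chunk mirrors the new send loop in (*findnode).handle: at most max
// entries per group, with the final partial group flushed by the
// i == len(list)-1 check. Illustration only.
func chunk(list []string, max int) [][]string {
	var out [][]string
	var cur []string
	for i, n := range list {
		cur = append(cur, n)
		if len(cur) == max || i == len(list)-1 {
			out = append(out, append([]string(nil), cur...)) // copy before reuse
			cur = cur[:0]
		}
	}
	return out
}

func main() {
	fmt.Println(chunk([]string{"a", "b", "c", "d", "e"}, 2)) // [[a b] [c d] [e]]
	fmt.Println(chunk([]string{"a"}, 13))                    // [[a]] — no panic for short inputs
	fmt.Println(chunk(nil, 13))                              // [] — an empty table sends nothing
}

The old code's hard-coded split at 13 only worked when closest returned a full bucket; the point of the fix is that the split now adapts to whatever closest actually returns.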
p2p/discover/udp_test.go

@@ -164,26 +164,21 @@ func TestUDP_findnode(t *testing.T) {
 	// check that closest neighbors are returned.
 	test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
 	expected := test.table.closest(targetHash, bucketSize)
-	test.waitPacketOut(func(p *neighbors) {
-		if len(p.Nodes) != 13 {
-			t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
-		}
-		for i := range p.Nodes {
-			if p.Nodes[i].ID != expected.entries[i].ID {
-				t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
-			}
-		}
-	})
-	test.waitPacketOut(func(p *neighbors) {
-		if len(p.Nodes) != 3 {
-			t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
-		}
-		for i := range p.Nodes {
-			if p.Nodes[i].ID != expected.entries[i+13].ID {
-				t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
-			}
-		}
-	})
+
+	waitNeighbors := func(want []*Node) {
+		test.waitPacketOut(func(p *neighbors) {
+			if len(p.Nodes) != len(want) {
+				t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
+			}
+			for i := range p.Nodes {
+				if p.Nodes[i].ID != want[i].ID {
+					t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
+				}
+			}
+		})
+	}
+	waitNeighbors(expected.entries[:maxNeighbors])
+	waitNeighbors(expected.entries[maxNeighbors:])
 }
 
 func TestUDP_findnodeMultiReply(t *testing.T) {
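To verify the new behaviour locally, the standard Go test invocation over the package would be (assuming a checkout of the repository):

go test ./p2p/discover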