go-libp2p-pubsub/gossipsub_connmgr_test.go

package pubsub

import (
	"context"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func TestGossipsubConnTagMessageDeliveries(t *testing.T) {
	t.Skip("flaky test disabled")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	oldGossipSubD := GossipSubD
	oldGossipSubDlo := GossipSubDlo
	oldGossipSubDHi := GossipSubDhi
	oldGossipSubConnTagDecayInterval := GossipSubConnTagDecayInterval
	oldGossipSubConnTagMessageDeliveryCap := GossipSubConnTagMessageDeliveryCap

	// set the gossipsub D parameters low, so that we have some peers outside the mesh
	GossipSubDlo = 3
	GossipSubD = 3
	GossipSubDhi = 3
	// also set the tag decay interval so we don't have to wait forever for tests
	GossipSubConnTagDecayInterval = time.Second
	// set the cap for deliveries above GossipSubConnTagValueMeshPeer, so the sybils
	// will be forced out even if they end up in someone's mesh
	GossipSubConnTagMessageDeliveryCap = 50

	// reset globals after test
	defer func() {
		GossipSubD = oldGossipSubD
		GossipSubDlo = oldGossipSubDlo
		GossipSubDhi = oldGossipSubDHi
		GossipSubConnTagDecayInterval = oldGossipSubConnTagDecayInterval
		GossipSubConnTagMessageDeliveryCap = oldGossipSubConnTagMessageDeliveryCap
	}()
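
	// Use a mock clock for the connection manager's decayer so the test can
	// advance decay ticks deterministically (via decayClock.Add below) instead
	// of waiting on wall-clock time.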
	decayClock := clock.NewMock()
	decayCfg := connmgr.DecayerCfg{
		Resolution: time.Second,
		Clock:      decayClock,
	}

	nHonest := 5
	nSquatter := 10
	connLimit := 10
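
	// Give each honest host its own connection manager wired to the shared
	// decayer config: low water nHonest, high water connLimit, so connections
	// above the limit become candidates for pruning once the grace period ends.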
	connmgrs := make([]*connmgr.BasicConnMgr, nHonest)
	honestHosts := make([]host.Host, nHonest)
	honestPeers := make(map[peer.ID]struct{})
	for i := 0; i < nHonest; i++ {
		var err error
		connmgrs[i], err = connmgr.NewConnManager(nHonest, connLimit,
			connmgr.WithGracePeriod(0),
			connmgr.WithSilencePeriod(time.Millisecond),
			connmgr.DecayerConfig(&decayCfg),
		)
		if err != nil {
			t.Fatal(err)
		}

		h, err := libp2p.New(
			libp2p.ResourceManager(&network.NullResourceManager{}),
			libp2p.ConnectionManager(connmgrs[i]),
		)
		if err != nil {
			t.Fatal(err)
		}
		t.Cleanup(func() { h.Close() })

		honestHosts[i] = h
		honestPeers[h.ID()] = struct{}{}
	}

	// use flood publishing, so non-mesh peers will still be delivering messages
	// to everyone
	psubs := getGossipsubs(ctx, honestHosts,
		WithFloodPublish(true))

	// sybil squatters to be connected later
	sybilHosts := getDefaultHosts(t, nSquatter)
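
	// Register the squatter stream handler on each sybil host. The sybilSquatter
	// helper is defined elsewhere in the test suite; here it only needs to speak
	// gossipsub well enough to hold a connection without earning delivery credit.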
	for _, h := range sybilHosts {
		squatter := &sybilSquatter{h: h, ignoreErrors: true}
		h.SetStreamHandler(GossipSubID_v10, squatter.handleStream)
	}

	// connect the honest hosts
	connectAll(t, honestHosts)
	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest-1 {
			t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns()))
		}
	}

	// subscribe everyone to the topic
	topic := "test"
	for _, ps := range psubs {
		_, err := ps.Subscribe(topic)
		if err != nil {
			t.Fatal(err)
		}
	}

	// sleep to allow meshes to form
	time.Sleep(2 * time.Second)

	// have all the hosts publish enough messages to ensure that they get some delivery credit
	nMessages := GossipSubConnTagMessageDeliveryCap * 2
	for _, ps := range psubs {
		for i := 0; i < nMessages; i++ {
			ps.Publish(topic, []byte("hello"))
		}
	}

	// advance the fake time for the tag decay
	decayClock.Add(time.Second)

	// verify that they've given each other delivery connection tags
	tag := "pubsub-deliveries:test"
	for _, h := range honestHosts {
		for _, h2 := range honestHosts {
			if h.ID() == h2.ID() {
				continue
			}
			val := getTagValue(h.ConnManager(), h2.ID(), tag)
			if val == 0 {
				t.Errorf("expected non-zero delivery tag value for peer %s", h2.ID())
			}
		}
	}
2020-05-08 21:34:34 -04:00
// now connect the sybils to put pressure on the real hosts' connection managers
allHosts := append(honestHosts, sybilHosts...)
connectAll(t, allHosts)
2020-05-19 10:50:45 -04:00
2020-05-08 21:34:34 -04:00
// we should still have conns to all the honest peers, but not the sybils
for _, h := range honestHosts {
nHonestConns := 0
nDishonestConns := 0
for _, conn := range h.Network().Conns() {
if _, ok := honestPeers[conn.RemotePeer()]; !ok {
nDishonestConns++
} else {
nHonestConns++
}
}
2020-05-18 19:13:13 -04:00
if nDishonestConns > connLimit-nHonest {
2020-05-08 21:34:34 -04:00
t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns)
}
if nHonestConns != nHonest-1 {
t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns)
}
2020-05-19 10:50:45 -04:00
}
}