Mirror of https://github.com/logos-messaging/go-libp2p-pubsub.git, synced 2026-01-02 12:53:09 +00:00
## GossipSub v1.2 implementation

Specification: libp2p/specs#548

### Work Summary

Sending IDONTWANT
* Implement a smart queue
* Add priorities to the smart queue
* Put IDONTWANT packets into the smart priority queue as soon as the node gets the packets

Handling IDONTWANT
* Use a map to remember the message IDs whose IDONTWANT packets have been received
* Implement max_idontwant_messages (ignore the IDONTWANT packets if the max is reached)
* Clear the message IDs from the cache after 3 heartbeats
* Hash the message IDs before putting them into the cache

(A short sketch of this cache behavior follows the commit summary below.)

More requested features
* Add a feature test to not send IDONTWANT if the other side doesn't support it

### Commit Summary

* Replace sending channel with the smart rpcQueue

  Since we want to implement a priority queue later, we need to replace the normal sending channels with the new smart structures first.

* Implement UrgentPush in the smart rpcQueue

  UrgentPush allows you to push an RPC packet to the front of the queue so that it will be popped out fast.

* Add IDONTWANT to rpc.proto and trace.proto

* Send IDONTWANT right before validation step

  Most importantly, this commit adds a new method called PreValidation to the interface PubSubRouter, which is called right before validating the gossipsub message. In GossipSubRouter, PreValidation sends the IDONTWANT control messages to all the mesh peers of the topics of the received messages.

* Test GossipSub IDONTWANT sending

* Send IDONTWANT only for large messages

* Handle IDONTWANT control messages

  When receiving IDONTWANTs, the host should remember the message IDs contained in them using a hash map. When receiving messages with those IDs, it shouldn't forward them to the peers who already sent the IDONTWANTs. When the maximum number of IDONTWANTs is reached for any particular peer, the host should ignore any excessive IDONTWANTs from that peer.

* Clear expired message IDs from the IDONTWANT cache

  If the message IDs received from IDONTWANTs are older than 3 heartbeats, they should be removed from the IDONTWANT cache.

* Keep the hashes of IDONTWANT message ids instead

  Rather than keeping the raw message IDs, keep their hashes instead to save memory and protect against memory DoS attacks.

* Increase GossipSubMaxIHaveMessages to 1000
* fixup! Clear expired message IDs from the IDONTWANT cache
* Not send IDONTWANT if the receiver doesn't support it
* fixup! Replace sending channel with the smart rpcQueue
* Not use pointers in rpcQueue
* Simplify rpcQueue by using only one mutex
* Check ctx error in rpc sending worker

  Co-authored-by: Steven Allen <steven@stebalien.com>

* fixup! Simplify rpcQueue by using only one mutex
* fixup! Keep the hashes of IDONTWANT message ids instead
* Use AfterFunc instead of implementing our own
* Fix misc lint errors
* fixup! Fix misc lint errors
* Revert "Increase GossipSubMaxIHaveMessages to 1000"

  This reverts commit 6fabcdd068a5f5238c5280a3460af9c3998418ec.

* Increase GossipSubMaxIDontWantMessages to 1000
* fixup! Handle IDONTWANT control messages
* Skip TestGossipsubConnTagMessageDeliveries
* Skip FuzzAppendOrMergeRPC
* Revert "Skip FuzzAppendOrMergeRPC"

  This reverts commit f141e13234de0960d139339acb636a1afea9e219.

* fixup! Send IDONTWANT only for large messages
* fixup! fixup! Keep the hashes of IDONTWANT message ids instead
* fixup! Implement UrgentPush in the smart rpcQueue
* fixup! Use AfterFunc instead of implementing our own

---------

Co-authored-by: Steven Allen <steven@stebalien.com>
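The IDONTWANT handling described in the work summary (remember per-peer message IDs, hash them before storing, cap them per peer, and expire them after three heartbeats) can be sketched as a small self-contained model. This is a hypothetical illustration, not code from this repository: the `idontwantCache` type, its methods, and the choice of SHA-256 are assumptions made here for clarity; the 1000-entry cap and the 3-heartbeat TTL come from the PR summary above.

```go
// Hypothetical sketch (not the library's actual types) of an IDONTWANT cache:
// message IDs are hashed before storage, each peer is capped at a maximum
// number of remembered IDs, and entries expire after three heartbeats.
package main

import (
	"crypto/sha256"
	"fmt"
)

const (
	maxIDontWantMessages = 1000 // per-peer cap; excess IDONTWANTs are ignored (PR raises this to 1000)
	idontwantTTLBeats    = 3    // entries expire after 3 heartbeats
)

type idontwantCache struct {
	// peer -> hashed message ID -> heartbeat at which the ID was recorded
	entries map[string]map[[32]byte]int
	beat    int // current heartbeat counter
}

func newIDontwantCache() *idontwantCache {
	return &idontwantCache{entries: make(map[string]map[[32]byte]int)}
}

// Record remembers that peer does not want msgID. It returns false if the
// per-peer cap has been reached and the entry was ignored.
func (c *idontwantCache) Record(peer, msgID string) bool {
	m, ok := c.entries[peer]
	if !ok {
		m = make(map[[32]byte]int)
		c.entries[peer] = m
	}
	if len(m) >= maxIDontWantMessages {
		return false
	}
	m[sha256.Sum256([]byte(msgID))] = c.beat
	return true
}

// Wants reports whether we may still forward msgID to peer.
func (c *idontwantCache) Wants(peer, msgID string) bool {
	_, dontWant := c.entries[peer][sha256.Sum256([]byte(msgID))]
	return !dontWant
}

// Heartbeat advances the clock and evicts entries older than the TTL.
func (c *idontwantCache) Heartbeat() {
	c.beat++
	for p, m := range c.entries {
		for h, b := range m {
			if c.beat-b >= idontwantTTLBeats {
				delete(m, h)
			}
		}
		if len(m) == 0 {
			delete(c.entries, p)
		}
	}
}

func main() {
	c := newIDontwantCache()
	c.Record("peerA", "msg-1")
	fmt.Println(c.Wants("peerA", "msg-1")) // false: peerA told us it doesn't want msg-1
	fmt.Println(c.Wants("peerB", "msg-1")) // true: peerB never sent an IDONTWANT

	// After three heartbeats the entry expires and forwarding is allowed again.
	c.Heartbeat()
	c.Heartbeat()
	c.Heartbeat()
	fmt.Println(c.Wants("peerA", "msg-1")) // true
}
```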
166 lines · 4.4 KiB · Go
package pubsub

import (
	"context"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/libp2p/go-libp2p/core/host"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func TestGossipsubConnTagMessageDeliveries(t *testing.T) {
	t.Skip("flaky test disabled")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	oldGossipSubD := GossipSubD
	oldGossipSubDlo := GossipSubDlo
	oldGossipSubDHi := GossipSubDhi
	oldGossipSubConnTagDecayInterval := GossipSubConnTagDecayInterval
	oldGossipSubConnTagMessageDeliveryCap := GossipSubConnTagMessageDeliveryCap

	// set the gossipsub D parameters low, so that we have some peers outside the mesh
	GossipSubDlo = 3
	GossipSubD = 3
	GossipSubDhi = 3
	// also set the tag decay interval so we don't have to wait forever for tests
	GossipSubConnTagDecayInterval = time.Second

	// set the cap for deliveries above GossipSubConnTagValueMeshPeer, so the sybils
	// will be forced out even if they end up in someone's mesh
	GossipSubConnTagMessageDeliveryCap = 50

	// reset globals after test
	defer func() {
		GossipSubD = oldGossipSubD
		GossipSubDlo = oldGossipSubDlo
		GossipSubDhi = oldGossipSubDHi
		GossipSubConnTagDecayInterval = oldGossipSubConnTagDecayInterval
		GossipSubConnTagMessageDeliveryCap = oldGossipSubConnTagMessageDeliveryCap
	}()

	decayClock := clock.NewMock()
	decayCfg := connmgr.DecayerCfg{
		Resolution: time.Second,
		Clock:      decayClock,
	}

	nHonest := 5
	nSquatter := 10
	connLimit := 10

	connmgrs := make([]*connmgr.BasicConnMgr, nHonest)
	honestHosts := make([]host.Host, nHonest)
	honestPeers := make(map[peer.ID]struct{})

	for i := 0; i < nHonest; i++ {
		var err error
		connmgrs[i], err = connmgr.NewConnManager(nHonest, connLimit,
			connmgr.WithGracePeriod(0),
			connmgr.WithSilencePeriod(time.Millisecond),
			connmgr.DecayerConfig(&decayCfg),
		)
		if err != nil {
			t.Fatal(err)
		}

		h, err := libp2p.New(
			libp2p.ResourceManager(&network.NullResourceManager{}),
			libp2p.ConnectionManager(connmgrs[i]),
		)
		if err != nil {
			t.Fatal(err)
		}
		t.Cleanup(func() { h.Close() })
		honestHosts[i] = h
		honestPeers[h.ID()] = struct{}{}
	}

	// use flood publishing, so non-mesh peers will still be delivering messages
	// to everyone
	psubs := getGossipsubs(ctx, honestHosts,
		WithFloodPublish(true))

	// sybil squatters to be connected later
	sybilHosts := getDefaultHosts(t, nSquatter)
	for _, h := range sybilHosts {
		squatter := &sybilSquatter{h: h, ignoreErrors: true}
		h.SetStreamHandler(GossipSubID_v10, squatter.handleStream)
	}

	// connect the honest hosts
	connectAll(t, honestHosts)

	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest-1 {
			t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns()))
		}
	}

	// subscribe everyone to the topic
	topic := "test"
	for _, ps := range psubs {
		_, err := ps.Subscribe(topic)
		if err != nil {
			t.Fatal(err)
		}
	}

	// sleep to allow meshes to form
	time.Sleep(2 * time.Second)

	// have all the hosts publish enough messages to ensure that they get some delivery credit
	nMessages := GossipSubConnTagMessageDeliveryCap * 2
	for _, ps := range psubs {
		for i := 0; i < nMessages; i++ {
			ps.Publish(topic, []byte("hello"))
		}
	}

	// advance the fake time for the tag decay
	decayClock.Add(time.Second)

	// verify that they've given each other delivery connection tags
	tag := "pubsub-deliveries:test"
	for _, h := range honestHosts {
		for _, h2 := range honestHosts {
			if h.ID() == h2.ID() {
				continue
			}
			val := getTagValue(h.ConnManager(), h2.ID(), tag)
			if val == 0 {
				t.Errorf("Expected non-zero delivery tag value for peer %s", h2.ID())
			}
		}
	}

	// now connect the sybils to put pressure on the real hosts' connection managers
	allHosts := append(honestHosts, sybilHosts...)
	connectAll(t, allHosts)

	// we should still have conns to all the honest peers, but not the sybils
	for _, h := range honestHosts {
		nHonestConns := 0
		nDishonestConns := 0
		for _, conn := range h.Network().Conns() {
			if _, ok := honestPeers[conn.RemotePeer()]; !ok {
				nDishonestConns++
			} else {
				nHonestConns++
			}
		}
		if nDishonestConns > connLimit-nHonest {
			t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns)
		}
		if nHonestConns != nHonest-1 {
			t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns)
		}
	}
}