package pubsub

import (
	"context"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/libp2p/go-libp2p-core/host"
	swarmt "github.com/libp2p/go-libp2p-swarm/testing"

	bhost "github.com/libp2p/go-libp2p-blankhost"
	connmgr "github.com/libp2p/go-libp2p-connmgr"
	"github.com/libp2p/go-libp2p-core/peer"
)
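
// TestGossipsubConnTagMessageDeliveries checks the connection manager integration:
// honest peers that deliver messages should accumulate delivery tags that protect their
// connections from pruning, while sybil peers that never deliver anything get trimmed.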
func TestGossipsubConnTagMessageDeliveries(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
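
	// save the globals we're about to override so the deferred cleanup below can restore them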
	oldGossipSubD := GossipSubD
	oldGossipSubDlo := GossipSubDlo
	oldGossipSubDHi := GossipSubDhi
	oldGossipSubConnTagDecayInterval := GossipSubConnTagDecayInterval
	oldGossipSubConnTagMessageDeliveryCap := GossipSubConnTagMessageDeliveryCap
	oldSilencePeriod := connmgr.SilencePeriod

	// set the gossipsub D parameters low, so that we have some peers outside the mesh
	GossipSubDlo = 3
	GossipSubD = 3
	GossipSubDhi = 3
	// also set the tag decay interval so we don't have to wait forever for tests
	GossipSubConnTagDecayInterval = time.Second

	// set the cap for deliveries above GossipSubConnTagValueMeshPeer, so the sybils
	// will be forced out even if they end up in someone's mesh
	GossipSubConnTagMessageDeliveryCap = 50
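
	// shorten the connmgr silence period so the explicit TrimOpenConns call later
	// in the test isn't suppressed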
	connmgr.SilencePeriod = time.Millisecond

	// reset globals after test
	defer func() {
		GossipSubD = oldGossipSubD
		GossipSubDlo = oldGossipSubDlo
		GossipSubDhi = oldGossipSubDHi
		GossipSubConnTagDecayInterval = oldGossipSubConnTagDecayInterval
		GossipSubConnTagMessageDeliveryCap = oldGossipSubConnTagMessageDeliveryCap
		connmgr.SilencePeriod = oldSilencePeriod
	}()
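
	// use a mock clock for the decayer so the test can advance tag-decay time manually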
	decayClock := clock.NewMock()
	decayCfg := connmgr.DecayerCfg{
		Resolution: time.Second,
		Clock:      decayClock,
	}

	nHonest := 5
	nSquatter := 10
	connLimit := 10
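
	// each connmgr below gets nHonest as its low water mark and connLimit as its high
	// water mark, so a trim should be able to keep all the honest conns while dropping sybils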
	connmgrs := make([]*connmgr.BasicConnMgr, nHonest)
	honestHosts := make([]host.Host, nHonest)
	honestPeers := make(map[peer.ID]struct{})

	for i := 0; i < nHonest; i++ {
		connmgrs[i] = connmgr.NewConnManager(nHonest, connLimit, 0,
			connmgr.DecayerConfig(&decayCfg))

		netw := swarmt.GenSwarm(t, ctx)
		h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i]))
		honestHosts[i] = h
		honestPeers[h.ID()] = struct{}{}
	}

	// use flood publishing, so non-mesh peers will still be delivering messages
	// to everyone
	psubs := getGossipsubs(ctx, honestHosts,
		WithFloodPublish(true))

	// sybil squatters to be connected later
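	// (the sybilSquatter helper, defined elsewhere in this test suite, accepts gossipsub
	// streams but never delivers any messages, so it should never earn delivery credit)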
	sybilHosts := getNetHosts(t, ctx, nSquatter)
	squatters := make([]*sybilSquatter, 0, nSquatter)
	for _, h := range sybilHosts {
		squatter := &sybilSquatter{h: h}
		h.SetStreamHandler(GossipSubID_v10, squatter.handleStream)
		squatters = append(squatters, squatter)
	}

	// connect the honest hosts
	connectAll(t, honestHosts)

	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest-1 {
			t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns()))
		}
	}

	// subscribe everyone to the topic
	topic := "test"
	var msgs []*Subscription
	for _, ps := range psubs {
		subch, err := ps.Subscribe(topic)
		if err != nil {
			t.Fatal(err)
		}

		msgs = append(msgs, subch)
	}

	// sleep to allow meshes to form
	time.Sleep(2 * time.Second)

	// have all the hosts publish enough messages to ensure that they get some delivery credit
	nMessages := GossipSubConnTagMessageDeliveryCap * 2
	for _, ps := range psubs {
		for i := 0; i < nMessages; i++ {
			ps.Publish(topic, []byte("hello"))
		}
	}

	// advance the fake time for the tag decay
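	// (one second matches the decayer Resolution and GossipSubConnTagDecayInterval set above)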
	decayClock.Add(time.Second)

	// verify that they've given each other delivery connection tags
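	// (the per-topic delivery tag is named "pubsub-deliveries:<topic>")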
	tag := "pubsub-deliveries:test"
	for _, h := range honestHosts {
		for _, h2 := range honestHosts {
			if h.ID() == h2.ID() {
				continue
			}
			val := getTagValue(h.ConnManager(), h2.ID(), tag)
			if val == 0 {
				t.Errorf("Expected non-zero delivery tag value for peer %s", h2.ID())
			}
		}
	}

	// now connect the sybils to put pressure on the real hosts' connection managers
	allHosts := append(honestHosts, sybilHosts...)
	connectAll(t, allHosts)

	// verify that we have a bunch of connections
	for _, h := range honestHosts {
		if len(h.Network().Conns()) != nHonest+nSquatter-1 {
			t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns()))
		}
	}

	// force the connection managers to trim, so we don't need to muck about with timing as much
	for _, cm := range connmgrs {
		cm.TrimOpenConns(ctx)
	}

	// we should still have conns to all the honest peers, but not the sybils
	for _, h := range honestHosts {
		nHonestConns := 0
		nDishonestConns := 0
		for _, conn := range h.Network().Conns() {
			if _, ok := honestPeers[conn.RemotePeer()]; !ok {
				nDishonestConns++
			} else {
				nHonestConns++
			}
		}
		if nDishonestConns > connLimit-nHonest {
			t.Errorf("expected most dishonest conns to be pruned, have %d", nDishonestConns)
		}
		if nHonestConns != nHonest-1 {
			t.Errorf("expected all honest conns to be preserved, have %d", nHonestConns)
		}
	}
}