package pubsub

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	pb "github.com/libp2p/go-libp2p-pubsub/pb"

	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/protocol"
)

const (
	GossipSubID = protocol.ID("/meshsub/1.0.0")
)

var (
	// overlay parameters
	GossipSubD   = 6
	GossipSubDlo = 4
	GossipSubDhi = 12

	// gossip parameters
	GossipSubHistoryLength = 5
	GossipSubHistoryGossip = 3

	// heartbeat interval
	GossipSubHeartbeatInitialDelay = 100 * time.Millisecond
	GossipSubHeartbeatInterval     = 1 * time.Second

	// fanout ttl
	GossipSubFanoutTTL = 60 * time.Second
)

// NewGossipSub returns a new PubSub object using GossipSubRouter as the router.
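//
// A minimal usage sketch; it assumes a host.Host h has already been
// constructed, and the topic name and payload are illustrative only:
//
//	ps, err := NewGossipSub(ctx, h)
//	if err != nil {
//		panic(err)
//	}
//	sub, err := ps.Subscribe("example-topic")
//	if err != nil {
//		panic(err)
//	}
//	go func() {
//		for {
//			m, err := sub.Next(ctx)
//			if err != nil {
//				return
//			}
//			_ = m.GetData() // process the message payload
//		}
//	}()
//	_ = ps.Publish("example-topic", []byte("hello"))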
func NewGossipSub(ctx context.Context, h host.Host, opts ...Option) (*PubSub, error) {
	rt := &GossipSubRouter{
		peers:   make(map[peer.ID]protocol.ID),
		mesh:    make(map[string]map[peer.ID]struct{}),
		fanout:  make(map[string]map[peer.ID]struct{}),
		lastpub: make(map[string]int64),
		gossip:  make(map[peer.ID][]*pb.ControlIHave),
		control: make(map[peer.ID]*pb.ControlMessage),
		mcache:  NewMessageCache(GossipSubHistoryGossip, GossipSubHistoryLength),
	}
	return NewPubSub(ctx, h, rt, opts...)
}

// GossipSubRouter is a router that implements the gossipsub protocol.
// For each topic we have joined, we maintain an overlay through which
// messages flow; this is the mesh map.
// For each topic we publish to without joining, we maintain a list of peers
// to use for injecting our messages in the overlay with stable routes; this
// is the fanout map. Fanout peer lists are expired if we don't publish any
// messages to their topic for GossipSubFanoutTTL.
type GossipSubRouter struct {
	p       *PubSub
	peers   map[peer.ID]protocol.ID         // peer protocols
	mesh    map[string]map[peer.ID]struct{} // topic meshes
	fanout  map[string]map[peer.ID]struct{} // topic fanout
	lastpub map[string]int64                // last publish time for fanout topics
	gossip  map[peer.ID][]*pb.ControlIHave  // pending gossip
	control map[peer.ID]*pb.ControlMessage  // pending control messages
	mcache  *MessageCache
}

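// Protocols returns the protocol IDs this router speaks: gossipsub and, for
// backwards compatibility, floodsub.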
func (gs *GossipSubRouter) Protocols() []protocol.ID {
	return []protocol.ID{GossipSubID, FloodSubID}
}

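// Attach attaches the router to a PubSub instance and starts the heartbeat loop.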
func (gs *GossipSubRouter) Attach(p *PubSub) {
	gs.p = p
	go gs.heartbeatTimer()
}

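// AddPeer records a newly connected peer and the protocol it speaks.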
func (gs *GossipSubRouter) AddPeer(p peer.ID, proto protocol.ID) {
	log.Debugf("PEERUP: Add new peer %s using %s", p, proto)
	gs.peers[p] = proto
}

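// RemovePeer removes a disconnected peer from the protocol table, all topic
// meshes and fanout lists, and drops any pending gossip and control messages
// addressed to it.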
func (gs *GossipSubRouter) RemovePeer(p peer.ID) {
	log.Debugf("PEERDOWN: Remove disconnected peer %s", p)
	delete(gs.peers, p)
	for _, peers := range gs.mesh {
		delete(peers, p)
	}
	for _, peers := range gs.fanout {
		delete(peers, p)
	}
	delete(gs.gossip, p)
	delete(gs.control, p)
}

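// HandleRPC processes the control portion of an incoming RPC: it handles
// IHAVE, IWANT, GRAFT and PRUNE messages and sends back any resulting
// IWANTs, messages and PRUNEs in a single response RPC.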
func (gs *GossipSubRouter) HandleRPC(rpc *RPC) {
	ctl := rpc.GetControl()
	if ctl == nil {
		return
	}

	iwant := gs.handleIHave(rpc.from, ctl)
	ihave := gs.handleIWant(rpc.from, ctl)
	prune := gs.handleGraft(rpc.from, ctl)
	gs.handlePrune(rpc.from, ctl)

	if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 {
		return
	}

	out := rpcWithControl(ihave, nil, iwant, nil, prune)
	gs.sendRPC(rpc.from, out)
}

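// handleIHave processes IHAVE advertisements and returns an IWANT request for
// any advertised message IDs in topics we are meshed in that we haven't seen yet.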
func (gs *GossipSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant {
	iwant := make(map[string]struct{})

	for _, ihave := range ctl.GetIhave() {
		topic := ihave.GetTopicID()
		_, ok := gs.mesh[topic]
		if !ok {
			continue
		}

		for _, mid := range ihave.GetMessageIDs() {
			if gs.p.seenMessage(mid) {
				continue
			}
			iwant[mid] = struct{}{}
		}
	}

	if len(iwant) == 0 {
		return nil
	}

	log.Debugf("IHAVE: Asking for %d messages from %s", len(iwant), p)

	iwantlst := make([]string, 0, len(iwant))
	for mid := range iwant {
		iwantlst = append(iwantlst, mid)
	}

	return []*pb.ControlIWant{&pb.ControlIWant{MessageIDs: iwantlst}}
}

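// handleIWant processes IWANT requests and returns the requested messages
// that are still present in the message cache.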
func (gs *GossipSubRouter) handleIWant(p peer.ID, ctl *pb.ControlMessage) []*pb.Message {
	ihave := make(map[string]*pb.Message)
	for _, iwant := range ctl.GetIwant() {
		for _, mid := range iwant.GetMessageIDs() {
			msg, ok := gs.mcache.Get(mid)
			if ok {
				ihave[mid] = msg
			}
		}
	}

	if len(ihave) == 0 {
		return nil
	}

	log.Debugf("IWANT: Sending %d messages to %s", len(ihave), p)

	msgs := make([]*pb.Message, 0, len(ihave))
	for _, msg := range ihave {
		msgs = append(msgs, msg)
	}

	return msgs
}

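// handleGraft processes GRAFT requests, adding the peer to the mesh for
// topics we are joined in and returning PRUNEs for topics we are not.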
func (gs *GossipSubRouter) handleGraft(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlPrune {
	var prune []string
	for _, graft := range ctl.GetGraft() {
		topic := graft.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			prune = append(prune, topic)
		} else {
			log.Debugf("GRAFT: Add mesh link from %s in %s", p, topic)
			peers[p] = struct{}{}
			gs.tagPeer(p, topic)
		}
	}

	if len(prune) == 0 {
		return nil
	}

	cprune := make([]*pb.ControlPrune, 0, len(prune))
	for _, topic := range prune {
		// copy the topic name; the loop variable is reused across
		// iterations, so taking its address directly would make all
		// PRUNEs point to the last topic
		topic := topic
		cprune = append(cprune, &pb.ControlPrune{TopicID: &topic})
	}

	return cprune
}

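// handlePrune processes PRUNE notifications, removing the peer from the mesh
// of the affected topics.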
func (gs *GossipSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) {
	for _, prune := range ctl.GetPrune() {
		topic := prune.GetTopicID()
		peers, ok := gs.mesh[topic]
		if ok {
			log.Debugf("PRUNE: Remove mesh link to %s in %s", p, topic)
			delete(peers, p)
			gs.untagPeer(p, topic)
		}
	}
}

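// Publish forwards a message to the mesh peers of its topics (or to fanout
// peers if we have not joined the topic), as well as to any floodsub peers
// subscribed to those topics. The message is also added to the message cache
// so it can be served through IWANT requests.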
func (gs *GossipSubRouter) Publish(from peer.ID, msg *pb.Message) {
	gs.mcache.Put(msg)

	tosend := make(map[peer.ID]struct{})
	for _, topic := range msg.GetTopicIDs() {
		// any peers in the topic?
		tmap, ok := gs.p.topics[topic]
		if !ok {
			continue
		}

		// floodsub peers
		for p := range tmap {
			if gs.peers[p] == FloodSubID {
				tosend[p] = struct{}{}
			}
		}

		// gossipsub peers
		gmap, ok := gs.mesh[topic]
		if !ok {
			// we are not in the mesh for topic, use fanout peers
			gmap, ok = gs.fanout[topic]
			if !ok || len(gmap) == 0 {
				// we don't have any, pick some
				peers := gs.getPeers(topic, GossipSubD, func(peer.ID) bool { return true })

				if len(peers) > 0 {
					gmap = peerListToMap(peers)
					gs.fanout[topic] = gmap
				}
			}
			gs.lastpub[topic] = time.Now().UnixNano()
		}

		for p := range gmap {
			tosend[p] = struct{}{}
		}
	}

	out := rpcWithMessages(msg)
	for pid := range tosend {
		if pid == from || pid == peer.ID(msg.GetFrom()) {
			continue
		}

		gs.sendRPC(pid, out)
	}
}

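// Join builds the mesh for a topic we are joining: it promotes the fanout
// peers if we have them (topping up to GossipSubD if needed) or selects
// GossipSubD peers from the topic, and sends GRAFT to each of them.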
func (gs *GossipSubRouter) Join(topic string) {
	gmap, ok := gs.mesh[topic]
	if ok {
		return
	}

	log.Debugf("JOIN %s", topic)

	gmap, ok = gs.fanout[topic]
	if ok {
		if len(gmap) < GossipSubD {
			// we need more peers; be eager here, even though the next
			// heartbeat would eventually fix the deficit anyway
			more := gs.getPeers(topic, GossipSubD-len(gmap), func(p peer.ID) bool {
				// filter out our current peers
				_, ok := gmap[p]
				return !ok
			})
			for _, p := range more {
				gmap[p] = struct{}{}
			}
		}
		gs.mesh[topic] = gmap
		delete(gs.fanout, topic)
		delete(gs.lastpub, topic)
	} else {
		peers := gs.getPeers(topic, GossipSubD, func(peer.ID) bool { return true })
		gmap = peerListToMap(peers)
		gs.mesh[topic] = gmap
	}

	for p := range gmap {
		log.Debugf("JOIN: Add mesh link to %s in %s", p, topic)
		gs.sendGraft(p, topic)
		gs.tagPeer(p, topic)
	}
}

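// Leave tears down the mesh for a topic we are leaving, sending PRUNE to all
// of its mesh peers.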
func (gs *GossipSubRouter) Leave(topic string) {
	gmap, ok := gs.mesh[topic]
	if !ok {
		return
	}

	log.Debugf("LEAVE %s", topic)

	delete(gs.mesh, topic)

	for p := range gmap {
		log.Debugf("LEAVE: Remove mesh link to %s in %s", p, topic)
		gs.sendPrune(p, topic)
		gs.untagPeer(p, topic)
	}
}

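// sendGraft sends a GRAFT control message for a single topic to a peer.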
func (gs *GossipSubRouter) sendGraft(p peer.ID, topic string) {
	graft := []*pb.ControlGraft{&pb.ControlGraft{TopicID: &topic}}
	out := rpcWithControl(nil, nil, nil, graft, nil)
	gs.sendRPC(p, out)
}

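// sendPrune sends a PRUNE control message for a single topic to a peer.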
func (gs *GossipSubRouter) sendPrune(p peer.ID, topic string) {
	prune := []*pb.ControlPrune{&pb.ControlPrune{TopicID: &topic}}
	out := rpcWithControl(nil, nil, nil, nil, prune)
	gs.sendRPC(p, out)
}

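// sendRPC delivers an RPC to a peer's outbound queue, piggybacking any
// pending control messages and gossip for that peer. If the queue is full
// the RPC is dropped, but GRAFT/PRUNE control messages are saved for retry.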
func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) {
	// do we own the RPC?
	own := false

	// piggyback control message retries
	ctl, ok := gs.control[p]
	if ok {
		out = copyRPC(out)
		own = true
		gs.piggybackControl(p, out, ctl)
		delete(gs.control, p)
	}

	// piggyback gossip
	ihave, ok := gs.gossip[p]
	if ok {
		if !own {
			out = copyRPC(out)
			own = true
		}
		gs.piggybackGossip(p, out, ihave)
		delete(gs.gossip, p)
	}

	mch, ok := gs.p.peers[p]
	if !ok {
		return
	}

	select {
	case mch <- out:
	default:
		log.Infof("dropping message to peer %s: queue full", p)
		// push control messages that need to be retried
		ctl := out.GetControl()
		if ctl != nil {
			gs.pushControl(p, ctl)
		}
	}
}

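// heartbeatTimer schedules the heartbeat on the pubsub event loop: once after
// the initial delay and then at every heartbeat interval, until the context
// is done.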
func (gs *GossipSubRouter) heartbeatTimer() {
	time.Sleep(GossipSubHeartbeatInitialDelay)
	select {
	case gs.p.eval <- gs.heartbeat:
	case <-gs.p.ctx.Done():
		return
	}

	ticker := time.NewTicker(GossipSubHeartbeatInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			select {
			case gs.p.eval <- gs.heartbeat:
			case <-gs.p.ctx.Done():
				return
			}
		case <-gs.p.ctx.Done():
			return
		}
	}
}

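// heartbeat performs the periodic maintenance of the router: it flushes
// pending gossip and control messages, keeps every mesh between GossipSubDlo
// and GossipSubDhi peers, expires stale fanout lists, tops up fanout peers,
// emits IHAVE gossip and advances the message cache window.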
func (gs *GossipSubRouter) heartbeat() {
	defer log.EventBegin(gs.p.ctx, "heartbeat").Done()

	// flush pending control messages from retries and gossip
	// that haven't been piggybacked since the last heartbeat
	gs.flush()

	tograft := make(map[peer.ID][]string)
	toprune := make(map[peer.ID][]string)

	// maintain the mesh for topics we have joined
	for topic, peers := range gs.mesh {

		// do we have enough peers?
		if len(peers) < GossipSubDlo {
			ineed := GossipSubD - len(peers)
			plst := gs.getPeers(topic, ineed, func(p peer.ID) bool {
				// filter out our current peers
				_, ok := peers[p]
				return !ok
			})

			for _, p := range plst {
				log.Debugf("HEARTBEAT: Add mesh link to %s in %s", p, topic)
				peers[p] = struct{}{}
				gs.tagPeer(p, topic)
				topics := tograft[p]
				tograft[p] = append(topics, topic)
			}
		}

		// do we have too many peers?
		if len(peers) > GossipSubDhi {
			idontneed := len(peers) - GossipSubD
			plst := peerMapToList(peers)
			shufflePeers(plst)

			for _, p := range plst[:idontneed] {
				log.Debugf("HEARTBEAT: Remove mesh link to %s in %s", p, topic)
				delete(peers, p)
				gs.untagPeer(p, topic)
				topics := toprune[p]
				toprune[p] = append(topics, topic)
			}
		}

		// the 2nd arg is the set of mesh peers to exclude from gossip;
		// we already push messages to them, so it's redundant to gossip IHAVEs
		gs.emitGossip(topic, peers)
	}

	// expire fanout for topics we haven't published to in a while
	now := time.Now().UnixNano()
	for topic, lastpub := range gs.lastpub {
		if lastpub+int64(GossipSubFanoutTTL) < now {
			delete(gs.fanout, topic)
			delete(gs.lastpub, topic)
		}
	}

	// maintain our fanout for topics we are publishing to but have not joined
	for topic, peers := range gs.fanout {
		// check whether our peers are still in the topic
		for p := range peers {
			_, ok := gs.p.topics[topic][p]
			if !ok {
				delete(peers, p)
			}
		}

		// do we need more peers?
		if len(peers) < GossipSubD {
			ineed := GossipSubD - len(peers)
			plst := gs.getPeers(topic, ineed, func(p peer.ID) bool {
				// filter out our current peers
				_, ok := peers[p]
				return !ok
			})

			for _, p := range plst {
				peers[p] = struct{}{}
			}
		}

		// the 2nd arg is the set of fanout peers to exclude from gossip;
		// we already push messages to them, so it's redundant to gossip IHAVEs
		gs.emitGossip(topic, peers)
	}

	// send coalesced GRAFT/PRUNE messages (will piggyback gossip)
	gs.sendGraftPrune(tograft, toprune)

	// advance the message history window
	gs.mcache.Shift()
}

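// sendGraftPrune sends the coalesced GRAFT and PRUNE control messages
// accumulated during the heartbeat, one RPC per peer.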
func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string) {
	for p, topics := range tograft {
		graft := make([]*pb.ControlGraft, 0, len(topics))
		for _, topic := range topics {
			// copy the topic name, as we take the address of the loop variable
			topic := topic
			graft = append(graft, &pb.ControlGraft{TopicID: &topic})
		}

		var prune []*pb.ControlPrune
		pruning, ok := toprune[p]
		if ok {
			delete(toprune, p)
			prune = make([]*pb.ControlPrune, 0, len(pruning))
			for _, topic := range pruning {
				topic := topic
				prune = append(prune, &pb.ControlPrune{TopicID: &topic})
			}
		}

		out := rpcWithControl(nil, nil, nil, graft, prune)
		gs.sendRPC(p, out)
	}

	for p, topics := range toprune {
		prune := make([]*pb.ControlPrune, 0, len(topics))
		for _, topic := range topics {
			topic := topic
			prune = append(prune, &pb.ControlPrune{TopicID: &topic})
		}

		out := rpcWithControl(nil, nil, nil, nil, prune)
		gs.sendRPC(p, out)
	}
}

// emitGossip emits IHAVE gossip advertising items in the message cache window
// of this topic.
func (gs *GossipSubRouter) emitGossip(topic string, exclude map[peer.ID]struct{}) {
	mids := gs.mcache.GetGossipIDs(topic)
	if len(mids) == 0 {
		return
	}

	// Send gossip to D peers, skipping over the exclude set.
	gpeers := gs.getPeers(topic, GossipSubD, func(p peer.ID) bool {
		_, ok := exclude[p]
		return !ok
	})

	// Emit the IHAVE gossip to the selected peers.
	for _, p := range gpeers {
		gs.enqueueGossip(p, &pb.ControlIHave{TopicID: &topic, MessageIDs: mids})
	}
}

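// flush sends any gossip and control messages that were not piggybacked on
// outgoing RPCs since the last heartbeat.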
func (gs *GossipSubRouter) flush() {
	// send gossip first, which will also piggyback control
	for p, ihave := range gs.gossip {
		delete(gs.gossip, p)
		out := rpcWithControl(nil, ihave, nil, nil, nil)
		gs.sendRPC(p, out)
	}

	// send the remaining control messages
	for p, ctl := range gs.control {
		delete(gs.control, p)
		out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune)
		gs.sendRPC(p, out)
	}
}

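// enqueueGossip queues an IHAVE to be sent to a peer, either piggybacked on
// the next RPC or flushed at the next heartbeat.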
func (gs *GossipSubRouter) enqueueGossip(p peer.ID, ihave *pb.ControlIHave) {
	gossip := gs.gossip[p]
	gossip = append(gossip, ihave)
	gs.gossip[p] = gossip
}

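// piggybackGossip attaches pending IHAVE gossip to an outgoing RPC.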
func (gs *GossipSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.ControlIHave) {
	ctl := out.GetControl()
	if ctl == nil {
		ctl = &pb.ControlMessage{}
		out.Control = ctl
	}

	ctl.Ihave = ihave
}

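// pushControl saves a control message for retry at a later send or the next
// heartbeat; only GRAFT/PRUNE are retained, as gossip is not retried.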
func (gs *GossipSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) {
	// remove IHAVE/IWANT from control message, gossip is not retried
	ctl.Ihave = nil
	ctl.Iwant = nil
	if ctl.Graft != nil || ctl.Prune != nil {
		gs.control[p] = ctl
	}
}

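// piggybackControl attaches retried GRAFT/PRUNE messages to an outgoing RPC,
// dropping those that have become stale with respect to the current mesh state.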
func (gs *GossipSubRouter) piggybackControl(p peer.ID, out *RPC, ctl *pb.ControlMessage) {
	// check control message for staleness first
	var tograft []*pb.ControlGraft
	var toprune []*pb.ControlPrune

	for _, graft := range ctl.GetGraft() {
		topic := graft.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			continue
		}
		_, ok = peers[p]
		if ok {
			tograft = append(tograft, graft)
		}
	}

	for _, prune := range ctl.GetPrune() {
		topic := prune.GetTopicID()
		peers, ok := gs.mesh[topic]
		if !ok {
			toprune = append(toprune, prune)
			continue
		}
		_, ok = peers[p]
		if !ok {
			toprune = append(toprune, prune)
		}
	}

	if len(tograft) == 0 && len(toprune) == 0 {
		return
	}

	xctl := out.Control
	if xctl == nil {
		xctl = &pb.ControlMessage{}
		out.Control = xctl
	}

	if len(tograft) > 0 {
		xctl.Graft = append(xctl.Graft, tograft...)
	}
	if len(toprune) > 0 {
		xctl.Prune = append(xctl.Prune, toprune...)
	}
}

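// getPeers returns up to count gossipsub peers subscribed to a topic that
// pass the filter, in random order.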
func (gs *GossipSubRouter) getPeers(topic string, count int, filter func(peer.ID) bool) []peer.ID {
	tmap, ok := gs.p.topics[topic]
	if !ok {
		return nil
	}

	peers := make([]peer.ID, 0, len(tmap))
	for p := range tmap {
		if gs.peers[p] == GossipSubID && filter(p) {
			peers = append(peers, p)
		}
	}

	shufflePeers(peers)

	if count > 0 && len(peers) > count {
		peers = peers[:count]
	}

	return peers
}

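// tagPeer tags a mesh peer in the connection manager so that its connection
// is less likely to be pruned.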
func (gs *GossipSubRouter) tagPeer(p peer.ID, topic string) {
	tag := topicTag(topic)
	gs.p.host.ConnManager().TagPeer(p, tag, 2)
}

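// untagPeer removes the connection manager tag for a peer that left the mesh.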
func (gs *GossipSubRouter) untagPeer(p peer.ID, topic string) {
	tag := topicTag(topic)
	gs.p.host.ConnManager().UntagPeer(p, tag)
}

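// topicTag derives the connection manager tag name for a topic.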
func topicTag(topic string) string {
	return fmt.Sprintf("pubsub:%s", topic)
}

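// peerListToMap converts a peer list into a set.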
func peerListToMap(peers []peer.ID) map[peer.ID]struct{} {
	pmap := make(map[peer.ID]struct{})
	for _, p := range peers {
		pmap[p] = struct{}{}
	}
	return pmap
}

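// peerMapToList converts a peer set into a list.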
func peerMapToList(peers map[peer.ID]struct{}) []peer.ID {
	plst := make([]peer.ID, 0, len(peers))
	for p := range peers {
		plst = append(plst, p)
	}
	return plst
}

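// shufflePeers shuffles a peer list in place.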
func shufflePeers(peers []peer.ID) {
	for i := range peers {
		j := rand.Intn(i + 1)
		peers[i], peers[j] = peers[j], peers[i]
	}
}