package pubsub

import (
	"context"
	"encoding/binary"
	"io"
	"time"

	"github.com/gogo/protobuf/proto"
	pool "github.com/libp2p/go-buffer-pool"
	"github.com/multiformats/go-varint"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-msgio"

	pb "github.com/libp2p/go-libp2p-pubsub/pb"
)

// get the initial RPC containing all of our subscriptions to send to new peers
func (p *PubSub) getHelloPacket() *RPC {
	var rpc RPC

	subscriptions := make(map[string]bool)

	for t := range p.mySubs {
		subscriptions[t] = true
	}

	for t := range p.myRelays {
		subscriptions[t] = true
	}

	for t := range subscriptions {
		as := &pb.RPC_SubOpts{
			Topicid:   proto.String(t),
			Subscribe: proto.Bool(true),
		}
		rpc.Subscriptions = append(rpc.Subscriptions, as)
	}
	return &rpc
}
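
// handleNewStream is the inbound side of the protocol: it reads
// length-prefixed RPCs off the stream in a loop and forwards them to the
// event loop, keeping at most one inbound stream per peer.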
func (p *PubSub) handleNewStream(s network.Stream) {
	peer := s.Conn().RemotePeer()

	p.inboundStreamsMx.Lock()
	other, dup := p.inboundStreams[peer]
	if dup {
		log.Debugf("duplicate inbound stream from %s; resetting other stream", peer)
		other.Reset()
	}
	p.inboundStreams[peer] = s
	p.inboundStreamsMx.Unlock()

	defer func() {
		p.inboundStreamsMx.Lock()
		if p.inboundStreams[peer] == s {
			delete(p.inboundStreams, peer)
		}
		p.inboundStreamsMx.Unlock()
	}()

	r := msgio.NewVarintReaderSize(s, p.maxMessageSize)
	for {
		msgbytes, err := r.ReadMsg()
		if err != nil {
			r.ReleaseMsg(msgbytes)
			if err != io.EOF {
				s.Reset()
				log.Debugf("error reading rpc from %s: %s", s.Conn().RemotePeer(), err)
			} else {
				// Just be nice. They probably won't read this
				// but it doesn't hurt to send it.
				s.Close()
			}

			return
		}
		if len(msgbytes) == 0 {
			continue
		}

		rpc := new(RPC)
		err = rpc.Unmarshal(msgbytes)
		r.ReleaseMsg(msgbytes)
		if err != nil {
			s.Reset()
			log.Warnf("bogus rpc from %s: %s", s.Conn().RemotePeer(), err)
			return
		}

		rpc.from = peer
		select {
		case p.incoming <- rpc:
		case <-p.ctx.Done():
			// Close is useless because the other side isn't reading.
			s.Reset()
			return
		}
	}
}
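
// notifyPeerDead marks a peer as pending-dead and wakes the event loop.
// The send on peerDead is non-blocking: a single pending wakeup is enough
// for the loop to drain all entries in peerDeadPend.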
func (p *PubSub) notifyPeerDead(pid peer.ID) {
	p.peerDeadPrioLk.RLock()
	p.peerDeadMx.Lock()
	p.peerDeadPend[pid] = struct{}{}
	p.peerDeadMx.Unlock()
	p.peerDeadPrioLk.RUnlock()

	select {
	case p.peerDead <- struct{}{}:
	default:
	}
}
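
// handleNewPeer opens an outbound stream to pid and starts the sending and
// liveness-watch goroutines for it, reporting the new stream (or the error)
// back to the event loop via newPeerStream / newPeerError.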
func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing *rpcQueue) {
	s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...)
	if err != nil {
		log.Debug("opening new stream to peer: ", err, pid)

		select {
		case p.newPeerError <- pid:
		case <-ctx.Done():
		}

		return
	}

	go p.handleSendingMessages(ctx, s, outgoing)
	go p.handlePeerDead(s)
	select {
	case p.newPeerStream <- s:
	case <-ctx.Done():
	}
}
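
// handleNewPeerWithBackoff waits for the backoff period before attempting
// the connection, aborting early if the context is cancelled.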
func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing *rpcQueue) {
	select {
	case <-time.After(backoff):
		p.handleNewPeer(ctx, pid, outgoing)
	case <-ctx.Done():
		return
	}
}
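
// handlePeerDead blocks on a read from the outbound stream. We never expect
// inbound data on it, so any read result means the stream is dead or the
// peer is misbehaving; either way we reset it and notify the event loop.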
func (p *PubSub) handlePeerDead(s network.Stream) {
	pid := s.Conn().RemotePeer()

	_, err := s.Read([]byte{0})
	if err == nil {
		log.Debugf("unexpected message from %s", pid)
	}

	s.Reset()
	p.notifyPeerDead(pid)
}
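
// handleSendingMessages pops RPCs off the peer's outgoing queue and writes
// each one to the stream as a single frame: a uvarint length prefix followed
// by the protobuf-encoded RPC. This is the same framing that
// msgio.NewVarintReaderSize decodes on the inbound side in handleNewStream.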
func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing *rpcQueue) {
	writeRpc := func(rpc *RPC) error {
		size := uint64(rpc.Size())

		buf := pool.Get(varint.UvarintSize(size) + int(size))
		defer pool.Put(buf)

		n := binary.PutUvarint(buf, size)
		_, err := rpc.MarshalTo(buf[n:])
		if err != nil {
			return err
		}

		_, err = s.Write(buf)
		return err
	}

	defer s.Close()
	for ctx.Err() == nil {
		rpc, err := outgoing.Pop(ctx)
		if err != nil {
			log.Debugf("popping message from the queue to send to %s: %s", s.Conn().RemotePeer(), err)
			return
		}

		err = writeRpc(rpc)
		if err != nil {
			s.Reset()
			log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err)
			return
		}
	}
}
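
// rpcWithSubs builds an RPC carrying only subscription options.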
func rpcWithSubs(subs ...*pb.RPC_SubOpts) *RPC {
	return &RPC{
		RPC: pb.RPC{
			Subscriptions: subs,
		},
	}
}
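
// rpcWithMessages builds an RPC carrying only published messages.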
func rpcWithMessages(msgs ...*pb.Message) *RPC {
	return &RPC{RPC: pb.RPC{Publish: msgs}}
}
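
// rpcWithControl builds an RPC carrying the given messages plus a control
// message with the given IHAVE, IWANT, GRAFT, PRUNE and IDONTWANT entries.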
func rpcWithControl(msgs []*pb.Message,
	ihave []*pb.ControlIHave,
	iwant []*pb.ControlIWant,
	graft []*pb.ControlGraft,
	prune []*pb.ControlPrune,
	idontwant []*pb.ControlIDontWant) *RPC {
	return &RPC{
		RPC: pb.RPC{
			Publish: msgs,
			Control: &pb.ControlMessage{
				Ihave:     ihave,
				Iwant:     iwant,
				Graft:     graft,
				Prune:     prune,
				Idontwant: idontwant,
			},
		},
	}
}
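
// copyRPC makes a shallow copy of an RPC, duplicating the Control message
// one level deep; the slices inside it are still shared with the original.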
func copyRPC(rpc *RPC) *RPC {
	res := new(RPC)
	*res = *rpc
	if rpc.Control != nil {
		res.Control = new(pb.ControlMessage)
		*res.Control = *rpc.Control
	}
	return res
}
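
// The function below is an illustrative sketch, not part of the upstream
// file: it shows how the constructors above compose into a single gossipsub
// control RPC. The topic name and message ID are hypothetical placeholders.
func exampleControlRPC() *RPC {
	topic := "example-topic" // hypothetical topic name

	return rpcWithControl(
		nil, // no messages piggybacked on this RPC
		nil, // no IHAVE entries
		// ask the peer for a specific message by ID (placeholder ID)
		[]*pb.ControlIWant{{MessageIDs: []string{"example-message-id"}}},
		// graft ourselves onto the topic mesh
		[]*pb.ControlGraft{{TopicID: &topic}},
		nil, // no PRUNE entries
		nil, // no IDONTWANT entries
	)
}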