package multiplex

import (
	"bufio"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"
	"sync"
	"time"

	logging "github.com/ipfs/go-log"
	pool "github.com/libp2p/go-buffer-pool"
	"github.com/multiformats/go-varint"
)

var log = logging.Logger("mplex")

var MaxMessageSize = 1 << 20

// Max time to block waiting for a slow reader to read from a stream before
// resetting it. Preferably, we'd have some form of back-pressure mechanism but
// we don't have that in this protocol.
var ReceiveTimeout = 5 * time.Second

// ErrShutdown is returned when operating on a shutdown session.
var ErrShutdown = errors.New("session shut down")

// ErrTwoInitiators is returned when both sides think they're the initiator.
var ErrTwoInitiators = errors.New("two initiators")

// ErrInvalidState is returned when the other side does something it shouldn't.
// In this case, we close the connection to be safe.
var ErrInvalidState = errors.New("received an unexpected message from the peer")

var errTimeout = timeout{}
var errStreamClosed = errors.New("stream closed")

var (
	ResetStreamTimeout = 2 * time.Minute

	WriteCoalesceDelay = 100 * time.Microsecond
)

type timeout struct{}

func (timeout) Error() string {
	return "i/o deadline exceeded"
}

func (timeout) Temporary() bool {
	return true
}

func (timeout) Timeout() bool {
	return true
}

// +1 for initiator
const (
	newStreamTag = 0
	messageTag   = 2
	closeTag     = 4
	resetTag     = 6
)
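
// Header layout, as encoded by NewNamedStream and decoded by readNextHeader
// below: header = (streamID << 3) | tag. The even tag values listed here are
// the initiator's variants; the receiver sends tag-1, which the tag-rounding
// step in handleIncoming maps back onto these constants. For example, a data
// frame on initiator stream 7 carries header (7<<3)|messageTag = 58.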

// Multiplex is a mplex session.
type Multiplex struct {
	con       net.Conn
	buf       *bufio.Reader
	nextID    uint64
	initiator bool

	closed       chan struct{}
	shutdown     chan struct{}
	shutdownErr  error
	shutdownLock sync.Mutex

	writeCh         chan []byte
	writeTimer      *time.Timer
	writeTimerFired bool

	nstreams chan *Stream

	channels map[streamID]*Stream
	chLock   sync.Mutex
}

// NewMultiplex creates a new multiplexer session.
func NewMultiplex(con net.Conn, initiator bool) *Multiplex {
	mp := &Multiplex{
		con:        con,
		initiator:  initiator,
		buf:        bufio.NewReader(con),
		channels:   make(map[streamID]*Stream),
		closed:     make(chan struct{}),
		shutdown:   make(chan struct{}),
		writeCh:    make(chan []byte, 16),
		writeTimer: time.NewTimer(0),
		nstreams:   make(chan *Stream, 16),
	}

	go mp.handleIncoming()
	go mp.handleOutgoing()

	return mp
}
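
// A minimal usage sketch (illustrative only; the net.Pipe pairing and echo
// handler are assumptions, not part of this package, and most error handling
// is elided):
//
//	ca, cb := net.Pipe()
//	client := NewMultiplex(ca, true)  // initiator side
//	server := NewMultiplex(cb, false) // receiver side
//
//	go func() {
//		s, err := server.Accept()
//		if err != nil {
//			return
//		}
//		io.Copy(s, s) // echo whatever the peer writes
//	}()
//
//	s, err := client.NewStream(context.Background())
//	if err != nil {
//		panic(err)
//	}
//	s.Write([]byte("hello"))
//	s.Close()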

func (mp *Multiplex) newStream(id streamID, name string) (s *Stream) {
	s = &Stream{
		id:          id,
		name:        name,
		dataIn:      make(chan []byte, 8),
		rDeadline:   makePipeDeadline(),
		wDeadline:   makePipeDeadline(),
		mp:          mp,
		writeCancel: make(chan struct{}),
		readCancel:  make(chan struct{}),
	}
	return
}

// Accept accepts the next stream from the connection.
func (m *Multiplex) Accept() (*Stream, error) {
	select {
	case s, ok := <-m.nstreams:
		if !ok {
			return nil, errors.New("multiplex closed")
		}
		return s, nil
	case <-m.closed:
		return nil, m.shutdownErr
	}
}

// Close closes the session.
func (mp *Multiplex) Close() error {
	mp.closeNoWait()

	// Wait for the receive loop to finish.
	<-mp.closed

	return nil
}

func (mp *Multiplex) closeNoWait() {
	mp.shutdownLock.Lock()
	select {
	case <-mp.shutdown:
	default:
		mp.con.Close()
		close(mp.shutdown)
	}
	mp.shutdownLock.Unlock()
}

// IsClosed returns true if the session is closed.
func (mp *Multiplex) IsClosed() bool {
	select {
	case <-mp.closed:
		return true
	default:
		return false
	}
}

func (mp *Multiplex) sendMsg(timeout, cancel <-chan struct{}, header uint64, data []byte) error {
	buf := pool.Get(len(data) + 20)

	n := 0
	n += binary.PutUvarint(buf[n:], header)
	n += binary.PutUvarint(buf[n:], uint64(len(data)))
	n += copy(buf[n:], data)

	select {
	case mp.writeCh <- buf[:n]:
		return nil
	case <-mp.shutdown:
		return ErrShutdown
	case <-timeout:
		return errTimeout
	case <-cancel:
		return ErrStreamClosed
	}
}
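
// Worked example of the framing above: sending "hi" on initiator stream 1
// uses header (1<<3)|messageTag = 10 and length 2, both single-byte varints,
// so the frame on the wire is 0x0a 0x02 0x68 0x69 (header, length, "hi").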

func (mp *Multiplex) handleOutgoing() {
	for {
		select {
		case <-mp.shutdown:
			return

		case data := <-mp.writeCh:
			// FIXME: https://github.com/libp2p/go-libp2p/issues/644
			// write coalescing disabled until this can be fixed.
			//err := mp.writeMsg(data)
			err := mp.doWriteMsg(data)
			pool.Put(data)
			if err != nil {
				// the connection is closed by this time
				log.Warnf("error writing data: %s", err.Error())
				return
			}
		}
	}
}
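
// writeMsg coalesces small writes: frames under 512 bytes are copied into a
// 4096-byte buffer and flushed after at most WriteCoalesceDelay, so several
// small frames can go out in a single write to the connection. It is
// currently bypassed in handleOutgoing (see the FIXME above).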

func (mp *Multiplex) writeMsg(data []byte) error {
	if len(data) >= 512 {
		err := mp.doWriteMsg(data)
		pool.Put(data)
		return err
	}

	buf := pool.Get(4096)
	defer pool.Put(buf)

	n := copy(buf, data)
	pool.Put(data)

	if !mp.writeTimerFired {
		if !mp.writeTimer.Stop() {
			<-mp.writeTimer.C
		}
	}
	mp.writeTimer.Reset(WriteCoalesceDelay)
	mp.writeTimerFired = false

	for {
		select {
		case data = <-mp.writeCh:
			wr := copy(buf[n:], data)
			if wr < len(data) {
				// we filled the buffer, send it
				err := mp.doWriteMsg(buf)
				if err != nil {
					pool.Put(data)
					return err
				}

				if len(data)-wr >= 512 {
					// the remaining data is not a small write, send it
					err := mp.doWriteMsg(data[wr:])
					pool.Put(data)
					return err
				}

				n = copy(buf, data[wr:])

				// we've written some, reset the timer to coalesce the rest
				if !mp.writeTimer.Stop() {
					<-mp.writeTimer.C
				}
				mp.writeTimer.Reset(WriteCoalesceDelay)
			} else {
				n += wr
			}

			pool.Put(data)

		case <-mp.writeTimer.C:
			mp.writeTimerFired = true
			return mp.doWriteMsg(buf[:n])

		case <-mp.shutdown:
			return ErrShutdown
		}
	}
}

func (mp *Multiplex) doWriteMsg(data []byte) error {
	if mp.isShutdown() {
		return ErrShutdown
	}

	_, err := mp.con.Write(data)
	if err != nil {
		mp.closeNoWait()
	}

	return err
}

func (mp *Multiplex) nextChanID() uint64 {
	out := mp.nextID
	mp.nextID++
	return out
}

// NewStream creates a new stream.
func (mp *Multiplex) NewStream(ctx context.Context) (*Stream, error) {
	return mp.NewNamedStream(ctx, "")
}

// NewNamedStream creates a new named stream.
func (mp *Multiplex) NewNamedStream(ctx context.Context, name string) (*Stream, error) {
	mp.chLock.Lock()

	// We could call IsClosed but this is faster (given that we already have
	// the lock).
	if mp.channels == nil {
		mp.chLock.Unlock()
		return nil, ErrShutdown
	}

	sid := mp.nextChanID()
	header := (sid << 3) | newStreamTag

	if name == "" {
		name = fmt.Sprint(sid)
	}
	s := mp.newStream(streamID{
		id:        sid,
		initiator: true,
	}, name)
	mp.channels[s.id] = s
	mp.chLock.Unlock()

	err := mp.sendMsg(ctx.Done(), nil, header, []byte(name))
	if err != nil {
		if err == errTimeout {
			return nil, ctx.Err()
		}
		return nil, err
	}

	return s, nil
}
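
// cleanup tears the session down: it closes the underlying connection,
// cancels all outstanding stream reads and writes with ErrStreamReset, and
// marks the session closed.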

func (mp *Multiplex) cleanup() {
	mp.closeNoWait()

	// Take the channels.
	mp.chLock.Lock()
	channels := mp.channels
	mp.channels = nil
	mp.chLock.Unlock()

	// Cancel any reads/writes
	for _, msch := range channels {
		msch.cancelRead(ErrStreamReset)
		msch.cancelWrite(ErrStreamReset)
	}

	// And... shutdown!
	if mp.shutdownErr == nil {
		mp.shutdownErr = ErrShutdown
	}
	close(mp.closed)
}
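
// handleIncoming is the session's read loop: it decodes one header and one
// length-prefixed body at a time and dispatches on the tag (new stream,
// message, close, or reset). It runs until the connection fails or the
// session shuts down.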

func (mp *Multiplex) handleIncoming() {
	defer mp.cleanup()

	recvTimeout := time.NewTimer(0)
	defer recvTimeout.Stop()

	if !recvTimeout.Stop() {
		<-recvTimeout.C
	}

	for {
		chID, tag, err := mp.readNextHeader()
		if err != nil {
			mp.shutdownErr = err
			return
		}

		remoteIsInitiator := tag&1 == 0
		ch := streamID{
			// true if *I'm* the initiator.
			initiator: !remoteIsInitiator,
			id:        chID,
		}
		// Rounds up the tag:
		// 0 -> 0
		// 1 -> 2
		// 2 -> 2
		// 3 -> 4
		// etc...
		tag += (tag & 1)

		b, err := mp.readNext()
		if err != nil {
			mp.shutdownErr = err
			return
		}

		mp.chLock.Lock()
		msch, ok := mp.channels[ch]
		mp.chLock.Unlock()

		switch tag {
		case newStreamTag:
			if ok {
				log.Debugf("received NewStream message for existing stream: %v", ch)
				mp.shutdownErr = ErrInvalidState
				return
			}

			name := string(b)
			pool.Put(b)

			msch = mp.newStream(ch, name)
			mp.chLock.Lock()
			mp.channels[ch] = msch
			mp.chLock.Unlock()
			select {
			case mp.nstreams <- msch:
			case <-mp.shutdown:
				return
			}

		case resetTag:
			if !ok {
				// This is *ok*. We forget the stream on reset.
				continue
			}

			// Cancel any ongoing reads/writes.
			msch.cancelRead(ErrStreamReset)
			msch.cancelWrite(ErrStreamReset)
		case closeTag:
			if !ok {
				// may have canceled our reads already.
				continue
			}

			// unregister and throw away future data.
			mp.chLock.Lock()
			delete(mp.channels, ch)
			mp.chLock.Unlock()

			// close data channel, there will be no more data.
			close(msch.dataIn)

			// We intentionally don't cancel any deadlines, cancel reads, cancel
			// writes, etc. We just deliver the EOF by closing the
			// data channel, and unregister the channel so we don't
			// receive any more data. The user still needs to call
			// `Close()` or `Reset()`.
		case messageTag:
			if !ok {
				// We're not accepting data on this stream, for
				// some reason. It's likely that we reset it, or
				// simply canceled reads (e.g., called Close).
				pool.Put(b)
				continue
			}

			recvTimeout.Reset(ReceiveTimeout)
			select {
			case msch.dataIn <- b:
			case <-msch.readCancel:
				// the user has canceled reading. walk away.
				pool.Put(b)
			case <-recvTimeout.C:
				pool.Put(b)
				log.Warnf("timed out receiving message into stream queue")
				// Do not do this asynchronously. Otherwise, we
				// could drop a message, then receive a message,
				// then reset.
				msch.Reset()
				continue
			case <-mp.shutdown:
				pool.Put(b)
				return
			}
			if !recvTimeout.Stop() {
				<-recvTimeout.C
			}
		default:
			log.Debugf("message with unknown header on stream %v", ch)
			if ok {
				msch.Reset()
			}
		}
	}
}

func (mp *Multiplex) isShutdown() bool {
	select {
	case <-mp.shutdown:
		return true
	default:
		return false
	}
}
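
// sendResetMsg queues a reset frame, giving up after ResetStreamTimeout. If
// hard is set and the send fails, the whole session is torn down.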

func (mp *Multiplex) sendResetMsg(header uint64, hard bool) {
	ctx, cancel := context.WithTimeout(context.Background(), ResetStreamTimeout)
	defer cancel()

	err := mp.sendMsg(ctx.Done(), nil, header, nil)
	if err != nil && !mp.isShutdown() {
		if hard {
			log.Warnf("error sending reset message: %s; killing connection", err.Error())
			mp.Close()
		} else {
			log.Debugf("error sending reset message: %s", err.Error())
		}
	}
}

func (mp *Multiplex) readNextHeader() (uint64, uint64, error) {
	h, err := varint.ReadUvarint(mp.buf)
	if err != nil {
		return 0, 0, err
	}

	// get channel ID
	ch := h >> 3

	rem := h & 7

	return ch, rem, nil
}
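
// Decoding example: a header byte of 58 yields channel ID 58>>3 = 7 and tag
// 58&7 = 2 (messageTag), i.e. a data frame from the initiator on stream 7.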

func (mp *Multiplex) readNext() ([]byte, error) {
	// get length
	l, err := varint.ReadUvarint(mp.buf)
	if err != nil {
		return nil, err
	}

	if l > uint64(MaxMessageSize) {
		return nil, errors.New("message size too large")
	}

	if l == 0 {
		return nil, nil
	}

	buf := pool.Get(int(l))
	n, err := io.ReadFull(mp.buf, buf)
	if err != nil {
		return nil, err
	}

	return buf[:n], nil
}

func isFatalNetworkError(err error) bool {
	nerr, ok := err.(net.Error)
	if ok {
		return !(nerr.Timeout() || nerr.Temporary())
	}
	return false
}