package yamux

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"log"
	"math"
	"net"
	"os"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	pool "github.com/libp2p/go-buffer-pool"
)

// The MemoryManager allows management of memory allocations.
//
// Memory is allocated:
//  1. When opening / accepting a new stream. This uses the highest priority.
//  2. When trying to increase the stream receive window. This uses a lower priority.
//
// This is a subset of libp2p's resource manager ResourceScopeSpan interface.
type MemoryManager interface {
	// ReserveMemory reserves memory / buffer space.
	ReserveMemory(size int, prio uint8) error

	// ReleaseMemory explicitly releases memory previously reserved with ReserveMemory.
	ReleaseMemory(size int)

	// Done ends the span and releases associated resources.
	Done()
}

type nullMemoryManagerImpl struct{}

func (n nullMemoryManagerImpl) ReserveMemory(size int, prio uint8) error { return nil }
func (n nullMemoryManagerImpl) ReleaseMemory(size int)                   {}
func (n nullMemoryManagerImpl) Done()                                    {}

var nullMemoryManager = &nullMemoryManagerImpl{}
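
// A minimal sketch of a custom MemoryManager (illustrative only, not part of
// this package): a per-session manager with a hard cap on reserved bytes. The
// names below are hypothetical, and a real implementation would need its own
// locking if shared across goroutines.
//
//	type cappedMemoryManager struct {
//		limit, used int
//	}
//
//	func (c *cappedMemoryManager) ReserveMemory(size int, prio uint8) error {
//		if c.used+size > c.limit {
//			return fmt.Errorf("memory cap of %d bytes exceeded", c.limit)
//		}
//		c.used += size
//		return nil
//	}
//
//	func (c *cappedMemoryManager) ReleaseMemory(size int) { c.used -= size }
//	func (c *cappedMemoryManager) Done()                  { c.used = 0 }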

// Session is used to wrap a reliable ordered connection and to
// multiplex it into multiple streams.
type Session struct {
	rtt int64 // to be accessed atomically, in nanoseconds

	// remoteGoAway indicates the remote side does
	// not want further connections. Must be first for alignment.
	remoteGoAway int32

	// localGoAway indicates that we should stop
	// accepting further connections. Must be first for alignment.
	localGoAway int32

	// nextStreamID is the next stream we should send.
	// This depends on whether we are a client or a server.
	nextStreamID uint32

	// config holds our configuration
	config *Config

	// logger is used for our logs
	logger *log.Logger

	// conn is the underlying connection
	conn net.Conn

	// reader is a buffered reader
	reader io.Reader

	newMemoryManager func() (MemoryManager, error)

	// pings is used to track inflight pings
	pingLock   sync.Mutex
	pingID     uint32
	activePing *ping

	// streams maps a stream id to a stream, and inflight has an entry
	// for any outgoing stream that has not yet been established. Both are
	// protected by streamLock.
	numIncomingStreams uint32
	streams            map[uint32]*Stream
	inflight           map[uint32]struct{}
	streamLock         sync.Mutex

	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
	// is assumed to be symmetric between the client and server. This allows
	// the client to avoid exceeding the backlog and instead blocks the open.
	synCh chan struct{}

	// acceptCh is used to pass ready streams to the client
	acceptCh chan *Stream

	// sendCh is used to send messages
	sendCh chan []byte

	// pingCh and pongCh are used to send pings and pongs
	pongCh, pingCh chan uint32

	// recvDoneCh is closed when recv() exits to avoid a race
	// between stream registration and stream shutdown
	recvDoneCh chan struct{}

	// sendDoneCh is closed when send() exits to avoid a race
	// between returning from a Stream.Write and exiting from the send loop
	// (which may be reading a buffer on loan from Stream.Write).
	sendDoneCh chan struct{}

	// client is true if we're the client and our stream IDs should be odd.
	client bool

	// shutdown is used to safely close a session
	shutdown     bool
	shutdownErr  error
	shutdownCh   chan struct{}
	shutdownLock sync.Mutex

	// keepaliveTimer is a periodic timer for keepalive messages. It's nil
	// when keepalives are disabled.
	keepaliveLock   sync.Mutex
	keepaliveTimer  *time.Timer
	keepaliveActive bool
}

// newSession is used to construct a new session
func newSession(config *Config, conn net.Conn, client bool, readBuf int, newMemoryManager func() (MemoryManager, error)) *Session {
	var reader io.Reader = conn
	if readBuf > 0 {
		reader = bufio.NewReaderSize(reader, readBuf)
	}
	if newMemoryManager == nil {
		newMemoryManager = func() (MemoryManager, error) { return nullMemoryManager, nil }
	}
	s := &Session{
		config:           config,
		client:           client,
		logger:           log.New(config.LogOutput, "", log.LstdFlags),
		conn:             conn,
		reader:           reader,
		streams:          make(map[uint32]*Stream),
		inflight:         make(map[uint32]struct{}),
		synCh:            make(chan struct{}, config.AcceptBacklog),
		acceptCh:         make(chan *Stream, config.AcceptBacklog),
		sendCh:           make(chan []byte, 64),
		pongCh:           make(chan uint32, config.PingBacklog),
		pingCh:           make(chan uint32),
		recvDoneCh:       make(chan struct{}),
		sendDoneCh:       make(chan struct{}),
		shutdownCh:       make(chan struct{}),
		newMemoryManager: newMemoryManager,
	}
	if client {
		s.nextStreamID = 1
	} else {
		s.nextStreamID = 2
	}
	if config.EnableKeepAlive {
		s.startKeepalive()
	}
	go s.recv()
	go s.send()
	go s.startMeasureRTT()
	return s
}
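
// As a rough usage sketch (not part of this file): the two ends of one
// connection pair up by constructing a client session and a server session
// over the same transport. DefaultConfig and the exported constructors are
// assumed to exist elsewhere in the package; only newSession is defined here.
//
//	clientConn, serverConn := net.Pipe()
//	client := newSession(DefaultConfig(), clientConn, true, 0, nil)  // opens odd stream IDs
//	server := newSession(DefaultConfig(), serverConn, false, 0, nil) // opens even stream IDs
//
//	go func() {
//		stream, _ := client.OpenStream(context.Background())
//		stream.Write([]byte("hello"))
//	}()
//	stream, _ := server.AcceptStream()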

// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
	select {
	case <-s.shutdownCh:
		return true
	default:
		return false
	}
}

// CloseChan returns a read-only channel which is closed as
// soon as the session is closed.
func (s *Session) CloseChan() <-chan struct{} {
	return s.shutdownCh
}

// NumStreams returns the number of currently open streams
func (s *Session) NumStreams() int {
	s.streamLock.Lock()
	num := len(s.streams)
	s.streamLock.Unlock()
	return num
}

// Open is used to create a new stream as a net.Conn
func (s *Session) Open(ctx context.Context) (net.Conn, error) {
	conn, err := s.OpenStream(ctx)
	if err != nil {
		return nil, err
	}
	return conn, nil
}

// OpenStream is used to create a new stream
func (s *Session) OpenStream(ctx context.Context) (*Stream, error) {
	if s.IsClosed() {
		return nil, s.shutdownErr
	}
	if atomic.LoadInt32(&s.remoteGoAway) == 1 {
		return nil, ErrRemoteGoAway
	}

	// Block if we have too many inflight SYNs
	select {
	case s.synCh <- struct{}{}:
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-s.shutdownCh:
		return nil, s.shutdownErr
	}

	span, err := s.newMemoryManager()
	if err != nil {
		return nil, fmt.Errorf("failed to create resource scope span: %w", err)
	}
	if err := span.ReserveMemory(initialStreamWindow, 255); err != nil {
		return nil, err
	}

GET_ID:
	// Get an ID, and check for stream exhaustion
	id := atomic.LoadUint32(&s.nextStreamID)
	if id >= math.MaxUint32-1 {
		span.Done()
		return nil, ErrStreamsExhausted
	}
	if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
		goto GET_ID
	}

	// Register the stream
	stream := newStream(s, id, streamInit, initialStreamWindow, span)
	s.streamLock.Lock()
	s.streams[id] = stream
	s.inflight[id] = struct{}{}
	s.streamLock.Unlock()

	// Send the window update to create the stream on the remote side
	if err := stream.sendWindowUpdate(); err != nil {
		defer span.Done()
		select {
		case <-s.synCh:
		default:
			s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
		}
		return nil, err
	}
	return stream, nil
}
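
// OpenStream honours the caller's context while waiting for a free SYN slot,
// so opens can be bounded in time. A minimal sketch (the surrounding session
// setup is assumed):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	stream, err := session.OpenStream(ctx)
//	if err != nil {
//		// err is ctx.Err(), ErrRemoteGoAway, ErrStreamsExhausted,
//		// or the session's shutdown error.
//	}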

// Accept is used to block until the next available stream
// is ready to be accepted.
func (s *Session) Accept() (net.Conn, error) {
	conn, err := s.AcceptStream()
	if err != nil {
		return nil, err
	}
	return conn, err
}

// AcceptStream is used to block until the next available stream
// is ready to be accepted.
func (s *Session) AcceptStream() (*Stream, error) {
	for {
		select {
		case stream := <-s.acceptCh:
			if err := stream.sendWindowUpdate(); err != nil {
				// don't return accept errors.
				s.logger.Printf("[WARN] error sending window update before accepting: %s", err)
				continue
			}
			return stream, nil
		case <-s.shutdownCh:
			return nil, s.shutdownErr
		}
	}
}
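
// A typical server-side accept loop looks like the sketch below; handle is a
// hypothetical per-stream handler supplied by the caller.
//
//	for {
//		stream, err := session.AcceptStream()
//		if err != nil {
//			return err // the session was shut down
//		}
//		go handle(stream)
//	}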

// Close is used to close the session and all streams.
// Attempts to send a GoAway before closing the connection.
func (s *Session) Close() error {
	s.shutdownLock.Lock()
	defer s.shutdownLock.Unlock()

	if s.shutdown {
		return nil
	}
	s.shutdown = true
	if s.shutdownErr == nil {
		s.shutdownErr = ErrSessionShutdown
	}
	close(s.shutdownCh)
	s.conn.Close()
	s.stopKeepalive()
	<-s.recvDoneCh
	<-s.sendDoneCh

	s.streamLock.Lock()
	defer s.streamLock.Unlock()
	for id, stream := range s.streams {
		stream.forceClose()
		delete(s.streams, id)
		stream.memorySpan.Done()
	}
	return nil
}

// exitErr is used to handle an error that is causing the
// session to terminate.
func (s *Session) exitErr(err error) {
	s.shutdownLock.Lock()
	if s.shutdownErr == nil {
		s.shutdownErr = err
	}
	s.shutdownLock.Unlock()
	s.Close()
}

// GoAway can be used to prevent accepting further
// connections. It does not close the underlying conn.
func (s *Session) GoAway() error {
	return s.sendMsg(s.goAway(goAwayNormal), nil, nil)
}

// goAway marks the session as going away locally and builds the GoAway
// header to send.
func (s *Session) goAway(reason uint32) header {
	atomic.SwapInt32(&s.localGoAway, 1)
	hdr := encode(typeGoAway, 0, 0, reason)
	return hdr
}

func (s *Session) measureRTT() {
	rtt, err := s.Ping()
	if err != nil {
		return
	}
	if !atomic.CompareAndSwapInt64(&s.rtt, 0, rtt.Nanoseconds()) {
		prev := atomic.LoadInt64(&s.rtt)
		smoothedRTT := prev/2 + rtt.Nanoseconds()/2
		atomic.StoreInt64(&s.rtt, smoothedRTT)
	}
}
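
// The update above is an exponentially weighted moving average with a weight
// of 1/2: the very first sample is stored as-is via the CompareAndSwap against
// zero, and every later sample contributes half. For example, samples of 10ms
// and then 20ms leave a stored RTT of 10ms/2 + 20ms/2 = 15ms.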

// startMeasureRTT measures the RTT once, then re-measures it periodically
// until the session is closed.
func (s *Session) startMeasureRTT() {
	s.measureRTT()
	t := time.NewTicker(s.config.MeasureRTTInterval)
	defer t.Stop()
	for {
		select {
		case <-s.CloseChan():
			return
		case <-t.C:
			s.measureRTT()
		}
	}
}

// getRTT returns the smoothed RTT measurement,
// or 0 if we don't yet have a measurement.
func (s *Session) getRTT() time.Duration {
	return time.Duration(atomic.LoadInt64(&s.rtt))
}

// Ping is used to measure the RTT response time
func (s *Session) Ping() (dur time.Duration, err error) {
	// Prepare a ping.
	s.pingLock.Lock()
	// If there's an active ping, jump on the bandwagon.
	if activePing := s.activePing; activePing != nil {
		s.pingLock.Unlock()
		return activePing.wait()
	}

	// Ok, our job to send the ping.
	activePing := newPing(s.pingID)
	s.pingID++
	s.activePing = activePing
	s.pingLock.Unlock()

	defer func() {
		// complete ping promise
		activePing.finish(dur, err)

		// Unset it.
		s.pingLock.Lock()
		s.activePing = nil
		s.pingLock.Unlock()
	}()

	// Send the ping request, waiting at most one connection write timeout
	// to flush it.
	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
	defer timer.Stop()
	select {
	case s.pingCh <- activePing.id:
	case <-timer.C:
		return 0, ErrTimeout
	case <-s.shutdownCh:
		return 0, s.shutdownErr
	}

	// The "time" starts once we've actually sent the ping. Otherwise, we'll
	// measure the time it takes to flush the queue as well.
	start := time.Now()

	// Wait for a response, again waiting at most one write timeout.
	if !timer.Stop() {
		<-timer.C
	}
	timer.Reset(s.config.ConnectionWriteTimeout)
	select {
	case <-activePing.pingResponse:
	case <-timer.C:
		return 0, ErrTimeout
	case <-s.shutdownCh:
		return 0, s.shutdownErr
	}

	// Compute the RTT
	return time.Since(start), nil
}

// startKeepalive starts the keepalive process.
func (s *Session) startKeepalive() {
	s.keepaliveLock.Lock()
	defer s.keepaliveLock.Unlock()
	s.keepaliveTimer = time.AfterFunc(s.config.KeepAliveInterval, func() {
		s.keepaliveLock.Lock()
		if s.keepaliveTimer == nil || s.keepaliveActive {
			// keepalives have been stopped or a keepalive is active.
			s.keepaliveLock.Unlock()
			return
		}
		s.keepaliveActive = true
		s.keepaliveLock.Unlock()

		_, err := s.Ping()

		s.keepaliveLock.Lock()
		s.keepaliveActive = false
		if s.keepaliveTimer != nil {
			s.keepaliveTimer.Reset(s.config.KeepAliveInterval)
		}
		s.keepaliveLock.Unlock()

		if err != nil {
			s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
			s.exitErr(ErrKeepAliveTimeout)
		}
	})
}

// stopKeepalive stops the keepalive process.
func (s *Session) stopKeepalive() {
	s.keepaliveLock.Lock()
	defer s.keepaliveLock.Unlock()
	if s.keepaliveTimer != nil {
		s.keepaliveTimer.Stop()
		s.keepaliveTimer = nil
	}
}

// extendKeepalive delays the next keepalive; it is called whenever we
// receive data from the peer.
func (s *Session) extendKeepalive() {
	s.keepaliveLock.Lock()
	if s.keepaliveTimer != nil && !s.keepaliveActive {
		// Don't stop the timer and drain the channel. This is an
		// AfterFunc, not a normal timer, and any attempts to drain the
		// channel will block forever.
		//
		// Go will stop the timer for us internally anyway. The docs
		// say one must stop the timer before calling reset but that's
		// to ensure that the timer doesn't end up firing immediately
		// after calling Reset.
		s.keepaliveTimer.Reset(s.config.KeepAliveInterval)
	}
	s.keepaliveLock.Unlock()
}

// sendMsg sends the header and body.
func (s *Session) sendMsg(hdr header, body []byte, deadline <-chan struct{}) error {
	select {
	case <-s.shutdownCh:
		return s.shutdownErr
	default:
	}

	// duplicate as we're sending this async.
	buf := pool.Get(headerSize + len(body))
	copy(buf[:headerSize], hdr[:])
	copy(buf[headerSize:], body)

	select {
	case <-s.shutdownCh:
		pool.Put(buf)
		return s.shutdownErr
	case s.sendCh <- buf:
		return nil
	case <-deadline:
		pool.Put(buf)
		return ErrTimeout
	}
}
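
// Note: a nil deadline channel never becomes ready, so callers that pass nil
// (for example GoAway and the RST paths in incomingStream) wait for space in
// the send queue bounded only by session shutdown.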

// send is a long running goroutine that sends data
func (s *Session) send() {
	if err := s.sendLoop(); err != nil {
		s.exitErr(err)
	}
}

func (s *Session) sendLoop() (err error) {
	defer func() {
		if rerr := recover(); rerr != nil {
			fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
			err = fmt.Errorf("panic in yamux send loop: %s", rerr)
		}
	}()

	defer close(s.sendDoneCh)

	// Extend the write deadline if we've passed the halfway point. This can
	// be expensive so this ensures we only have to do this once every
	// ConnectionWriteTimeout/2 (usually 5s).
	var lastWriteDeadline time.Time
	extendWriteDeadline := func() error {
		now := time.Now()
		// If over half of the deadline has elapsed, extend it.
		if now.Add(s.config.ConnectionWriteTimeout / 2).After(lastWriteDeadline) {
			lastWriteDeadline = now.Add(s.config.ConnectionWriteTimeout)
			return s.conn.SetWriteDeadline(lastWriteDeadline)
		}
		return nil
	}

	writer := s.conn

	// FIXME: https://github.com/libp2p/go-libp2p/issues/644
	// Write coalescing is disabled for now.
	// writer := pool.Writer{W: s.conn}

	// var writeTimeout *time.Timer
	// var writeTimeoutCh <-chan time.Time
	// if s.config.WriteCoalesceDelay > 0 {
	// 	writeTimeout = time.NewTimer(s.config.WriteCoalesceDelay)
	// 	defer writeTimeout.Stop()
	//
	// 	writeTimeoutCh = writeTimeout.C
	// } else {
	// 	ch := make(chan time.Time)
	// 	close(ch)
	// 	writeTimeoutCh = ch
	// }

	for {
		// yield after processing the last message, if we've shutdown.
		// s.sendCh is a buffered channel and Go doesn't guarantee select order.
		select {
		case <-s.shutdownCh:
			return nil
		default:
		}

		var buf []byte
		// Make sure to send any pings & pongs first so they don't get stuck behind writes.
		select {
		case pingID := <-s.pingCh:
			buf = pool.Get(headerSize)
			hdr := encode(typePing, flagSYN, 0, pingID)
			copy(buf, hdr[:])
		case pingID := <-s.pongCh:
			buf = pool.Get(headerSize)
			hdr := encode(typePing, flagACK, 0, pingID)
			copy(buf, hdr[:])
		default:
			// Then send normal data.
			select {
			case buf = <-s.sendCh:
			case pingID := <-s.pingCh:
				buf = pool.Get(headerSize)
				hdr := encode(typePing, flagSYN, 0, pingID)
				copy(buf, hdr[:])
			case pingID := <-s.pongCh:
				buf = pool.Get(headerSize)
				hdr := encode(typePing, flagACK, 0, pingID)
				copy(buf, hdr[:])
			case <-s.shutdownCh:
				return nil
				// default:
				// 	select {
				// 	case buf = <-s.sendCh:
				// 	case <-s.shutdownCh:
				// 		return nil
				// 	case <-writeTimeoutCh:
				// 		if err := writer.Flush(); err != nil {
				// 			if os.IsTimeout(err) {
				// 				err = ErrConnectionWriteTimeout
				// 			}
				// 			return err
				// 		}
				//
				// 		select {
				// 		case buf = <-s.sendCh:
				// 		case <-s.shutdownCh:
				// 			return nil
				// 		}
				//
				// 		if writeTimeout != nil {
				// 			writeTimeout.Reset(s.config.WriteCoalesceDelay)
				// 		}
				// 	}
			}
		}

		if err := extendWriteDeadline(); err != nil {
			pool.Put(buf)
			return err
		}

		_, err := writer.Write(buf)
		pool.Put(buf)

		if err != nil {
			if os.IsTimeout(err) {
				err = ErrConnectionWriteTimeout
			}
			return err
		}
	}
}

// recv is a long running goroutine that accepts new data
func (s *Session) recv() {
	if err := s.recvLoop(); err != nil {
		s.exitErr(err)
	}
}

// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
var (
	handlers = []func(*Session, header) error{
		typeData:         (*Session).handleStreamMessage,
		typeWindowUpdate: (*Session).handleStreamMessage,
		typePing:         (*Session).handlePing,
		typeGoAway:       (*Session).handleGoAway,
	}
)
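
// The indexed composite literal above relies on the frame type constants
// (typeData, typeWindowUpdate, typePing, typeGoAway) being small consecutive
// values declared elsewhere in this package; together with the bounds check
// on mt in recvLoop below, that is what makes the direct handlers[mt] lookup
// safe.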

// recvLoop continues to receive data until a fatal error is encountered
func (s *Session) recvLoop() (err error) {
	defer func() {
		if rerr := recover(); rerr != nil {
			fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
			err = fmt.Errorf("panic in yamux receive loop: %s", rerr)
		}
	}()
	defer close(s.recvDoneCh)
	var hdr header
	for {
		// fmt.Printf("ReadFull from %#v\n", s.reader)
		// Read the header
		if _, err := io.ReadFull(s.reader, hdr[:]); err != nil {
			if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
				s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
			}
			return err
		}

		// Reset the keepalive timer every time we receive data.
		// There's no reason to keepalive if we're active. Worse, if the
		// peer is busy sending us stuff, the pong might get stuck
		// behind a bunch of data.
		s.extendKeepalive()

		// Verify the version
		if hdr.Version() != protoVersion {
			s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
			return ErrInvalidVersion
		}

		mt := hdr.MsgType()
		if mt < typeData || mt > typeGoAway {
			return ErrInvalidMsgType
		}

		if err := handlers[mt](s, hdr); err != nil {
			return err
		}
	}
}

// handleStreamMessage handles either a data or window update frame
func (s *Session) handleStreamMessage(hdr header) error {
	// Check for a new stream creation
	id := hdr.StreamID()
	flags := hdr.Flags()
	if flags&flagSYN == flagSYN {
		if err := s.incomingStream(id); err != nil {
			return err
		}
	}

	// Get the stream
	s.streamLock.Lock()
	stream := s.streams[id]
	s.streamLock.Unlock()

	// If we do not have a stream, likely we sent a RST
	if stream == nil {
		// Drain any data on the wire
		if hdr.MsgType() == typeData && hdr.Length() > 0 {
			s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
			if _, err := io.CopyN(io.Discard, s.reader, int64(hdr.Length())); err != nil {
				s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
				return nil
			}
		} else {
			s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
		}
		return nil
	}

	// Check if this is a window update
	if hdr.MsgType() == typeWindowUpdate {
		stream.incrSendWindow(hdr, flags)
		return nil
	}

	// Read the new data
	if err := stream.readData(hdr, flags, s.reader); err != nil {
		if sendErr := s.sendMsg(s.goAway(goAwayProtoErr), nil, nil); sendErr != nil {
			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
		}
		return err
	}
	return nil
}

// handlePing is invoked for a typePing frame
func (s *Session) handlePing(hdr header) error {
	flags := hdr.Flags()
	pingID := hdr.Length()

	// Check if this is a query, respond back in a separate context so we
	// don't interfere with the receiving thread blocking for the write.
	if flags&flagSYN == flagSYN {
		select {
		case s.pongCh <- pingID:
		default:
			s.logger.Printf("[WARN] yamux: dropped ping reply")
		}
		return nil
	}

	// Handle a response
	s.pingLock.Lock()
	// If we have an active ping, and this is a response to that active
	// ping, complete the ping.
	if s.activePing != nil && s.activePing.id == pingID {
		// Don't assume that the peer won't send multiple responses for
		// the same ping.
		select {
		case s.activePing.pingResponse <- struct{}{}:
		default:
		}
	}
	s.pingLock.Unlock()
	return nil
}

// handleGoAway is invoked for a typeGoAway frame
func (s *Session) handleGoAway(hdr header) error {
	code := hdr.Length()
	switch code {
	case goAwayNormal:
		atomic.SwapInt32(&s.remoteGoAway, 1)
	case goAwayProtoErr:
		s.logger.Printf("[ERR] yamux: received protocol error go away")
		return fmt.Errorf("yamux protocol error")
	case goAwayInternalErr:
		s.logger.Printf("[ERR] yamux: received internal error go away")
		return fmt.Errorf("remote yamux internal error")
	default:
		s.logger.Printf("[ERR] yamux: received unexpected go away")
		return fmt.Errorf("unexpected go away received")
	}
	return nil
}

// incomingStream is used to create a new incoming stream
func (s *Session) incomingStream(id uint32) error {
	if s.client != (id%2 == 0) {
		s.logger.Printf("[ERR] yamux: both endpoints are clients")
		return fmt.Errorf("both yamux endpoints are clients")
	}
	// Reject immediately if we are doing a go away
	if atomic.LoadInt32(&s.localGoAway) == 1 {
		hdr := encode(typeWindowUpdate, flagRST, id, 0)
		return s.sendMsg(hdr, nil, nil)
	}

	// Allocate a new stream
	span, err := s.newMemoryManager()
	if err != nil {
		return fmt.Errorf("failed to create resource span: %w", err)
	}
	if err := span.ReserveMemory(initialStreamWindow, 255); err != nil {
		return err
	}
	stream := newStream(s, id, streamSYNReceived, initialStreamWindow, span)

	s.streamLock.Lock()
	defer s.streamLock.Unlock()

	// Check if stream already exists
	if _, ok := s.streams[id]; ok {
		s.logger.Printf("[ERR] yamux: duplicate stream declared")
		if sendErr := s.sendMsg(s.goAway(goAwayProtoErr), nil, nil); sendErr != nil {
			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
		}
		span.Done()
		return ErrDuplicateStream
	}

	if s.numIncomingStreams >= s.config.MaxIncomingStreams {
		// too many active streams at the same time
		s.logger.Printf("[WARN] yamux: MaxIncomingStreams exceeded, forcing stream reset")
		defer span.Done()
		hdr := encode(typeWindowUpdate, flagRST, id, 0)
		return s.sendMsg(hdr, nil, nil)
	}

	s.numIncomingStreams++
	// Register the stream
	s.streams[id] = stream

	// Check if we've exceeded the backlog
	select {
	case s.acceptCh <- stream:
		return nil
	default:
		// Backlog exceeded! RST the stream
		defer span.Done()
		s.logger.Printf("[WARN] yamux: backlog exceeded, forcing stream reset")
		s.deleteStream(id)
		hdr := encode(typeWindowUpdate, flagRST, id, 0)
		return s.sendMsg(hdr, nil, nil)
	}
}

// closeStream is used to close a stream once both sides have
// issued a close. If there was an in-flight SYN and the stream
// was not yet established, then this will give the credit back.
func (s *Session) closeStream(id uint32) {
	s.streamLock.Lock()
	defer s.streamLock.Unlock()
	if _, ok := s.inflight[id]; ok {
		select {
		case <-s.synCh:
		default:
			s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
		}
		delete(s.inflight, id)
	}
	s.deleteStream(id)
}

func (s *Session) deleteStream(id uint32) {
	str, ok := s.streams[id]
	if !ok {
		return
	}
	if s.client == (id%2 == 0) {
		if s.numIncomingStreams == 0 {
			s.logger.Printf("[ERR] yamux: numIncomingStreams underflow")
			// prevent the creation of any new streams
			s.numIncomingStreams = math.MaxUint32
		} else {
			s.numIncomingStreams--
		}
	}
	delete(s.streams, id)
	str.memorySpan.Done()
}

// establishStream is used to mark a stream that was in the
// SYN Sent state as established.
func (s *Session) establishStream(id uint32) {
	s.streamLock.Lock()
	if _, ok := s.inflight[id]; ok {
		delete(s.inflight, id)
	} else {
		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
	}
	select {
	case <-s.synCh:
	default:
		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
	}
	s.streamLock.Unlock()
}