Rework Reader waiting

parent f86355ac0b
commit 175b826e73

client.go

@@ -1097,12 +1097,12 @@ func (cl *Client) newTorrent(ih metainfo.Hash, specStorage storage.ClientImpl) (
         storageOpener:       storageClient,
         maxEstablishedConns: cl.config.EstablishedConnsPerTorrent,
 
-        networkingEnabled: true,
         metadataChanged: sync.Cond{
             L: cl.locker(),
         },
         webSeeds: make(map[string]*Peer),
     }
+    t.networkingEnabled.Set()
     t._pendingPieces.NewSet = priorityBitmapStableNewSet
     t.logger = cl.logger.WithContextValue(t)
     t.setChunkSize(defaultChunkSize)
@@ -1198,7 +1198,7 @@ func (t *Torrent) MergeSpec(spec *TorrentSpec) error {
     }
     t.addTrackers(spec.Trackers)
     t.maybeNewConns()
-    t.dataDownloadDisallowed = spec.DisallowDataDownload
+    t.dataDownloadDisallowed.SetBool(spec.DisallowDataDownload)
     t.dataUploadDisallowed = spec.DisallowDataUpload
     return nil
 }

misc.go (7 lines changed)

@@ -52,8 +52,11 @@ func metadataPieceSize(totalSize int, piece int) int {
 }
 
 // Return the request that would include the given offset into the torrent data.
-func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (
-    r Request, ok bool) {
+func torrentOffsetRequest(
+    torrentLength, pieceSize, chunkSize, offset int64,
+) (
+    r Request, ok bool,
+) {
     if offset < 0 || offset >= torrentLength {
         return
     }

piece.go (5 lines changed)

@@ -4,6 +4,7 @@ import (
     "fmt"
     "sync"
 
+    "github.com/anacrolix/chansync"
     "github.com/anacrolix/missinggo/v2/bitmap"
 
     "github.com/anacrolix/torrent/metainfo"
@@ -21,6 +22,8 @@ type Piece struct {
     // length can be determined by the request chunkSize in use.
     _dirtyChunks bitmap.Bitmap
 
+    readerCond chansync.BroadcastCond
+
     numVerifies int64
     hashing     bool
     marking     bool
@@ -70,7 +73,7 @@ func (p *Piece) numDirtyChunks() pp.Integer {
 
 func (p *Piece) unpendChunkIndex(i int) {
     p._dirtyChunks.Add(bitmap.BitIndex(i))
-    p.t.tickleReaders()
+    p.readerCond.Broadcast()
 }
 
 func (p *Piece) pendChunkIndex(i int) {
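
The per-piece readerCond replaces the client-wide tickleReaders broadcast: a waiter snapshots the channel returned by Signaled() while holding a lock, unlocks, and blocks on that channel until a later Broadcast() wakes it. A minimal sketch of that pattern, assuming only the Broadcast and Signaled methods used in this diff; the counter type and its names are purely illustrative:

package main

import (
    "fmt"
    "sync"
    "time"

    "github.com/anacrolix/chansync"
)

// A toy value guarded by a mutex, with a BroadcastCond to wake waiters when it changes.
type counter struct {
    mu   sync.Mutex
    n    int
    cond chansync.BroadcastCond
}

func (c *counter) inc() {
    c.mu.Lock()
    c.n++
    c.mu.Unlock()
    // Wake anyone holding a channel obtained from Signaled() before this call.
    c.cond.Broadcast()
}

// waitAtLeast blocks until the counter reaches want, re-checking after each broadcast.
func (c *counter) waitAtLeast(want int) int {
    for {
        c.mu.Lock()
        n := c.n
        // Snapshot the signal channel while the state is consistent, then unlock.
        signaled := c.cond.Signaled()
        c.mu.Unlock()
        if n >= want {
            return n
        }
        <-signaled // becomes ready on the next Broadcast()
    }
}

func main() {
    var c counter
    go func() {
        for i := 0; i < 3; i++ {
            time.Sleep(10 * time.Millisecond)
            c.inc()
        }
    }()
    fmt.Println(c.waitAtLeast(3))
}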

reader.go (65 lines changed)

@@ -97,11 +97,6 @@ func (r *reader) available(off, max int64) (ret int64) {
     return
 }
 
-func (r *reader) waitReadable(off int64) {
-    // We may have been sent back here because we were told we could read but it failed.
-    r.t.cl.event.Wait()
-}
-
 // Calculates the pieces this reader wants downloaded, ignoring the cached value at r.pieces.
 func (r *reader) piecesUncached() (ret pieceRange) {
     ra := r.readahead
@@ -122,27 +117,11 @@ func (r *reader) Read(b []byte) (n int, err error) {
 }
 
 func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
-    // This is set under the Client lock if the Context is canceled. I think we coordinate on a
-    // separate variable so as to avoid false negatives with race conditions due to Contexts being
-    // synchronized.
-    var ctxErr error
-    if ctx.Done() != nil {
-        ctx, cancel := context.WithCancel(ctx)
-        // Abort the goroutine when the function returns.
-        defer cancel()
-        go func() {
-            <-ctx.Done()
-            r.t.cl.lock()
-            ctxErr = ctx.Err()
-            r.t.tickleReaders()
-            r.t.cl.unlock()
-        }()
-    }
     // Hmmm, if a Read gets stuck, this means you can't change position for other purposes. That
     // seems reasonable, but unusual.
     r.opMu.Lock()
     defer r.opMu.Unlock()
-    n, err = r.readOnceAt(b, r.pos, &ctxErr)
+    n, err = r.readOnceAt(ctx, b, r.pos)
     if n == 0 {
         if err == nil && len(b) > 0 {
             panic("expected error")
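
With the ctxErr relay goroutine gone, cancellation now reaches the wait loop directly through the ctx argument, so a caller can bound a read with an ordinary context. A hedged usage sketch; the client and torrent setup is elided, and NewReader, ReadContext, and Close are taken from the package's public Reader API rather than from this diff:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/anacrolix/torrent"
)

// readWithTimeout bounds a single read with a deadline. If no data becomes
// available in time, the reader's wait loop returns ctx.Err() instead of
// blocking until the torrent closes.
func readWithTimeout(t *torrent.Torrent, buf []byte) (int, error) {
    r := t.NewReader()
    defer r.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    return r.ReadContext(ctx, buf)
}

func main() {
    // Client and torrent construction elided; see the package documentation.
    fmt.Println("usage sketch only")
}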
@@ -163,32 +142,44 @@ func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
     return
 }
 
+var closedChan = make(chan struct{})
+
+func init() {
+    close(closedChan)
+}
+
 // Wait until some data should be available to read. Tickles the client if it isn't. Returns how
 // much should be readable without blocking.
-func (r *reader) waitAvailable(pos, wanted int64, ctxErr *error, wait bool) (avail int64, err error) {
-    r.t.cl.lock()
-    defer r.t.cl.unlock()
+func (r *reader) waitAvailable(ctx context.Context, pos, wanted int64, wait bool) (avail int64, err error) {
+    t := r.t
     for {
+        r.t.cl.rLock()
         avail = r.available(pos, wanted)
+        readerCond := t.piece(int((r.offset + pos) / t.info.PieceLength)).readerCond.Signaled()
+        r.t.cl.rUnlock()
         if avail != 0 {
             return
         }
-        if r.t.closed.IsSet() {
+        var dontWait <-chan struct{}
+        if !wait || wanted == 0 {
+            dontWait = closedChan
+        }
+        select {
+        case <-r.t.closed.Done():
             err = errors.New("torrent closed")
             return
-        }
-        if *ctxErr != nil {
-            err = *ctxErr
+        case <-ctx.Done():
+            err = ctx.Err()
             return
-        }
-        if r.t.dataDownloadDisallowed || !r.t.networkingEnabled {
-            err = errors.New("downloading disabled and data not already available")
+        case <-r.t.dataDownloadDisallowed.On():
+            err = errors.New("torrent data downloading disabled")
+            return
+        case <-r.t.networkingEnabled.Off():
+            err = errors.New("torrent networking disabled")
             return
-        }
-        if !wait || wanted == 0 {
-            return
+        case <-dontWait:
+            return
+        case <-readerCond:
         }
-        r.waitReadable(pos)
     }
 }
 
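
closedChan is the usual always-closed-channel idiom: when the caller does not want to wait, dontWait is set to the closed channel and its select case fires immediately; otherwise dontWait stays nil, and receiving from a nil channel never proceeds, so that case is effectively disabled. A standalone sketch of the idiom outside the torrent code, with illustrative names:

package main

import (
    "fmt"
    "time"
)

// An always-closed channel: receiving from it never blocks.
var closedChan = make(chan struct{})

func init() {
    close(closedChan)
}

// waitOrNot blocks on work only when wait is true. A nil channel is never
// ready, so the dontWait case cannot fire; the closed channel is always
// ready, so the case fires immediately.
func waitOrNot(work <-chan string, wait bool) string {
    var dontWait <-chan struct{} // nil: never ready
    if !wait {
        dontWait = closedChan // always ready
    }
    select {
    case s := <-work:
        return s
    case <-dontWait:
        return "not waiting"
    }
}

func main() {
    work := make(chan string, 1)
    fmt.Println(waitOrNot(work, false)) // returns immediately
    go func() {
        time.Sleep(10 * time.Millisecond)
        work <- "data"
    }()
    fmt.Println(waitOrNot(work, true)) // blocks until data arrives
}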
@@ -199,14 +190,14 @@ func (r *reader) torrentOffset(readerPos int64) int64 {
 }
 
 // Performs at most one successful read to torrent storage.
-func (r *reader) readOnceAt(b []byte, pos int64, ctxErr *error) (n int, err error) {
+func (r *reader) readOnceAt(ctx context.Context, b []byte, pos int64) (n int, err error) {
     if pos >= r.length {
         err = io.EOF
         return
     }
     for {
         var avail int64
-        avail, err = r.waitAvailable(pos, int64(len(b)), ctxErr, n == 0)
+        avail, err = r.waitAvailable(ctx, pos, int64(len(b)), n == 0)
         if avail == 0 {
             return
         }

torrent.go (42 lines changed)

@@ -13,12 +13,12 @@ import (
     "net/url"
     "sort"
     "strings"
-    "sync"
     "text/tabwriter"
     "time"
     "unsafe"
 
     "github.com/RoaringBitmap/roaring"
+    "github.com/anacrolix/chansync"
     "github.com/anacrolix/dht/v2"
     "github.com/anacrolix/log"
     "github.com/anacrolix/missinggo/iter"
@@ -29,6 +29,7 @@ import (
     "github.com/anacrolix/missinggo/v2/bitmap"
     "github.com/anacrolix/missinggo/v2/prioritybitmap"
     "github.com/anacrolix/multiless"
+    "github.com/anacrolix/sync"
    "github.com/davecgh/go-spew/spew"
     "github.com/pion/datachannel"
 
@@ -52,12 +53,12 @@ type Torrent struct {
     cl     *Client
     logger log.Logger
 
-    networkingEnabled      bool
-    dataDownloadDisallowed bool
+    networkingEnabled      chansync.Flag
+    dataDownloadDisallowed chansync.Flag
     dataUploadDisallowed   bool
     userOnWriteChunkErr    func(error)
 
-    closed   missinggo.Event
+    closed   chansync.SetOnce
     infoHash metainfo.Hash
     pieces   []Piece
     // Values are the piece indices that changed.
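
chansync.Flag and chansync.SetOnce expose their state as channels, which is what lets waitAvailable select on them instead of sleeping on the client-wide condition variable. A sketch limited to the methods that appear in this diff (Set, Clear, SetBool, Bool, On, Off, Done, IsSet); the state type is a stand-in for the Torrent fields above, and the exact wake-up semantics are inferred from how the diff uses them:

package main

import (
    "errors"
    "fmt"
    "time"

    "github.com/anacrolix/chansync"
)

// A cut-down stand-in for the Torrent fields changed in this commit.
type state struct {
    networkingEnabled      chansync.Flag
    dataDownloadDisallowed chansync.Flag
    closed                 chansync.SetOnce
}

// wait blocks until one of the conditions fires, mirroring the select added to
// reader.waitAvailable.
func (s *state) wait() error {
    select {
    case <-s.closed.Done(): // ready once Set() has been called
        return errors.New("closed")
    case <-s.dataDownloadDisallowed.On(): // ready while the flag is set
        return errors.New("downloading disabled")
    case <-s.networkingEnabled.Off(): // ready while the flag is clear
        return errors.New("networking disabled")
    }
}

func main() {
    var s state
    s.networkingEnabled.Set() // as in newTorrent: enabled by default
    s.dataDownloadDisallowed.SetBool(false)

    go func() {
        time.Sleep(10 * time.Millisecond)
        s.dataDownloadDisallowed.Set() // as in Torrent.DisallowDataDownload
    }()

    fmt.Println(s.wait(), s.closed.IsSet(), s.networkingEnabled.Bool())
}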
@@ -192,13 +193,9 @@ func (t *Torrent) pendingPieces() *prioritybitmap.PriorityBitmap {
     return &t._pendingPieces
 }
 
-func (t *Torrent) tickleReaders() {
-    t.cl.event.Broadcast()
-}
-
 // Returns a channel that is closed when the Torrent is closed.
-func (t *Torrent) Closed() <-chan struct{} {
-    return t.closed.LockedChan(t.cl.locker())
+func (t *Torrent) Closed() chansync.Done {
+    return t.closed.Done()
 }
 
 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
@@ -794,7 +791,6 @@ func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
 
 func (t *Torrent) close() (err error) {
     t.closed.Set()
-    t.tickleReaders()
     if t.storage != nil {
         go func() {
             t.storageLock.Lock()
@@ -1162,7 +1158,6 @@ func (t *Torrent) pendRequest(req Request) {
 }
 
 func (t *Torrent) pieceCompletionChanged(piece pieceIndex) {
-    t.tickleReaders()
     t.cl.event.Broadcast()
     if t.pieceComplete(piece) {
         t.onPieceCompleted(piece)
@@ -1501,7 +1496,7 @@ func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
 func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
     wtc, release := t.cl.websocketTrackers.Get(u.String())
     go func() {
-        <-t.closed.LockedChan(t.cl.locker())
+        <-t.closed.Done()
         release()
     }()
     wst := websocketTrackerStatus{u, wtc}
@@ -1664,7 +1659,7 @@ func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
         return err
     }
     select {
-    case <-t.closed.LockedChan(t.cl.locker()):
+    case <-t.closed.Done():
     case <-time.After(5 * time.Minute):
     }
     stop()
@@ -1815,7 +1810,7 @@ func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
 }
 
 func (t *Torrent) wantConns() bool {
-    if !t.networkingEnabled {
+    if !t.networkingEnabled.Bool() {
         return false
     }
     if t.closed.IsSet() {
@@ -1949,6 +1944,7 @@ func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
 func (t *Torrent) onPieceCompleted(piece pieceIndex) {
     t.pendAllChunkSpecs(piece)
     t.cancelRequestsForPiece(piece)
+    t.piece(piece).readerCond.Broadcast()
     for conn := range t.conns {
         conn.have(piece)
         t.maybeDropMutuallyCompletePeer(&conn.Peer)
@@ -2137,27 +2133,15 @@ func (t *Torrent) onWriteChunkErr(err error) {
 }
 
 func (t *Torrent) DisallowDataDownload() {
-    t.cl.lock()
-    defer t.cl.unlock()
     t.disallowDataDownloadLocked()
 }
 
 func (t *Torrent) disallowDataDownloadLocked() {
-    t.dataDownloadDisallowed = true
-    t.iterPeers(func(c *Peer) {
-        c.updateRequests()
-    })
-    t.tickleReaders()
+    t.dataDownloadDisallowed.Set()
 }
 
 func (t *Torrent) AllowDataDownload() {
-    t.cl.lock()
-    defer t.cl.unlock()
-    t.dataDownloadDisallowed = false
-    t.tickleReaders()
-    t.iterPeers(func(c *Peer) {
-        c.updateRequests()
-    })
+    t.dataDownloadDisallowed.Clear()
 }
 
 // Enables uploading data, if it was disabled.
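
AllowDataDownload and DisallowDataDownload now just flip the flag, with no client lock, peer updates, or tickleReaders; a reader blocked in waitAvailable observes the change through its select case on dataDownloadDisallowed.On() and returns an error. A hedged usage sketch against the public API; the helper names are hypothetical and the error handling only assumes what the diff shows:

package main

import (
    "context"
    "fmt"
    "io"
    "time"

    "github.com/anacrolix/torrent"
)

// pauseDownloads disables data download for a fixed window, then re-enables it.
func pauseDownloads(t *torrent.Torrent, d time.Duration) {
    t.DisallowDataDownload()
    defer t.AllowDataDownload()
    time.Sleep(d)
}

// readTolerantly retries reads that fail while downloading is paused.
func readTolerantly(ctx context.Context, r torrent.Reader, buf []byte) (int, error) {
    for {
        n, err := r.ReadContext(ctx, buf)
        if n > 0 || err == io.EOF || ctx.Err() != nil {
            return n, err
        }
        // err is non-nil here (for example, data downloading disabled); back off and retry.
        select {
        case <-time.After(time.Second):
        case <-ctx.Done():
            return 0, ctx.Err()
        }
    }
}

func main() { fmt.Println("usage sketch; client and torrent setup elided") }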
tracker_scraper.go

@@ -216,7 +216,6 @@ func (me *trackerScraper) Run() {
 
         me.t.cl.lock()
         wantPeers := me.t.wantPeersEvent.C()
-        closed := me.t.closed.C()
         me.t.cl.unlock()
 
         // If we want peers, reduce the interval to the minimum if it's appropriate.
@@ -234,7 +233,7 @@ func (me *trackerScraper) Run() {
         }
 
         select {
-        case <-closed:
+        case <-me.t.closed.Done():
             return
         case <-reconsider:
             // Recalculate the interval.