torrent/reader.go

package torrent

import (
	"context"
	"errors"
	"io"
	"sync"

	"github.com/anacrolix/log"
	"github.com/anacrolix/missinggo"
)

type Reader interface {
	io.Reader
	io.Seeker
	io.Closer
	missinggo.ReadContexter
	SetReadahead(int64)
	SetResponsive()
}
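
// Illustrative usage sketch only (a hypothetical helper, not called anywhere
// in this package): it assumes a *Torrent obtained from a Client and uses its
// NewReader constructor to get a Reader, widens the readahead window, seeks
// into the data and reads a fixed-size block. The sizes and offsets are
// arbitrary example values.
func exampleReaderUsage(t *Torrent) ([]byte, error) {
	r := t.NewReader()
	defer r.Close()
	// Ask for roughly 1 MiB beyond the current position to be prioritized.
	r.SetReadahead(1 << 20)
	if _, err := r.Seek(512, io.SeekStart); err != nil {
		return nil, err
	}
	// The read blocks until the requested data is available or the torrent
	// is closed.
	buf := make([]byte, 4096)
	n, err := io.ReadFull(r, buf)
	return buf[:n], err
}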

// Piece range by piece index, [begin, end).
type pieceRange struct {
	begin, end pieceIndex
}

// Accesses Torrent data via a Client. Reads block until the data is
// available. Seeks and readahead also drive Client behaviour.
type reader struct {
	t          *Torrent
	responsive bool
	// Adjust the read/seek window to handle Readers locked to File extents
	// and the like.
	offset, length int64
	// Ensure operations that change the position are exclusive, like Read()
	// and Seek().
	opMu sync.Mutex

	// Required when modifying pos and readahead, or reading them without
	// opMu.
	mu        sync.Locker
	pos       int64
	readahead int64
	// The cached piece range this reader wants downloaded. The zero value
	// corresponds to nothing. We cache this so that changes can be detected,
	// and bubbled up to the Torrent only as required.
	pieces pieceRange
}

var _ io.ReadCloser = &reader{}

// Don't wait for pieces to complete and be verified. Read calls return as
// soon as they can when the underlying chunks become available.
func (r *reader) SetResponsive() {
	r.responsive = true
	r.t.cl.event.Broadcast()
}

// Disable responsive mode. TODO: Remove?
func (r *reader) SetNonResponsive() {
	r.responsive = false
	r.t.cl.event.Broadcast()
}
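
// Hypothetical tuning sketch (not part of the package API): for streaming
// playback it can make sense to combine responsive reads with a readahead
// window sized from the stream bitrate, so Read returns data as soon as the
// chunks arrive while the next few seconds of playback stay prioritized.
func exampleStreamingTuning(r Reader, bytesPerSecond int64) {
	// Return data from incomplete pieces as soon as the chunks are on disk.
	r.SetResponsive()
	// Keep roughly five seconds of playback prioritized ahead of the reader.
	r.SetReadahead(5 * bytesPerSecond)
}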

// Configure the number of bytes ahead of a read that should also be
// prioritized in preparation for further reads.
func (r *reader) SetReadahead(readahead int64) {
	r.mu.Lock()
	r.readahead = readahead
	r.mu.Unlock()
	r.t.cl.lock()
	defer r.t.cl.unlock()
	r.posChanged()
}

func (r *reader) readable(off int64) (ret bool) {
	if r.t.closed.IsSet() {
		return true
	}
	req, ok := r.t.offsetRequest(r.torrentOffset(off))
	if !ok {
		panic(off)
	}
	if r.responsive {
		return r.t.haveChunk(req)
	}
	return r.t.pieceComplete(pieceIndex(req.Index))
}

// How many bytes are available to read. Max is the most we could require.
func (r *reader) available(off, max int64) (ret int64) {
	off += r.offset
	for max > 0 {
		req, ok := r.t.offsetRequest(off)
		if !ok {
			break
		}
		if !r.t.haveChunk(req) {
			break
		}
		len1 := int64(req.Length) - (off - r.t.requestOffset(req))
		max -= len1
		ret += len1
		off += len1
	}
	// Ensure that ret hasn't exceeded our original max.
	if max < 0 {
		ret += max
	}
	return
}

func (r *reader) waitReadable(off int64) {
	// We may have been sent back here because we were told we could read but
	// it failed.
	r.t.cl.event.Wait()
}

// Calculates the pieces this reader wants downloaded, ignoring the cached
// value at r.pieces.
func (r *reader) piecesUncached() (ret pieceRange) {
	ra := r.readahead
	if ra < 1 {
		// Needs to be at least 1, because [x, x) means we don't want
		// anything.
		ra = 1
	}
	if ra > r.length-r.pos {
		ra = r.length - r.pos
	}
	ret.begin, ret.end = r.t.byteRegionPieces(r.torrentOffset(r.pos), ra)
	return
}
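
// Standalone sketch of the mapping piecesUncached performs, written as a
// hypothetical helper with explicit arguments instead of reader state: a
// position and readahead are turned into a half-open [begin, end) range of
// piece indices. The real implementation defers to Torrent.byteRegionPieces,
// which also clamps the range to the torrent's bounds.
func examplePieceWindow(pos, readahead, pieceLength int64) pieceRange {
	if readahead < 1 {
		// [x, x) would mean we want nothing, so always want at least one byte.
		readahead = 1
	}
	return pieceRange{
		begin: pieceIndex(pos / pieceLength),
		end:   pieceIndex((pos + readahead + pieceLength - 1) / pieceLength),
	}
}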

func (r *reader) Read(b []byte) (n int, err error) {
	return r.ReadContext(context.Background(), b)
}

func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
	// This is set under the Client lock if the Context is canceled.
	var ctxErr error
	if ctx.Done() != nil {
		ctx, cancel := context.WithCancel(ctx)
		// Abort the goroutine when the function returns.
		defer cancel()
		go func() {
			<-ctx.Done()
			r.t.cl.lock()
			ctxErr = ctx.Err()
			r.t.tickleReaders()
			r.t.cl.unlock()
		}()
	}
	// Hmmm, if a Read gets stuck, this means you can't change position for
	// other purposes. That seems reasonable, but unusual.
	r.opMu.Lock()
	defer r.opMu.Unlock()
	for len(b) != 0 {
		var n1 int
		n1, err = r.readOnceAt(b, r.pos, &ctxErr)
		if n1 == 0 {
			if err == nil {
				panic("expected error")
			}
			break
		}
		b = b[n1:]
		n += n1
		r.mu.Lock()
		r.pos += int64(n1)
		r.posChanged()
		r.mu.Unlock()
	}
	if r.pos >= r.length {
		err = io.EOF
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return
}
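
// Usage sketch (hypothetical helper, not part of the package): drain a Reader
// through ReadContext so a caller-supplied context, for example one carrying
// a deadline, can abort a read that is stuck waiting for pieces to arrive.
func exampleReadAllContext(ctx context.Context, r Reader) ([]byte, error) {
	var out []byte
	buf := make([]byte, 32<<10)
	for {
		n, err := r.ReadContext(ctx, buf)
		out = append(out, buf[:n]...)
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return out, err
		}
	}
}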

// Wait until some data should be available to read. Tickles the client if it
// isn't. Returns how much should be readable without blocking.
func (r *reader) waitAvailable(pos, wanted int64, ctxErr *error) (avail int64) {
	r.t.cl.lock()
	defer r.t.cl.unlock()
	for !r.readable(pos) && *ctxErr == nil {
		r.waitReadable(pos)
	}
	return r.available(pos, wanted)
}

func (r *reader) torrentOffset(readerPos int64) int64 {
	return r.offset + readerPos
}

// Performs at most one successful read to torrent storage.
func (r *reader) readOnceAt(b []byte, pos int64, ctxErr *error) (n int, err error) {
	if pos >= r.length {
		err = io.EOF
		return
	}
	for {
		avail := r.waitAvailable(pos, int64(len(b)), ctxErr)
		if avail == 0 {
			if r.t.closed.IsSet() {
				err = errors.New("torrent closed")
				return
			}
			if *ctxErr != nil {
				err = *ctxErr
				return
			}
		}
		pi := pieceIndex(r.torrentOffset(pos) / r.t.info.PieceLength)
		ip := r.t.info.Piece(pi)
		po := r.torrentOffset(pos) % r.t.info.PieceLength
		b1 := missinggo.LimitLen(b, ip.Length()-po, avail)
		n, err = r.t.readAt(b1, r.torrentOffset(pos))
		if n != 0 {
			err = nil
			return
		}
		r.t.cl.lock()
		// TODO: Just reset pieces in the readahead window. This might help
		// prevent thrashing with small caches and file and piece priorities.
		r.log(log.Fstr("error reading torrent %s piece %d offset %d, %d bytes: %v",
			r.t.infoHash.HexString(), pi, po, len(b1), err))
		if !r.t.updatePieceCompletion(pi) {
			r.log(log.Fstr("piece %d completion unchanged", pi))
		}
		r.t.cl.unlock()
	}
}

func (r *reader) Close() error {
	r.t.cl.lock()
	defer r.t.cl.unlock()
	r.t.deleteReader(r)
	return nil
}

func (r *reader) posChanged() {
	to := r.piecesUncached()
	from := r.pieces
	if to == from {
		return
	}
	r.pieces = to
	// log.Printf("reader pos changed %v->%v", from, to)
	r.t.readerPosChanged(from, to)
}

func (r *reader) Seek(off int64, whence int) (ret int64, err error) {
	r.opMu.Lock()
	defer r.opMu.Unlock()
	r.mu.Lock()
	defer r.mu.Unlock()
	switch whence {
	case io.SeekStart:
		r.pos = off
	case io.SeekCurrent:
		r.pos += off
	case io.SeekEnd:
		r.pos = r.length + off
	default:
		err = errors.New("bad whence")
	}
	ret = r.pos
	r.posChanged()
	return
}
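
// Usage sketch (hypothetical helper): because Seek only updates the reader's
// position and priority window without touching storage, SeekEnd with offset
// 0 is a cheap way to learn the length of the region a Reader exposes; seek
// back to the start before reading.
func exampleReaderLength(r Reader) (int64, error) {
	length, err := r.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	_, err = r.Seek(0, io.SeekStart)
	return length, err
}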

func (r *reader) log(m log.Msg) {
	r.t.logger.Log(m.Skip(1))
}