2013-09-26 09:49:15 +00:00
|
|
|
package torrent
|
|
|
|
|
|
|
|
import (
|
2013-09-30 11:51:08 +00:00
|
|
|
"bufio"
|
2014-09-11 04:20:47 +00:00
|
|
|
"bytes"
|
2013-09-28 22:11:24 +00:00
|
|
|
"crypto/rand"
|
2014-06-26 14:57:07 +00:00
|
|
|
"crypto/sha1"
|
2015-03-12 09:06:23 +00:00
|
|
|
"encoding/hex"
|
2013-09-26 09:49:15 +00:00
|
|
|
"errors"
|
2014-08-21 15:33:13 +00:00
|
|
|
"expvar"
|
2013-10-07 07:58:33 +00:00
|
|
|
"fmt"
|
2013-09-26 09:49:15 +00:00
|
|
|
"io"
|
2013-09-28 22:11:24 +00:00
|
|
|
"log"
|
2014-11-19 03:56:50 +00:00
|
|
|
"math/big"
|
2014-03-16 15:30:10 +00:00
|
|
|
mathRand "math/rand"
|
2013-09-28 22:11:24 +00:00
|
|
|
"net"
|
2015-03-10 15:41:41 +00:00
|
|
|
"net/url"
|
2013-09-26 09:49:15 +00:00
|
|
|
"os"
|
2014-11-29 01:41:53 +00:00
|
|
|
"path/filepath"
|
2014-11-19 03:56:50 +00:00
|
|
|
"sort"
|
2015-03-18 07:21:00 +00:00
|
|
|
"strconv"
|
2014-09-13 17:50:15 +00:00
|
|
|
"strings"
|
2014-04-03 12:16:59 +00:00
|
|
|
"syscall"
|
2013-10-20 14:07:01 +00:00
|
|
|
"time"
|
2014-03-20 05:58:09 +00:00
|
|
|
|
2015-07-17 11:45:44 +00:00
|
|
|
"github.com/anacrolix/missinggo/perf"
|
2015-03-20 12:52:53 +00:00
|
|
|
"github.com/anacrolix/sync"
|
2015-03-26 06:18:08 +00:00
|
|
|
"github.com/anacrolix/utp"
|
|
|
|
"github.com/bradfitz/iter"
|
|
|
|
|
2015-04-28 05:24:17 +00:00
|
|
|
"github.com/anacrolix/torrent/bencode"
|
2015-03-20 05:37:44 +00:00
|
|
|
"github.com/anacrolix/torrent/data"
|
|
|
|
filePkg "github.com/anacrolix/torrent/data/file"
|
|
|
|
"github.com/anacrolix/torrent/dht"
|
|
|
|
"github.com/anacrolix/torrent/internal/pieceordering"
|
|
|
|
"github.com/anacrolix/torrent/iplist"
|
|
|
|
"github.com/anacrolix/torrent/logonce"
|
2015-04-28 05:24:17 +00:00
|
|
|
"github.com/anacrolix/torrent/metainfo"
|
2015-03-26 06:18:08 +00:00
|
|
|
"github.com/anacrolix/torrent/mse"
|
2015-03-20 05:37:44 +00:00
|
|
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
|
|
|
"github.com/anacrolix/torrent/tracker"
|
|
|
|
. "github.com/anacrolix/torrent/util"
|
2013-09-26 09:49:15 +00:00
|
|
|
)
|
|
|
|
|
2014-08-21 15:33:13 +00:00
|
|
|
var (
|
2015-06-22 09:48:30 +00:00
|
|
|
unwantedChunksReceived = expvar.NewInt("chunksReceivedUnwanted")
|
|
|
|
unexpectedChunksReceived = expvar.NewInt("chunksReceivedUnexpected")
|
|
|
|
chunksReceived = expvar.NewInt("chunksReceived")
|
2015-06-28 06:41:51 +00:00
|
|
|
|
|
|
|
peersFoundByDHT = expvar.NewInt("peersFoundByDHT")
|
|
|
|
peersFoundByPEX = expvar.NewInt("peersFoundByPEX")
|
|
|
|
peersFoundByTracker = expvar.NewInt("peersFoundByTracker")
|
|
|
|
|
|
|
|
uploadChunksPosted = expvar.NewInt("uploadChunksPosted")
|
|
|
|
unexpectedCancels = expvar.NewInt("unexpectedCancels")
|
|
|
|
postedCancels = expvar.NewInt("postedCancels")
|
|
|
|
duplicateConnsAvoided = expvar.NewInt("duplicateConnsAvoided")
|
|
|
|
|
|
|
|
pieceHashedCorrect = expvar.NewInt("pieceHashedCorrect")
|
|
|
|
pieceHashedNotCorrect = expvar.NewInt("pieceHashedNotCorrect")
|
|
|
|
|
|
|
|
unsuccessfulDials = expvar.NewInt("dialSuccessful")
|
|
|
|
successfulDials = expvar.NewInt("dialUnsuccessful")
|
|
|
|
|
2015-06-29 14:35:47 +00:00
|
|
|
acceptUTP = expvar.NewInt("acceptUTP")
|
|
|
|
acceptTCP = expvar.NewInt("acceptTCP")
|
|
|
|
acceptReject = expvar.NewInt("acceptReject")
|
|
|
|
|
|
|
|
peerExtensions = expvar.NewMap("peerExtensions")
|
2015-03-18 07:28:13 +00:00
|
|
|
// Count of connections to peer with same client ID.
|
|
|
|
connsToSelf = expvar.NewInt("connsToSelf")
|
|
|
|
// Number of completed connections to a client we're already connected with.
|
|
|
|
duplicateClientConns = expvar.NewInt("duplicateClientConns")
|
|
|
|
receivedMessageTypes = expvar.NewMap("receivedMessageTypes")
|
|
|
|
supportedExtensionMessages = expvar.NewMap("supportedExtensionMessages")
|
2014-08-21 15:33:13 +00:00
|
|
|
)
|
|
|
|
|
2014-08-28 00:06:36 +00:00
|
|
|
const (
|
|
|
|
// Justification for set bits follows.
|
|
|
|
//
|
2015-03-18 07:21:00 +00:00
|
|
|
// Extension protocol: http://www.bittorrent.org/beps/bep_0010.html ([5]|=0x10)
|
|
|
|
// DHT: http://www.bittorrent.org/beps/bep_0005.html ([7]|=1)
|
|
|
|
// Fast Extension:
|
|
|
|
// http://bittorrent.org/beps/bep_0006.html ([7]|=4)
|
|
|
|
// Disabled until AllowedFast is implemented
|
|
|
|
defaultExtensionBytes = "\x00\x00\x00\x00\x00\x10\x00\x01"
|
2014-08-28 00:06:36 +00:00
|
|
|
|
2015-06-29 14:46:43 +00:00
|
|
|
socketsPerTorrent = 80
|
2015-02-21 03:56:17 +00:00
|
|
|
torrentPeersHighWater = 200
|
|
|
|
torrentPeersLowWater = 50
|
|
|
|
|
|
|
|
// Limit how long handshake can take. This is to reduce the lingering
|
|
|
|
// impact of a few bad apples. 4s loses 1% of successful handshakes that
|
|
|
|
// are obtained with 60s timeout, and 5% of unsuccessful handshakes.
|
2015-03-18 07:28:13 +00:00
|
|
|
btHandshakeTimeout = 4 * time.Second
|
|
|
|
handshakesTimeout = 20 * time.Second
|
2015-02-25 03:48:39 +00:00
|
|
|
|
|
|
|
pruneInterval = 10 * time.Second
|
2015-03-25 04:42:14 +00:00
|
|
|
|
2015-06-28 06:40:46 +00:00
|
|
|
// These are our extended message IDs.
|
2015-03-25 04:42:14 +00:00
|
|
|
metadataExtendedId = iota + 1 // 0 is reserved for deleting keys
|
|
|
|
pexExtendedId
|
|
|
|
|
2015-06-28 06:40:46 +00:00
|
|
|
// Updated occasionally to when there's been some changes to client
|
|
|
|
// behaviour in case other clients are assuming anything of us. See also
|
|
|
|
// `bep20`.
|
|
|
|
extendedHandshakeClientVersion = "go.torrent dev 20150624"
|
2014-08-28 00:06:36 +00:00
|
|
|
)
|
2014-08-21 08:12:49 +00:00
|
|
|
|
2014-03-16 15:30:10 +00:00
|
|
|
// Currently doesn't really queue, but should in the future.
|
2014-05-21 08:01:58 +00:00
|
|
|
func (cl *Client) queuePieceCheck(t *torrent, pieceIndex pp.Integer) {
|
2013-10-20 14:07:01 +00:00
|
|
|
piece := t.Pieces[pieceIndex]
|
2014-03-19 17:30:08 +00:00
|
|
|
if piece.QueuedForHash {
|
2013-10-20 14:07:01 +00:00
|
|
|
return
|
|
|
|
}
|
2014-03-19 17:30:08 +00:00
|
|
|
piece.QueuedForHash = true
|
2013-10-20 14:07:01 +00:00
|
|
|
go cl.verifyPiece(t, pieceIndex)
|
|
|
|
}
|
|
|
|
|
2015-02-24 14:34:57 +00:00
|
|
|
// Queue a piece check if one isn't already queued, and the piece has never
|
|
|
|
// been checked before.
|
2014-09-13 17:50:15 +00:00
|
|
|
func (cl *Client) queueFirstHash(t *torrent, piece int) {
|
|
|
|
p := t.Pieces[piece]
|
2015-03-10 15:41:21 +00:00
|
|
|
if p.EverHashed || p.Hashing || p.QueuedForHash || t.pieceComplete(piece) {
|
2014-09-13 17:50:15 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
cl.queuePieceCheck(t, pp.Integer(piece))
|
|
|
|
}
|
|
|
|
|
2015-06-03 03:30:55 +00:00
|
|
|
// Clients contain zero or more Torrents. A client manages a blocklist, the
|
|
|
|
// TCP/UDP protocol ports, and DHT as desired.
|
2013-10-06 07:01:39 +00:00
|
|
|
type Client struct {
	// Maximum number of half-open (dialing, pre-handshake) connections.
	halfOpenLimit int
	// Our BitTorrent peer ID, sent in handshakes.
	peerID [20]byte
	// All listeners we accept peer connections on (TCP and/or uTP).
	listeners []net.Listener
	utpSock   *utp.Socket
	dHT       *dht.Server
	// Optional IP blocklist applied to inbound and outbound peers.
	ipBlockList *iplist.IPList
	// Infohashes the client refuses to serve (see initBannedTorrents).
	bannedTorrents map[InfoHash]struct{}
	// Copy of the Config passed to NewClient.
	config         Config
	pruneTimer     *time.Timer
	extensionBytes peerExtensionBytes
	// Set of addresses that have our client ID. This intentionally will
	// include ourselves if we end up trying to connect to our own address
	// through legitimate channels.
	dopplegangerAddrs map[string]struct{}

	// Factory for per-torrent data storage backends.
	torrentDataOpener TorrentDataOpener

	// Guards all mutable client state below; event signals state changes.
	mu    sync.RWMutex
	event sync.Cond
	// Closed to tell client goroutines to shut down.
	quit chan struct{}

	// Active torrents keyed by infohash.
	torrents map[InfoHash]*torrent
}
|
2014-08-24 19:24:18 +00:00
|
|
|
|
2015-02-25 00:25:22 +00:00
|
|
|
func (me *Client) IPBlockList() *iplist.IPList {
|
|
|
|
me.mu.Lock()
|
|
|
|
defer me.mu.Unlock()
|
|
|
|
return me.ipBlockList
|
2013-09-26 09:49:15 +00:00
|
|
|
}
|
|
|
|
|
2014-11-29 01:41:53 +00:00
|
|
|
func (me *Client) SetIPBlockList(list *iplist.IPList) {
|
|
|
|
me.mu.Lock()
|
|
|
|
defer me.mu.Unlock()
|
|
|
|
me.ipBlockList = list
|
2014-11-30 02:33:17 +00:00
|
|
|
if me.dHT != nil {
|
|
|
|
me.dHT.SetIPBlockList(list)
|
|
|
|
}
|
2014-11-29 01:41:53 +00:00
|
|
|
}
|
|
|
|
|
2014-11-16 19:54:43 +00:00
|
|
|
func (me *Client) PeerID() string {
|
|
|
|
return string(me.peerID[:])
|
|
|
|
}
|
|
|
|
|
2014-11-16 19:16:26 +00:00
|
|
|
func (me *Client) ListenAddr() (addr net.Addr) {
|
|
|
|
for _, l := range me.listeners {
|
|
|
|
addr = l.Addr()
|
2015-05-20 08:14:42 +00:00
|
|
|
break
|
2014-11-16 19:16:26 +00:00
|
|
|
}
|
|
|
|
return
|
2014-08-21 08:07:06 +00:00
|
|
|
}
|
|
|
|
|
2014-11-19 03:56:50 +00:00
|
|
|
type hashSorter struct {
|
|
|
|
Hashes []InfoHash
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me hashSorter) Len() int {
|
|
|
|
return len(me.Hashes)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me hashSorter) Less(a, b int) bool {
|
|
|
|
return (&big.Int{}).SetBytes(me.Hashes[a][:]).Cmp((&big.Int{}).SetBytes(me.Hashes[b][:])) < 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me hashSorter) Swap(a, b int) {
|
|
|
|
me.Hashes[a], me.Hashes[b] = me.Hashes[b], me.Hashes[a]
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cl *Client) sortedTorrents() (ret []*torrent) {
|
|
|
|
var hs hashSorter
|
|
|
|
for ih := range cl.torrents {
|
|
|
|
hs.Hashes = append(hs.Hashes, ih)
|
|
|
|
}
|
|
|
|
sort.Sort(hs)
|
|
|
|
for _, ih := range hs.Hashes {
|
|
|
|
ret = append(ret, cl.torrent(ih))
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-08 06:28:14 +00:00
|
|
|
// Writes out a human readable status of the client, such as for writing to a
|
|
|
|
// HTTP status page.
|
2014-11-19 03:56:50 +00:00
|
|
|
func (cl *Client) WriteStatus(_w io.Writer) {
|
2014-11-29 01:41:53 +00:00
|
|
|
cl.mu.RLock()
|
|
|
|
defer cl.mu.RUnlock()
|
2014-11-19 03:56:50 +00:00
|
|
|
w := bufio.NewWriter(_w)
|
|
|
|
defer w.Flush()
|
2015-02-25 03:52:19 +00:00
|
|
|
if addr := cl.ListenAddr(); addr != nil {
|
|
|
|
fmt.Fprintf(w, "Listening on %s\n", cl.ListenAddr())
|
|
|
|
} else {
|
2015-03-10 15:39:01 +00:00
|
|
|
fmt.Fprintln(w, "Not listening!")
|
2015-02-25 03:52:19 +00:00
|
|
|
}
|
2015-06-22 09:46:26 +00:00
|
|
|
fmt.Fprintf(w, "Peer ID: %+q\n", cl.peerID)
|
2014-08-21 08:07:06 +00:00
|
|
|
if cl.dHT != nil {
|
2014-12-07 03:19:02 +00:00
|
|
|
dhtStats := cl.dHT.Stats()
|
2015-04-01 06:29:55 +00:00
|
|
|
fmt.Fprintf(w, "DHT nodes: %d (%d good)\n", dhtStats.Nodes, dhtStats.GoodNodes)
|
|
|
|
fmt.Fprintf(w, "DHT Server ID: %x\n", cl.dHT.ID())
|
|
|
|
fmt.Fprintf(w, "DHT port: %d\n", addrPort(cl.dHT.Addr()))
|
|
|
|
fmt.Fprintf(w, "DHT announces: %d\n", dhtStats.ConfirmedAnnounces)
|
|
|
|
fmt.Fprintf(w, "Outstanding transactions: %d\n", dhtStats.OutstandingTransactions)
|
2014-07-22 15:50:49 +00:00
|
|
|
}
|
2015-03-27 15:51:16 +00:00
|
|
|
fmt.Fprintf(w, "# Torrents: %d\n", len(cl.torrents))
|
2014-07-16 07:07:28 +00:00
|
|
|
fmt.Fprintln(w)
|
2014-11-19 03:56:50 +00:00
|
|
|
for _, t := range cl.sortedTorrents() {
|
2014-11-18 20:32:51 +00:00
|
|
|
if t.Name() == "" {
|
|
|
|
fmt.Fprint(w, "<unknown name>")
|
|
|
|
} else {
|
|
|
|
fmt.Fprint(w, t.Name())
|
|
|
|
}
|
2015-02-21 03:57:37 +00:00
|
|
|
fmt.Fprint(w, "\n")
|
2014-11-18 20:32:51 +00:00
|
|
|
if t.haveInfo() {
|
2015-02-25 04:42:47 +00:00
|
|
|
fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.Length())), t.Length())
|
2015-02-21 03:57:37 +00:00
|
|
|
} else {
|
|
|
|
w.WriteString("<missing metainfo>")
|
2014-11-18 20:32:51 +00:00
|
|
|
}
|
|
|
|
fmt.Fprint(w, "\n")
|
2015-06-16 06:57:47 +00:00
|
|
|
t.writeStatus(w, cl)
|
2014-07-17 05:58:33 +00:00
|
|
|
fmt.Fprintln(w)
|
2014-06-26 07:29:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 13:59:41 +00:00
|
|
|
// A Data that implements this has a streaming interface that should be
|
|
|
|
// preferred over ReadAt. For example, the data is stored in blocks on the
|
|
|
|
// network and have a fixed cost to open.
|
2015-03-01 03:32:54 +00:00
|
|
|
type SectionOpener interface {
	// Open a ReadCloser at the given offset into torrent data. n is how many
	// bytes we intend to read. The caller is responsible for closing the
	// returned ReadCloser.
	OpenSection(off, n int64) (io.ReadCloser, error)
}
|
|
|
|
|
2015-03-08 06:28:14 +00:00
|
|
|
// dataReadAt reads len(b) bytes at offset off from the torrent data backend
// d, preferring the cheapest interface the backend supports: io.ReaderAt,
// then SectionOpener, then unwrapping via super() and retrying. Panics if no
// supported interface is found anywhere in the wrapper chain.
func dataReadAt(d data.Data, b []byte, off int64) (n int, err error) {
	// defer func() {
	// 	if err == io.ErrUnexpectedEOF && n != 0 {
	// 		err = nil
	// 	}
	// }()
	// log.Println("data read at", len(b), off)
again:
	// Fast path: backend supports random-access reads directly.
	if ra, ok := d.(io.ReaderAt); ok {
		return ra.ReadAt(b, off)
	}
	// Streaming path: open a section and read it fully.
	if so, ok := d.(SectionOpener); ok {
		var rc io.ReadCloser
		rc, err = so.OpenSection(off, int64(len(b)))
		if err != nil {
			return
		}
		defer rc.Close()
		return io.ReadFull(rc, b)
	}
	// Neither interface: unwrap one layer (super is a project helper that
	// presumably exposes the wrapped value) and try again.
	if dp, ok := super(d); ok {
		d = dp.(data.Data)
		goto again
	}
	panic(fmt.Sprintf("can't read from %T", d))
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
// Calculates the number of pieces to set to Readahead priority, after the
|
|
|
|
// Now, and Next pieces.
|
2015-06-16 06:54:12 +00:00
|
|
|
func readaheadPieces(readahead, pieceLength int64) int {
	// Round the readahead up to whole pieces, then subtract one for the
	// "next" piece that is assigned separately.
	pieces := int((readahead+pieceLength-1)/pieceLength) - 1
	// Lengthen the "readahead tail" to smooth blockiness that occurs when
	// the piece length is much larger than the readahead.
	if pieces < 2 {
		pieces++
	}
	return pieces
}
|
|
|
|
|
2015-04-14 13:59:41 +00:00
|
|
|
func (cl *Client) readRaisePiecePriorities(t *torrent, off, readaheadBytes int64) {
|
2015-02-25 04:42:47 +00:00
|
|
|
index := int(off / int64(t.usualPieceSize()))
|
2015-06-01 08:22:12 +00:00
|
|
|
cl.raisePiecePriority(t, index, PiecePriorityNow)
|
2014-12-05 06:56:28 +00:00
|
|
|
index++
|
2015-02-09 13:12:29 +00:00
|
|
|
if index >= t.numPieces() {
|
2014-12-05 06:56:28 +00:00
|
|
|
return
|
|
|
|
}
|
2015-06-01 08:22:12 +00:00
|
|
|
cl.raisePiecePriority(t, index, PiecePriorityNext)
|
2015-04-14 13:59:41 +00:00
|
|
|
for range iter.N(readaheadPieces(readaheadBytes, t.Info.PieceLength)) {
|
2014-12-05 06:56:28 +00:00
|
|
|
index++
|
2015-02-09 13:12:29 +00:00
|
|
|
if index >= t.numPieces() {
|
2014-12-05 06:56:28 +00:00
|
|
|
break
|
|
|
|
}
|
2015-06-01 08:22:12 +00:00
|
|
|
cl.raisePiecePriority(t, index, PiecePriorityReadahead)
|
2014-12-05 06:56:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-14 13:59:41 +00:00
|
|
|
// addUrgentRequests marks every chunk request covering [off, off+n) in
// torrent t as urgent, queueing hash checks and piece-change notifications
// for newly-urgent pieces.
func (cl *Client) addUrgentRequests(t *torrent, off int64, n int) {
	for n > 0 {
		req, ok := t.offsetRequest(off)
		if !ok {
			// Offset is past the end of the torrent; nothing more to mark.
			break
		}
		// Only act on chunks not already urgent and not already on disk.
		if _, ok := t.urgent[req]; !ok && !t.haveChunk(req) {
			if t.urgent == nil {
				// Lazily allocate, sized for the number of chunks the
				// remaining byte count spans (rounded up).
				t.urgent = make(map[request]struct{}, (n+int(t.chunkSize)-1)/int(t.chunkSize))
			}
			t.urgent[req] = struct{}{}
			cl.event.Broadcast() // Why?
			index := int(req.Index)
			cl.queueFirstHash(t, index)
			cl.pieceChanged(t, index)
		}
		// Advance past the remainder of this chunk: off may sit partway
		// into it, so only count the bytes from off to the chunk's end.
		reqOff := t.requestOffset(req)
		n1 := req.Length - pp.Integer(off-reqOff)
		off += int64(n1)
		n -= int(n1)
	}
	// log.Print(t.urgent)
}
|
|
|
|
|
2014-12-01 22:39:09 +00:00
|
|
|
func (cl *Client) configDir() string {
|
2015-04-27 04:05:27 +00:00
|
|
|
if cl.config.ConfigDir == "" {
|
2015-02-25 03:48:39 +00:00
|
|
|
return filepath.Join(os.Getenv("HOME"), ".config/torrent")
|
|
|
|
}
|
2015-04-27 04:05:27 +00:00
|
|
|
return cl.config.ConfigDir
|
2014-12-01 22:39:09 +00:00
|
|
|
}
|
|
|
|
|
2015-06-22 09:48:50 +00:00
|
|
|
// The directory where the Client expects to find and store configuration
|
|
|
|
// data. Defaults to $HOME/.config/torrent.
|
2014-12-03 07:07:50 +00:00
|
|
|
func (cl *Client) ConfigDir() string {
|
|
|
|
return cl.configDir()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *torrent) connPendPiece(c *connection, piece int) {
|
|
|
|
c.pendPiece(piece, t.Pieces[piece].Priority)
|
|
|
|
}
|
|
|
|
|
2014-12-05 06:56:28 +00:00
|
|
|
func (cl *Client) raisePiecePriority(t *torrent, piece int, priority piecePriority) {
|
|
|
|
if t.Pieces[piece].Priority < priority {
|
|
|
|
cl.prioritizePiece(t, piece, priority)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-03 07:07:50 +00:00
|
|
|
func (cl *Client) prioritizePiece(t *torrent, piece int, priority piecePriority) {
|
|
|
|
if t.havePiece(piece) {
|
2015-07-04 03:38:42 +00:00
|
|
|
priority = PiecePriorityNone
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
2015-06-01 08:23:35 +00:00
|
|
|
if priority != PiecePriorityNone {
|
|
|
|
cl.queueFirstHash(t, piece)
|
|
|
|
}
|
|
|
|
p := t.Pieces[piece]
|
|
|
|
if p.Priority != priority {
|
|
|
|
p.Priority = priority
|
|
|
|
cl.pieceChanged(t, piece)
|
|
|
|
}
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
|
|
|
|
2014-11-29 01:41:53 +00:00
|
|
|
func (cl *Client) setEnvBlocklist() (err error) {
|
|
|
|
filename := os.Getenv("TORRENT_BLOCKLIST_FILE")
|
|
|
|
defaultBlocklist := filename == ""
|
|
|
|
if defaultBlocklist {
|
2014-12-01 22:39:09 +00:00
|
|
|
filename = filepath.Join(cl.configDir(), "blocklist")
|
2014-11-29 01:41:53 +00:00
|
|
|
}
|
|
|
|
f, err := os.Open(filename)
|
|
|
|
if err != nil {
|
|
|
|
if defaultBlocklist {
|
|
|
|
err = nil
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer f.Close()
|
2015-06-22 09:44:59 +00:00
|
|
|
cl.ipBlockList, err = iplist.NewFromReader(f)
|
2014-11-29 01:41:53 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-12-01 22:39:09 +00:00
|
|
|
// initBannedTorrents loads infohashes from the "banned_infohashes" file in
// the config dir into cl.bannedTorrents. The file holds one hex-encoded
// infohash per line; lines starting with '#' are comments. A missing file
// is not an error.
func (cl *Client) initBannedTorrents() error {
	f, err := os.Open(filepath.Join(cl.configDir(), "banned_infohashes"))
	if err != nil {
		if os.IsNotExist(err) {
			// The ban file is optional.
			return nil
		}
		return fmt.Errorf("error opening banned infohashes file: %s", err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	cl.bannedTorrents = make(map[InfoHash]struct{})
	for scanner.Scan() {
		if strings.HasPrefix(strings.TrimSpace(scanner.Text()), "#") {
			continue
		}
		var ihs string
		// %x decodes the hex line into raw bytes held in a string, so 40
		// hex digits yield a 20-byte value.
		n, err := fmt.Sscanf(scanner.Text(), "%x", &ihs)
		if err != nil {
			return fmt.Errorf("error reading infohash: %s", err)
		}
		if n != 1 {
			// Blank line: nothing scanned.
			continue
		}
		if len(ihs) != 20 {
			return errors.New("bad infohash")
		}
		var ih InfoHash
		CopyExact(&ih, ihs)
		cl.bannedTorrents[ih] = struct{}{}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("error scanning file: %s", err)
	}
	return nil
}
|
|
|
|
|
2015-06-03 03:30:55 +00:00
|
|
|
// Creates a new client.
|
2014-08-21 08:07:06 +00:00
|
|
|
func NewClient(cfg *Config) (cl *Client, err error) {
	// A nil config means all defaults.
	if cfg == nil {
		cfg = &Config{}
	}

	// Don't hand back a half-initialized client on error.
	defer func() {
		if err != nil {
			cl = nil
		}
	}()
	cl = &Client{
		halfOpenLimit: socketsPerTorrent,
		config:        *cfg,
		// Default storage backend: file-based under cfg.DataDir.
		torrentDataOpener: func(md *metainfo.Info) data.Data {
			return filePkg.TorrentData(md, cfg.DataDir)
		},
		dopplegangerAddrs: make(map[string]struct{}),

		quit:     make(chan struct{}),
		torrents: make(map[InfoHash]*torrent),
	}
	CopyExact(&cl.extensionBytes, defaultExtensionBytes)
	cl.event.L = &cl.mu
	// A caller-supplied data opener overrides the file-based default.
	if cfg.TorrentDataOpener != nil {
		cl.torrentDataOpener = cfg.TorrentDataOpener
	}

	if !cfg.NoDefaultBlocklist {
		err = cl.setEnvBlocklist()
		if err != nil {
			return
		}
	}

	if err = cl.initBannedTorrents(); err != nil {
		err = fmt.Errorf("error initing banned torrents: %s", err)
		return
	}

	// Use the configured peer ID, or generate one: client prefix (bep20)
	// followed by random bytes.
	if cfg.PeerID != "" {
		CopyExact(&cl.peerID, cfg.PeerID)
	} else {
		o := copy(cl.peerID[:], bep20)
		_, err = rand.Read(cl.peerID[o:])
		if err != nil {
			panic("error generating peer id")
		}
	}

	// Returns the laddr string to listen on for the next Listen call.
	// Reuses the port of an already-open listener so TCP and uTP share it.
	listenAddr := func() string {
		if addr := cl.ListenAddr(); addr != nil {
			return addr.String()
		}
		if cfg.ListenAddr == "" {
			return ":50007"
		}
		return cfg.ListenAddr
	}
	if !cl.config.DisableTCP {
		var l net.Listener
		l, err = net.Listen("tcp", listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, l)
		go cl.acceptConnections(l, false)
	}
	if !cl.config.DisableUTP {
		cl.utpSock, err = utp.NewSocket(listenAddr())
		if err != nil {
			return
		}
		cl.listeners = append(cl.listeners, cl.utpSock)
		go cl.acceptConnections(cl.utpSock, true)
	}
	if !cfg.NoDHT {
		dhtCfg := cfg.DHTConfig
		if dhtCfg == nil {
			dhtCfg = &dht.ServerConfig{
				IPBlocklist: cl.ipBlockList,
			}
		}
		if dhtCfg.Addr == "" {
			dhtCfg.Addr = listenAddr()
		}
		// Share the uTP socket's packet connection with the DHT when
		// possible, so both use one UDP port.
		if dhtCfg.Conn == nil && cl.utpSock != nil {
			dhtCfg.Conn = cl.utpSock.PacketConn()
		}
		cl.dHT, err = dht.NewServer(dhtCfg)
		if err != nil {
			return
		}
	}

	return
}
|
|
|
|
|
2014-03-20 05:58:09 +00:00
|
|
|
func (cl *Client) stopped() bool {
|
|
|
|
select {
|
|
|
|
case <-cl.quit:
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-08 16:36:05 +00:00
|
|
|
// Stops the client. All connections to peers are closed and all activity will
|
|
|
|
// come to a halt.
|
2015-03-08 06:28:14 +00:00
|
|
|
func (me *Client) Close() {
	me.mu.Lock()
	defer me.mu.Unlock()
	// Signal all goroutines watching quit to wind down.
	// NOTE(review): a second Close would panic on the double close —
	// confirm callers invoke Close at most once.
	close(me.quit)
	// Stop accepting new peer connections.
	for _, l := range me.listeners {
		l.Close()
	}
	// Wake anything blocked on the client condition variable.
	me.event.Broadcast()
	for _, t := range me.torrents {
		t.close()
	}
}
|
|
|
|
|
2014-12-01 09:27:11 +00:00
|
|
|
// ipv6BlockRange is the synthetic blocklist entry returned for any
// non-IPv4 address, since the blocklist only covers IPv4.
var ipv6BlockRange = iplist.Range{Description: "non-IPv4 address"}
|
|
|
|
|
2014-11-30 02:33:17 +00:00
|
|
|
func (cl *Client) ipBlockRange(ip net.IP) (r *iplist.Range) {
|
2014-11-29 01:41:53 +00:00
|
|
|
if cl.ipBlockList == nil {
|
2014-11-30 02:33:17 +00:00
|
|
|
return
|
2014-11-29 01:41:53 +00:00
|
|
|
}
|
2015-04-01 06:36:51 +00:00
|
|
|
ip4 := ip.To4()
|
|
|
|
if ip4 == nil {
|
|
|
|
log.Printf("blocking non-IPv4 address: %s", ip)
|
2014-12-01 09:27:11 +00:00
|
|
|
r = &ipv6BlockRange
|
|
|
|
return
|
|
|
|
}
|
2015-04-01 06:36:51 +00:00
|
|
|
r = cl.ipBlockList.Lookup(ip4)
|
2014-11-30 02:33:17 +00:00
|
|
|
return
|
2014-11-29 01:41:53 +00:00
|
|
|
}
|
|
|
|
|
2015-03-18 07:36:27 +00:00
|
|
|
func (cl *Client) waitAccept() {
|
|
|
|
cl.mu.Lock()
|
|
|
|
defer cl.mu.Unlock()
|
|
|
|
for {
|
|
|
|
for _, t := range cl.torrents {
|
|
|
|
if cl.wantConns(t) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cl.event.Wait()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-16 19:29:31 +00:00
|
|
|
// acceptConnections accepts peer connections from l until the client quits.
// utp marks whether l is the uTP listener, for stats and per-connection
// transport tagging.
func (cl *Client) acceptConnections(l net.Listener, utp bool) {
	for {
		// Don't accept until some torrent actually wants connections.
		cl.waitAccept()
		// We accept all connections immediately, because we don't know what
		// torrent they're for.
		conn, err := l.Accept()
		// Check for shutdown before looking at the accept result, so a
		// connection racing with Close is discarded cleanly.
		select {
		case <-cl.quit:
			if conn != nil {
				conn.Close()
			}
			return
		default:
		}
		if err != nil {
			log.Print(err)
			return
		}
		if utp {
			acceptUTP.Add(1)
		} else {
			acceptTCP.Add(1)
		}
		// Reject peers that share our ID or match the IP blocklist.
		cl.mu.RLock()
		doppleganger := cl.dopplegangerAddr(conn.RemoteAddr().String())
		blockRange := cl.ipBlockRange(AddrIP(conn.RemoteAddr()))
		cl.mu.RUnlock()
		if blockRange != nil || doppleganger {
			acceptReject.Add(1)
			// log.Printf("inbound connection from %s blocked by %s", conn.RemoteAddr(), blockRange)
			conn.Close()
			continue
		}
		go cl.incomingConnection(conn, utp)
	}
}
|
|
|
|
|
|
|
|
func (cl *Client) incomingConnection(nc net.Conn, utp bool) {
|
|
|
|
defer nc.Close()
|
|
|
|
if tc, ok := nc.(*net.TCPConn); ok {
|
|
|
|
tc.SetLinger(0)
|
|
|
|
}
|
|
|
|
c := newConnection()
|
|
|
|
c.conn = nc
|
|
|
|
c.rw = nc
|
|
|
|
c.Discovery = peerSourceIncoming
|
|
|
|
c.uTP = utp
|
|
|
|
err := cl.runReceivedConn(c)
|
|
|
|
if err != nil {
|
2015-03-25 06:28:34 +00:00
|
|
|
// log.Print(err)
|
2014-03-17 14:44:22 +00:00
|
|
|
}
|
2013-09-26 09:49:15 +00:00
|
|
|
}
|
|
|
|
|
2015-03-19 23:52:01 +00:00
|
|
|
// Returns a handle to the given torrent, if it's present in the client.
|
2015-03-18 07:28:13 +00:00
|
|
|
func (cl *Client) Torrent(ih InfoHash) (T Torrent, ok bool) {
|
|
|
|
cl.mu.Lock()
|
|
|
|
defer cl.mu.Unlock()
|
|
|
|
t, ok := cl.torrents[ih]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
2015-04-29 14:30:19 +00:00
|
|
|
T = Torrent{cl, t}
|
2015-03-18 07:28:13 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-04-08 16:36:05 +00:00
|
|
|
func (me *Client) torrent(ih InfoHash) *torrent {
|
2015-03-08 06:28:14 +00:00
|
|
|
return me.torrents[ih]
|
2013-09-28 22:11:24 +00:00
|
|
|
}
|
|
|
|
|
2014-11-17 05:27:01 +00:00
|
|
|
// dialResult carries the outcome of one racing dial attempt: the connection
// (nil on failure) and whether the attempt was over uTP.
type dialResult struct {
	Conn net.Conn
	UTP  bool
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
// doDial runs one dial attempt and always delivers a dialResult on ch (with
// a nil Conn on failure). Expected failure modes — timeouts, connection
// refused, host unreachable, and uTP ack timeouts — are counted but not
// logged; anything else is logged as unusual.
func doDial(dial func(addr string, t *torrent) (net.Conn, error), ch chan dialResult, utp bool, addr string, t *torrent) {
	conn, err := dial(addr, t)
	if err != nil {
		if conn != nil {
			conn.Close()
		}
		conn = nil // Pedantic
	}
	ch <- dialResult{conn, utp}
	if err == nil {
		successfulDials.Add(1)
		return
	}
	unsuccessfulDials.Add(1)
	// The remaining checks classify the error; expected kinds return
	// silently.
	if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
		return
	}
	if netOpErr, ok := err.(*net.OpError); ok {
		switch netOpErr.Err {
		case syscall.ECONNREFUSED, syscall.EHOSTUNREACH:
			return
		}
	}
	// The utp package doesn't expose a typed error for this; match on the
	// message.
	if utp && err.Error() == "timed out waiting for ack" {
		return
	}
	// err is known non-nil here; the check is redundant but harmless.
	if err != nil {
		log.Printf("error dialing %s: %s", addr, err)
		return
	}
}
|
|
|
|
|
2014-11-19 03:53:00 +00:00
|
|
|
func reducedDialTimeout(max time.Duration, halfOpenLimit int, pendingPeers int) (ret time.Duration) {
|
|
|
|
ret = max / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit)
|
|
|
|
if ret < minDialTimeout {
|
|
|
|
ret = minDialTimeout
|
|
|
|
}
|
|
|
|
return
|
2014-11-18 00:04:09 +00:00
|
|
|
}
|
|
|
|
|
2015-03-18 07:29:51 +00:00
|
|
|
func (me *Client) dopplegangerAddr(addr string) bool {
|
|
|
|
_, ok := me.dopplegangerAddrs[addr]
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2014-07-22 11:45:12 +00:00
|
|
|
// Start the process of connecting to the given peer for the given torrent if
|
|
|
|
// appropriate.
|
2014-11-16 19:29:31 +00:00
|
|
|
func (me *Client) initiateConn(peer Peer, t *torrent) {
|
2014-08-21 08:07:06 +00:00
|
|
|
if peer.Id == me.peerID {
|
2013-09-28 22:11:24 +00:00
|
|
|
return
|
|
|
|
}
|
2014-11-16 19:30:44 +00:00
|
|
|
addr := net.JoinHostPort(peer.IP.String(), fmt.Sprintf("%d", peer.Port))
|
2015-03-18 07:29:51 +00:00
|
|
|
if me.dopplegangerAddr(addr) || t.addrActive(addr) {
|
2014-11-16 19:30:44 +00:00
|
|
|
duplicateConnsAvoided.Add(1)
|
|
|
|
return
|
2014-08-27 23:35:13 +00:00
|
|
|
}
|
2014-11-30 02:33:17 +00:00
|
|
|
if r := me.ipBlockRange(peer.IP); r != nil {
|
|
|
|
log.Printf("outbound connect to %s blocked by IP blocklist rule %s", peer.IP, r)
|
2014-11-29 01:41:53 +00:00
|
|
|
return
|
|
|
|
}
|
2014-11-16 19:30:44 +00:00
|
|
|
t.HalfOpen[addr] = struct{}{}
|
2015-03-18 07:28:13 +00:00
|
|
|
go me.outgoingConnection(t, addr, peer.Source)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) dialTimeout(t *torrent) time.Duration {
|
2015-03-30 12:10:37 +00:00
|
|
|
me.mu.Lock()
|
|
|
|
pendingPeers := len(t.Peers)
|
|
|
|
me.mu.Unlock()
|
|
|
|
return reducedDialTimeout(nominalDialTimeout, me.halfOpenLimit, pendingPeers)
|
2015-03-18 07:28:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) dialTCP(addr string, t *torrent) (c net.Conn, err error) {
|
|
|
|
c, err = net.DialTimeout("tcp", addr, me.dialTimeout(t))
|
|
|
|
if err == nil {
|
|
|
|
c.(*net.TCPConn).SetLinger(0)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) dialUTP(addr string, t *torrent) (c net.Conn, err error) {
|
|
|
|
return me.utpSock.DialTimeout(addr, me.dialTimeout(t))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns a connection over UTP or TCP.
|
|
|
|
func (me *Client) dial(addr string, t *torrent) (conn net.Conn, utp bool) {
	// Initiate connections via TCP and UTP simultaneously. Use the first one
	// that succeeds.
	left := 0
	if !me.config.DisableUTP {
		left++
	}
	if !me.config.DisableTCP {
		left++
	}
	// Buffered so late finishers never block; each doDial always sends
	// exactly one result.
	resCh := make(chan dialResult, left)
	if !me.config.DisableUTP {
		go doDial(me.dialUTP, resCh, true, addr, t)
	}
	if !me.config.DisableTCP {
		go doDial(me.dialTCP, resCh, false, addr, t)
	}
	var res dialResult
	// Wait for a successful connection.
	for ; left > 0 && res.Conn == nil; left-- {
		res = <-resCh
	}
	if left > 0 {
		// There are still incompleted dials. Drain them in the background
		// and close any connection the loser produced.
		go func() {
			for ; left > 0; left-- {
				conn := (<-resCh).Conn
				if conn != nil {
					conn.Close()
				}
			}
		}()
	}
	conn = res.Conn
	utp = res.UTP
	return
}
|
|
|
|
|
|
|
|
func (me *Client) noLongerHalfOpen(t *torrent, addr string) {
|
|
|
|
if _, ok := t.HalfOpen[addr]; !ok {
|
|
|
|
panic("invariant broken")
|
|
|
|
}
|
|
|
|
delete(t.HalfOpen, addr)
|
|
|
|
me.openNewConns(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns nil connection and nil error if no connection could be established
// for valid reasons.
//
// First attempts a dial (racing TCP/uTP); if encryption is enabled, the first
// handshake attempt is encrypted, with a plaintext retry over the transport
// that won the first race.
func (me *Client) establishOutgoingConn(t *torrent, addr string) (c *connection, err error) {
	// Wraps a freshly-dialed net.Conn and runs the (optionally encrypted)
	// BitTorrent handshakes on it. Returns c == nil, err == nil when the
	// peer abandoned the handshake for valid reasons.
	handshakesConnection := func(nc net.Conn, encrypted, utp bool) (c *connection, err error) {
		c = newConnection()
		c.conn = nc
		c.rw = nc
		c.encrypted = encrypted
		c.uTP = utp
		// Bound the entire handshake exchange.
		err = nc.SetDeadline(time.Now().Add(handshakesTimeout))
		if err != nil {
			return
		}
		ok, err := me.initiateHandshakes(c, t)
		if !ok {
			c = nil
		}
		return
	}
	nc, utp := me.dial(addr, t)
	if nc == nil {
		// Could not reach the peer over any enabled transport; not an error.
		return
	}
	c, err = handshakesConnection(nc, !me.config.DisableEncryption, utp)
	if err != nil {
		nc.Close()
		return
	} else if c != nil {
		return
	}
	nc.Close()
	// Try again without encryption, using whichever protocol type worked last
	// time.
	if me.config.DisableEncryption {
		// We already tried without encryption.
		return
	}
	if utp {
		nc, err = me.dialUTP(addr, t)
	} else {
		nc, err = me.dialTCP(addr, t)
	}
	if err != nil {
		err = fmt.Errorf("error dialing for unencrypted connection: %s", err)
		return
	}
	c, err = handshakesConnection(nc, false, utp)
	if err != nil {
		nc.Close()
	}
	return
}
|
2014-11-17 05:27:01 +00:00
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
// Called to dial out and run a connection. The addr we're given is already
// considered half-open.
func (me *Client) outgoingConnection(t *torrent, addr string, ps peerSource) {
	c, err := me.establishOutgoingConn(t, addr)
	me.mu.Lock()
	defer me.mu.Unlock()
	// Don't release lock between here and addConnection, unless it's for
	// failure.
	me.noLongerHalfOpen(t, addr)
	if err != nil {
		// Dial or handshake failed with an unexpected error; give up quietly.
		return
	}
	if c == nil {
		// No connection could be established for valid reasons.
		return
	}
	defer c.Close()
	c.Discovery = ps
	// Runs the connection's message loop; blocks until it terminates.
	err = me.runInitiatedHandshookConn(c, t)
	if err != nil {
		// log.Print(err)
	}
}
|
|
|
|
|
2014-11-16 19:16:26 +00:00
|
|
|
// The port number for incoming peer connections. 0 if the client isn't
|
|
|
|
// listening.
|
2014-06-29 08:57:49 +00:00
|
|
|
func (cl *Client) incomingPeerPort() int {
|
2014-11-16 19:16:26 +00:00
|
|
|
listenAddr := cl.ListenAddr()
|
|
|
|
if listenAddr == nil {
|
2014-06-29 08:57:49 +00:00
|
|
|
return 0
|
|
|
|
}
|
2014-11-16 19:16:26 +00:00
|
|
|
return addrPort(listenAddr)
|
2014-06-29 08:57:49 +00:00
|
|
|
}
|
|
|
|
|
2014-11-16 19:16:26 +00:00
|
|
|
// Convert a net.Addr to its compact IP representation. Either 4 or 16 bytes
|
|
|
|
// per "yourip" field of http://www.bittorrent.org/beps/bep_0010.html.
|
2014-07-22 11:45:12 +00:00
|
|
|
func addrCompactIP(addr net.Addr) (string, error) {
|
2014-11-16 19:16:26 +00:00
|
|
|
host, _, err := net.SplitHostPort(addr.String())
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
ip := net.ParseIP(host)
|
|
|
|
if v4 := ip.To4(); v4 != nil {
|
|
|
|
if len(v4) != 4 {
|
|
|
|
panic(v4)
|
2014-07-22 11:45:12 +00:00
|
|
|
}
|
2014-11-16 19:16:26 +00:00
|
|
|
return string(v4), nil
|
2014-07-22 11:45:12 +00:00
|
|
|
}
|
2014-11-16 19:16:26 +00:00
|
|
|
return string(ip.To16()), nil
|
2014-07-22 11:45:12 +00:00
|
|
|
}
|
|
|
|
|
2015-03-12 19:21:13 +00:00
|
|
|
func handshakeWriter(w io.Writer, bb <-chan []byte, done chan<- error) {
|
2014-08-21 08:12:49 +00:00
|
|
|
var err error
|
|
|
|
for b := range bb {
|
|
|
|
_, err = w.Write(b)
|
|
|
|
if err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
done <- err
|
|
|
|
}
|
|
|
|
|
2015-03-12 09:04:44 +00:00
|
|
|
type (
	// peerExtensionBytes are the 8 reserved bytes of the BitTorrent
	// handshake, whose bits advertise optional protocol support.
	peerExtensionBytes [8]byte
	// peerID is the 20-byte peer identifier exchanged in the handshake.
	peerID [20]byte
)

// SupportsExtended reports whether the extension-protocol bit (BEP 10) is set.
func (me *peerExtensionBytes) SupportsExtended() bool {
	const bit = 0x10
	return me[5]&bit == bit
}

// SupportsDHT reports whether the DHT bit (BEP 5) is set.
func (me *peerExtensionBytes) SupportsDHT() bool {
	const bit = 0x01
	return me[7]&bit == bit
}

// SupportsFast reports whether the fast-extension bit (BEP 6) is set.
func (me *peerExtensionBytes) SupportsFast() bool {
	const bit = 0x04
	return me[7]&bit == bit
}
|
|
|
|
|
2014-08-21 08:12:49 +00:00
|
|
|
// handshakeResult carries the fields read from a peer's BitTorrent
// handshake: its reserved extension bytes, its peer ID, and the infohash it
// wants to talk about.
type handshakeResult struct {
	peerExtensionBytes
	peerID
	InfoHash
}
|
|
|
|
|
2015-02-09 13:16:01 +00:00
|
|
|
// ih is nil if we expect the peer to declare the InfoHash, such as when the
// peer initiated the connection. Returns ok if the handshake was successful,
// and err if there was an unexpected condition other than the peer simply
// abandoning the handshake.
func handshake(sock io.ReadWriter, ih *InfoHash, peerID [20]byte, extensions peerExtensionBytes) (res handshakeResult, ok bool, err error) {
	// Bytes to be sent to the peer. Should never block the sender.
	postCh := make(chan []byte, 4)
	// A single error value sent when the writer completes.
	writeDone := make(chan error, 1)
	// Performs writes to the socket and ensures posts don't block.
	go handshakeWriter(sock, postCh, writeDone)

	defer func() {
		close(postCh) // Done writing.
		if !ok {
			return
		}
		if err != nil {
			panic(err)
		}
		// Wait until writes complete before returning from handshake.
		err = <-writeDone
		if err != nil {
			err = fmt.Errorf("error writing: %s", err)
		}
	}()

	// post queues bytes for the writer goroutine. The channel is buffered
	// large enough for the at-most-4 posts below, so a blocked send
	// indicates a logic error.
	post := func(bb []byte) {
		select {
		case postCh <- bb:
		default:
			panic("mustn't block while posting")
		}
	}

	post([]byte(pp.Protocol))
	post(extensions[:])
	if ih != nil { // We already know what we want.
		post(ih[:])
		post(peerID[:])
	}
	// 68 = 20-byte protocol string + 8 reserved bytes + 20-byte infohash +
	// 20-byte peer ID.
	var b [68]byte
	_, err = io.ReadFull(sock, b[:68])
	if err != nil {
		// A short read means the peer abandoned the handshake: report
		// !ok with nil error rather than an unexpected failure.
		err = nil
		return
	}
	if string(b[:20]) != pp.Protocol {
		// Not speaking BitTorrent; valid failure.
		return
	}
	CopyExact(&res.peerExtensionBytes, b[20:28])
	CopyExact(&res.InfoHash, b[28:48])
	CopyExact(&res.peerID, b[48:68])
	peerExtensions.Add(hex.EncodeToString(res.peerExtensionBytes[:]), 1)

	// TODO: Maybe we can just drop peers here if we're not interested. This
	// could prevent them trying to reconnect, falsely believing there was
	// just a problem.
	if ih == nil { // We were waiting for the peer to tell us what they wanted.
		post(res.InfoHash[:])
		post(peerID[:])
	}

	ok = true
	return
}
|
|
|
|
|
2015-02-21 03:58:28 +00:00
|
|
|
// Wraps a raw connection and provides the interface we want for using the
|
|
|
|
// connection in the message loop.
|
2015-03-18 07:28:13 +00:00
|
|
|
type deadlineReader struct {
|
|
|
|
nc net.Conn
|
|
|
|
r io.Reader
|
2014-08-27 23:45:20 +00:00
|
|
|
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
func (me deadlineReader) Read(b []byte) (n int, err error) {
|
2014-11-20 02:02:20 +00:00
|
|
|
// Keep-alives should be received every 2 mins. Give a bit of gracetime.
|
2015-03-18 07:28:13 +00:00
|
|
|
err = me.nc.SetReadDeadline(time.Now().Add(150 * time.Second))
|
2014-08-27 23:45:20 +00:00
|
|
|
if err != nil {
|
2015-01-21 13:42:13 +00:00
|
|
|
err = fmt.Errorf("error setting read deadline: %s", err)
|
2014-08-27 23:45:20 +00:00
|
|
|
}
|
2015-03-18 07:28:13 +00:00
|
|
|
n, err = me.r.Read(b)
|
2015-02-21 03:58:28 +00:00
|
|
|
// Convert common errors into io.EOF.
|
2015-03-18 07:28:13 +00:00
|
|
|
// if err != nil {
|
|
|
|
// if opError, ok := err.(*net.OpError); ok && opError.Op == "read" && opError.Err == syscall.ECONNRESET {
|
|
|
|
// err = io.EOF
|
|
|
|
// } else if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
|
|
|
|
// if n != 0 {
|
|
|
|
// panic(n)
|
|
|
|
// }
|
|
|
|
// err = io.EOF
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// readWriter glues independent read and write halves into one io.ReadWriter.
type readWriter struct {
	io.Reader
	io.Writer
}
|
|
|
|
|
|
|
|
func maybeReceiveEncryptedHandshake(rw io.ReadWriter, skeys [][]byte) (ret io.ReadWriter, encrypted bool, err error) {
|
|
|
|
var protocol [len(pp.Protocol)]byte
|
|
|
|
_, err = io.ReadFull(rw, protocol[:])
|
2014-09-13 17:45:38 +00:00
|
|
|
if err != nil {
|
2015-03-18 07:28:13 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
ret = readWriter{
|
|
|
|
io.MultiReader(bytes.NewReader(protocol[:]), rw),
|
|
|
|
rw,
|
|
|
|
}
|
|
|
|
if string(protocol[:]) == pp.Protocol {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
encrypted = true
|
|
|
|
ret, err = mse.ReceiveHandshake(ret, skeys)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cl *Client) receiveSkeys() (ret [][]byte) {
|
|
|
|
for ih := range cl.torrents {
|
|
|
|
ret = append(ret, ih[:])
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) initiateHandshakes(c *connection, t *torrent) (ok bool, err error) {
|
|
|
|
if c.encrypted {
|
|
|
|
c.rw, err = mse.InitiateHandshake(c.rw, t.InfoHash[:], nil)
|
|
|
|
if err != nil {
|
|
|
|
return
|
2014-09-13 17:45:38 +00:00
|
|
|
}
|
|
|
|
}
|
2015-03-18 07:28:13 +00:00
|
|
|
ih, ok, err := me.connBTHandshake(c, &t.InfoHash)
|
|
|
|
if ih != t.InfoHash {
|
|
|
|
ok = false
|
|
|
|
}
|
2014-08-27 23:45:20 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-27 04:37:58 +00:00
|
|
|
// Do encryption and bittorrent handshakes as receiver. Returns a nil torrent
// with nil error when the peer asked for an infohash we don't serve.
func (cl *Client) receiveHandshakes(c *connection) (t *torrent, err error) {
	cl.mu.Lock()
	skeys := cl.receiveSkeys()
	cl.mu.Unlock()
	if !cl.config.DisableEncryption {
		c.rw, c.encrypted, err = maybeReceiveEncryptedHandshake(c.rw, skeys)
		if err != nil {
			// No matching secret key just means the peer wants a torrent we
			// don't have; treat it as a valid (silent) failure.
			if err == mse.ErrNoSecretKeyMatch {
				err = nil
			}
			return
		}
	}
	// nil infohash: we wait for the peer to declare what it wants.
	ih, ok, err := cl.connBTHandshake(c, nil)
	if err != nil {
		err = fmt.Errorf("error during bt handshake: %s", err)
		return
	}
	if !ok {
		return
	}
	// Map the declared infohash to a torrent we're serving; t stays nil if
	// it's unknown to us.
	cl.mu.Lock()
	t = cl.torrents[ih]
	cl.mu.Unlock()
	return
}
|
|
|
|
|
|
|
|
// Returns !ok if handshake failed for valid reasons.
|
|
|
|
func (cl *Client) connBTHandshake(c *connection, ih *InfoHash) (ret InfoHash, ok bool, err error) {
|
|
|
|
res, ok, err := handshake(c.rw, ih, cl.peerID, cl.extensionBytes)
|
|
|
|
if err != nil || !ok {
|
2015-03-12 19:21:13 +00:00
|
|
|
return
|
|
|
|
}
|
2015-03-18 07:28:13 +00:00
|
|
|
ret = res.InfoHash
|
|
|
|
c.PeerExtensionBytes = res.peerExtensionBytes
|
|
|
|
c.PeerID = res.peerID
|
|
|
|
c.completedHandshake = time.Now()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cl *Client) runInitiatedHandshookConn(c *connection, t *torrent) (err error) {
|
|
|
|
if c.PeerID == cl.peerID {
|
|
|
|
// Only if we initiated the connection is the remote address a
|
|
|
|
// listen addr for a doppleganger.
|
|
|
|
connsToSelf.Add(1)
|
|
|
|
addr := c.conn.RemoteAddr().String()
|
|
|
|
cl.dopplegangerAddrs[addr] = struct{}{}
|
|
|
|
return
|
2014-08-27 23:45:20 +00:00
|
|
|
}
|
2015-03-18 07:28:13 +00:00
|
|
|
return cl.runHandshookConn(c, t)
|
|
|
|
}
|
|
|
|
|
|
|
|
// runReceivedConn handshakes and runs a connection accepted on one of our
// listeners. The handshakes must complete within handshakesTimeout.
func (cl *Client) runReceivedConn(c *connection) (err error) {
	err = c.conn.SetDeadline(time.Now().Add(handshakesTimeout))
	if err != nil {
		return
	}
	t, err := cl.receiveHandshakes(c)
	if err != nil {
		err = fmt.Errorf("error receiving handshakes: %s", err)
		return
	}
	// Nil torrent with nil error: peer asked for an infohash we don't serve.
	if t == nil {
		return
	}
	cl.mu.Lock()
	defer cl.mu.Unlock()
	// A peer presenting our own ID is ourselves (or a doppleganger); drop it.
	if c.PeerID == cl.peerID {
		return
	}
	return cl.runHandshookConn(c, t)
}
|
|
|
|
|
|
|
|
// runHandshookConn wires up a fully-handshaken connection (deadline-guarded
// reads, writer goroutines, initial protocol messages) and then blocks in
// the connection's message loop until it ends.
func (cl *Client) runHandshookConn(c *connection, t *torrent) (err error) {
	// Clear the handshake deadline for the write side; reads get a rolling
	// deadline via deadlineReader below.
	c.conn.SetWriteDeadline(time.Time{})
	c.rw = readWriter{
		deadlineReader{c.conn, c.rw},
		c.rw,
	}
	if !cl.addConnection(t, c) {
		// The connection was not accepted (e.g. duplicate); not an error.
		return
	}
	defer cl.dropConnection(t, c)
	go c.writer()
	go c.writeOptimizer(time.Minute)
	cl.sendInitialMessages(c, t)
	if t.haveInfo() {
		t.initRequestOrdering(c)
	}
	err = cl.connectionLoop(t, c)
	if err != nil {
		err = fmt.Errorf("error during connection loop: %s", err)
	}
	return
}
|
|
|
|
|
|
|
|
// sendInitialMessages posts the post-handshake messages on conn: the BEP 10
// extended handshake (when both sides support it), then our bitfield (or
// have-none under the fast extension), and finally our DHT port if both
// sides advertise DHT support.
func (me *Client) sendInitialMessages(conn *connection, torrent *torrent) {
	if conn.PeerExtensionBytes.SupportsExtended() && me.extensionBytes.SupportsExtended() {
		conn.Post(pp.Message{
			Type:       pp.Extended,
			ExtendedID: pp.HandshakeExtendedID,
			ExtendedPayload: func() []byte {
				// Bencoded extended-handshake dictionary per BEP 10.
				d := map[string]interface{}{
					// Map of extension name -> local message ID.
					"m": func() (ret map[string]int) {
						ret = make(map[string]int, 2)
						ret["ut_metadata"] = metadataExtendedId
						if !me.config.DisablePEX {
							ret["ut_pex"] = pexExtendedId
						}
						return
					}(),
					"v": extendedHandshakeClientVersion,
					// No upload queue is implemented yet.
					"reqq": 64,
				}
				if !me.config.DisableEncryption {
					// Advertise encryption support.
					d["e"] = 1
				}
				if torrent.metadataSizeKnown() {
					d["metadata_size"] = torrent.metadataSize()
				}
				if p := me.incomingPeerPort(); p != 0 {
					d["p"] = p
				}
				// Tell the peer the compact form of its own external IP.
				yourip, err := addrCompactIP(conn.remoteAddr())
				if err != nil {
					log.Printf("error calculating yourip field value in extension handshake: %s", err)
				} else {
					d["yourip"] = yourip
				}
				// log.Printf("sending %v", d)
				b, err := bencode.Marshal(d)
				if err != nil {
					// Marshalling our own map must not fail.
					panic(err)
				}
				return b
			}(),
		})
	}
	if torrent.haveAnyPieces() {
		conn.Post(pp.Message{
			Type:     pp.Bitfield,
			Bitfield: torrent.bitfield(),
		})
	} else if me.extensionBytes.SupportsFast() && conn.PeerExtensionBytes.SupportsFast() {
		// Fast extension lets us say "have none" instead of an empty bitfield.
		conn.Post(pp.Message{
			Type: pp.HaveNone,
		})
	}
	if conn.PeerExtensionBytes.SupportsDHT() && me.extensionBytes.SupportsDHT() && me.dHT != nil {
		conn.Post(pp.Message{
			Type: pp.Port,
			Port: uint16(AddrPort(me.dHT.Addr())),
		})
	}
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
// Randomizes the piece order for this connection. Every connection will be
|
|
|
|
// given a different ordering. Having it stored per connection saves having to
|
|
|
|
// randomize during request filling, and constantly recalculate the ordering
|
|
|
|
// based on piece priorities.
|
2014-12-03 00:42:22 +00:00
|
|
|
func (t *torrent) initRequestOrdering(c *connection) {
|
2014-12-03 00:22:38 +00:00
|
|
|
if c.pieceRequestOrder != nil || c.piecePriorities != nil {
|
|
|
|
panic("double init of request ordering")
|
|
|
|
}
|
2015-02-09 13:12:29 +00:00
|
|
|
c.piecePriorities = mathRand.Perm(t.numPieces())
|
2014-12-03 00:22:38 +00:00
|
|
|
c.pieceRequestOrder = pieceordering.New()
|
2015-03-18 07:28:13 +00:00
|
|
|
for i := range iter.N(t.Info.NumPieces()) {
|
2015-03-12 09:06:23 +00:00
|
|
|
if !c.PeerHasPiece(i) {
|
2014-12-03 00:22:38 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !t.wantPiece(i) {
|
|
|
|
continue
|
|
|
|
}
|
2014-12-03 07:07:50 +00:00
|
|
|
t.connPendPiece(c, i)
|
2014-12-03 00:22:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-26 14:57:07 +00:00
|
|
|
// peerGotPiece records that c's peer has the given piece and, if we want the
// piece, queues it for requesting on that connection.
func (me *Client) peerGotPiece(t *torrent, c *connection, piece int) {
	if !c.peerHasAll {
		if t.haveInfo() {
			// Piece count known: lazily allocate the full bitmap.
			if c.PeerPieces == nil {
				c.PeerPieces = make([]bool, t.numPieces())
			}
		} else {
			// Piece count unknown: grow the bitmap on demand.
			for piece >= len(c.PeerPieces) {
				c.PeerPieces = append(c.PeerPieces, false)
			}
		}
		// NOTE(review): when the info is known, piece is assumed to be in
		// range [0, numPieces) — an out-of-range index would panic here.
		// Presumably callers validate this; confirm.
		c.PeerPieces[piece] = true
	}
	if t.wantPiece(piece) {
		t.connPendPiece(c, piece)
		me.replenishConnRequests(t, c)
	}
}
|
|
|
|
|
2014-04-08 16:36:05 +00:00
|
|
|
// peerUnchoked is called when the peer on conn unchokes us; respond by
// topping up our outstanding requests on that connection.
func (me *Client) peerUnchoked(torrent *torrent, conn *connection) {
	me.replenishConnRequests(torrent, conn)
}
|
|
|
|
|
2014-05-23 11:01:05 +00:00
|
|
|
func (cl *Client) connCancel(t *torrent, cn *connection, r request) (ok bool) {
|
|
|
|
ok = cn.Cancel(r)
|
|
|
|
if ok {
|
2014-08-23 17:10:47 +00:00
|
|
|
postedCancels.Add(1)
|
2014-05-23 11:01:05 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-06-22 09:48:30 +00:00
|
|
|
func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) bool {
|
2014-05-23 11:01:05 +00:00
|
|
|
if !cn.RequestPending(r) {
|
2015-06-22 09:48:30 +00:00
|
|
|
return false
|
2014-05-23 11:01:05 +00:00
|
|
|
}
|
|
|
|
delete(cn.Requests, r)
|
2015-06-22 09:48:30 +00:00
|
|
|
return true
|
2014-05-23 11:01:05 +00:00
|
|
|
}
|
|
|
|
|
2014-06-26 14:57:07 +00:00
|
|
|
func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
|
2014-06-27 08:57:35 +00:00
|
|
|
if t.haveInfo() {
|
|
|
|
return
|
|
|
|
}
|
2015-03-27 04:36:59 +00:00
|
|
|
if c.PeerExtensionIDs["ut_metadata"] == 0 {
|
|
|
|
// Peer doesn't support this.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Request metadata pieces that we don't have in a random order.
|
2014-06-26 14:57:07 +00:00
|
|
|
var pending []int
|
2015-02-25 04:42:47 +00:00
|
|
|
for index := 0; index < t.metadataPieceCount(); index++ {
|
2015-03-27 04:36:59 +00:00
|
|
|
if !t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
|
2014-06-26 14:57:07 +00:00
|
|
|
pending = append(pending, index)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, i := range mathRand.Perm(len(pending)) {
|
2015-03-27 04:36:59 +00:00
|
|
|
c.requestMetadataPiece(pending[i])
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-28 09:38:31 +00:00
|
|
|
// completedMetadata is called when the last metadata piece has arrived. It
// verifies the assembled metadata against the torrent's infohash, decodes it,
// and installs it; on any failure the metadata is invalidated so it can be
// re-fetched.
func (cl *Client) completedMetadata(t *torrent) {
	h := sha1.New()
	h.Write(t.MetaData)
	var ih InfoHash
	CopyExact(&ih, h.Sum(nil))
	// The SHA-1 of the info dictionary must match the torrent's infohash.
	if ih != t.InfoHash {
		log.Print("bad metadata")
		t.invalidateMetadata()
		return
	}
	var info metainfo.Info
	err := bencode.Unmarshal(t.MetaData, &info)
	if err != nil {
		log.Printf("error unmarshalling metadata: %s", err)
		t.invalidateMetadata()
		return
	}
	// TODO(anacrolix): If this fails, I think something harsher should be
	// done.
	err = cl.setMetaData(t, &info, t.MetaData)
	if err != nil {
		log.Printf("error setting metadata: %s", err)
		t.invalidateMetadata()
		return
	}
	log.Printf("%s: got metadata from peers", t)
}
|
|
|
|
|
2014-08-21 08:12:49 +00:00
|
|
|
// Process incoming ut_metadata message. The payload is a bencoded dict
// (msg_type, piece, and for data messages total_size) followed, for data
// messages, by the raw piece bytes.
func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
	var d map[string]int
	err = bencode.Unmarshal(payload, &d)
	if err != nil {
		err = fmt.Errorf("error unmarshalling payload: %s: %q", err, payload)
		return
	}
	msgType, ok := d["msg_type"]
	if !ok {
		err = errors.New("missing msg_type field")
		return
	}
	piece := d["piece"]
	switch msgType {
	case pp.DataMetadataExtensionMsgType:
		if t.haveInfo() {
			// Already have complete metadata; ignore.
			break
		}
		// The piece data is the tail of the payload; its length is derived
		// from total_size and the piece index.
		begin := len(payload) - metadataPieceSize(d["total_size"], piece)
		if begin < 0 || begin >= len(payload) {
			log.Printf("got bad metadata piece")
			break
		}
		if !c.requestedMetadataPiece(piece) {
			log.Printf("got unexpected metadata piece %d", piece)
			break
		}
		c.metadataRequests[piece] = false
		t.saveMetadataPiece(piece, payload[begin:])
		c.UsefulChunksReceived++
		c.lastUsefulChunkReceived = time.Now()
		if !t.haveAllMetadataPieces() {
			break
		}
		// That was the last piece: verify and install the metadata.
		cl.completedMetadata(t)
	case pp.RequestMetadataExtensionMsgType:
		if !t.haveMetadataPiece(piece) {
			c.Post(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
			break
		}
		// Metadata pieces are 16 KiB each.
		start := (1 << 14) * piece
		c.Post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
	case pp.RejectMetadataExtensionMsgType:
		// Nothing to do; the request will be retried elsewhere if needed.
	default:
		err = errors.New("unknown msg_type value")
	}
	return
}
|
|
|
|
|
2014-06-29 09:07:43 +00:00
|
|
|
// peerExchangeMessage is the bencoded payload of a ut_pex extended message:
// compact peers added since the last message, their flags, and peers dropped.
type peerExchangeMessage struct {
	Added      CompactPeers `bencode:"added"`
	AddedFlags []byte       `bencode:"added.f"`
	Dropped    CompactPeers `bencode:"dropped"`
}
|
|
|
|
|
2014-11-16 19:16:26 +00:00
|
|
|
// Extracts the port as an integer from an address string.
func addrPort(addr net.Addr) int {
	// Delegates to AddrPort from the dot-imported util package.
	return AddrPort(addr)
}
|
|
|
|
|
2015-03-12 09:06:23 +00:00
|
|
|
func (cl *Client) peerHasAll(t *torrent, cn *connection) {
|
|
|
|
cn.peerHasAll = true
|
|
|
|
cn.PeerPieces = nil
|
|
|
|
if t.haveInfo() {
|
|
|
|
for i := 0; i < t.numPieces(); i++ {
|
|
|
|
cl.peerGotPiece(t, cn, i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-16 06:57:47 +00:00
|
|
|
// upload services the peer's pending requests on c, unchoking while we're
// willing to send and choking once the send budget is exhausted. When
// seeding there is no budget; otherwise sends are capped relative to chunks
// usefully received from this peer (simple tit-for-tat).
func (me *Client) upload(t *torrent, c *connection) {
	if me.config.NoUpload {
		return
	}
	if !c.PeerInterested {
		return
	}
	seeding := me.seeding(t)
	if !seeding && !t.connHasWantedPieces(c) {
		return
	}
another:
	for seeding || c.chunksSent < c.UsefulChunksReceived+6 {
		c.Unchoke()
		// Pull one request at a time; the goto restarts the budget check
		// after each chunk sent (map iteration order is irrelevant here).
		for r := range c.PeerRequests {
			err := me.sendChunk(t, c, r)
			if err != nil {
				log.Printf("error sending chunk to peer: %s", err)
			}
			delete(c.PeerRequests, r)
			goto another
		}
		// No pending requests remain; leave the peer unchoked.
		return
	}
	// Budget exhausted.
	c.Choke()
}
|
|
|
|
|
|
|
|
// sendChunk reads the requested chunk from torrent storage and posts it to
// the peer as a Piece message, updating upload accounting.
func (me *Client) sendChunk(t *torrent, c *connection, r request) error {
	b := make([]byte, r.Length)
	// Wait for any in-flight writes to this piece to land before reading.
	t.Pieces[r.Index].pendingWrites.Wait()
	p := t.Info.Piece(int(r.Index))
	n, err := dataReadAt(t.data, b, p.Offset()+int64(r.Begin))
	if err != nil {
		return err
	}
	if n != len(b) {
		// NOTE(review): a short read here aborts the whole process via
		// log.Fatal — presumably treated as an unrecoverable storage
		// invariant violation; confirm this severity is intended.
		log.Fatal(b)
	}
	c.Post(pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: b,
	})
	uploadChunksPosted.Add(1)
	c.chunksSent++
	c.lastChunkSent = time.Now()
	return nil
}
|
|
|
|
|
2014-07-24 03:43:45 +00:00
|
|
|
// Processes incoming bittorrent messages. The client lock is held upon entry
|
|
|
|
// and exit.
|
2014-05-21 07:55:50 +00:00
|
|
|
func (me *Client) connectionLoop(t *torrent, c *connection) error {
|
2014-05-21 08:01:58 +00:00
|
|
|
decoder := pp.Decoder{
|
2015-03-12 19:21:13 +00:00
|
|
|
R: bufio.NewReader(c.rw),
|
2013-09-30 11:51:08 +00:00
|
|
|
MaxLength: 256 * 1024,
|
|
|
|
}
|
|
|
|
for {
|
2013-10-20 14:07:01 +00:00
|
|
|
me.mu.Unlock()
|
2014-05-21 08:01:58 +00:00
|
|
|
var msg pp.Message
|
2014-05-21 07:48:44 +00:00
|
|
|
err := decoder.Decode(&msg)
|
2015-03-18 07:37:26 +00:00
|
|
|
receivedMessageTypes.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
|
2013-10-20 14:07:01 +00:00
|
|
|
me.mu.Lock()
|
2014-08-27 23:31:05 +00:00
|
|
|
c.lastMessageReceived = time.Now()
|
|
|
|
select {
|
|
|
|
case <-c.closing:
|
2014-06-26 08:06:33 +00:00
|
|
|
return nil
|
2014-08-27 23:31:05 +00:00
|
|
|
default:
|
2014-06-26 08:06:33 +00:00
|
|
|
}
|
2013-09-30 11:51:08 +00:00
|
|
|
if err != nil {
|
2014-03-20 13:14:17 +00:00
|
|
|
if me.stopped() || err == io.EOF {
|
2014-03-20 05:58:09 +00:00
|
|
|
return nil
|
|
|
|
}
|
2013-09-30 11:51:08 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
if msg.Keepalive {
|
|
|
|
continue
|
|
|
|
}
|
2013-10-20 14:07:01 +00:00
|
|
|
switch msg.Type {
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Choke:
|
2014-05-21 07:55:50 +00:00
|
|
|
c.PeerChoked = true
|
2014-05-23 11:01:05 +00:00
|
|
|
for r := range c.Requests {
|
|
|
|
me.connDeleteRequest(t, c, r)
|
|
|
|
}
|
2014-12-30 12:58:38 +00:00
|
|
|
// We can then reset our interest.
|
|
|
|
me.replenishConnRequests(t, c)
|
2015-03-12 09:06:23 +00:00
|
|
|
case pp.Reject:
|
|
|
|
me.connDeleteRequest(t, c, newRequest(msg.Index, msg.Begin, msg.Length))
|
2015-03-12 19:21:13 +00:00
|
|
|
me.replenishConnRequests(t, c)
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Unchoke:
|
2014-05-21 07:55:50 +00:00
|
|
|
c.PeerChoked = false
|
|
|
|
me.peerUnchoked(t, c)
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Interested:
|
2014-05-21 07:55:50 +00:00
|
|
|
c.PeerInterested = true
|
2015-06-16 06:57:47 +00:00
|
|
|
me.upload(t, c)
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.NotInterested:
|
2014-05-21 07:55:50 +00:00
|
|
|
c.PeerInterested = false
|
|
|
|
c.Choke()
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Have:
|
2014-05-21 07:55:50 +00:00
|
|
|
me.peerGotPiece(t, c, int(msg.Index))
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Request:
|
2015-06-02 14:03:43 +00:00
|
|
|
if c.Choked {
|
2014-08-24 20:00:29 +00:00
|
|
|
break
|
|
|
|
}
|
2015-06-16 06:57:47 +00:00
|
|
|
if !c.PeerInterested {
|
|
|
|
err = errors.New("peer sent request but isn't interested")
|
|
|
|
break
|
2014-03-17 14:44:22 +00:00
|
|
|
}
|
2015-06-16 06:57:47 +00:00
|
|
|
if c.PeerRequests == nil {
|
|
|
|
c.PeerRequests = make(map[request]struct{}, maxRequests)
|
2014-03-17 14:44:22 +00:00
|
|
|
}
|
2015-06-16 06:57:47 +00:00
|
|
|
c.PeerRequests[newRequest(msg.Index, msg.Begin, msg.Length)] = struct{}{}
|
|
|
|
me.upload(t, c)
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Cancel:
|
2014-04-16 07:33:33 +00:00
|
|
|
req := newRequest(msg.Index, msg.Begin, msg.Length)
|
2014-05-21 07:55:50 +00:00
|
|
|
if !c.PeerCancel(req) {
|
2014-08-22 07:47:44 +00:00
|
|
|
unexpectedCancels.Add(1)
|
2014-04-16 07:33:33 +00:00
|
|
|
}
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Bitfield:
|
2015-03-12 09:06:23 +00:00
|
|
|
if c.PeerPieces != nil || c.peerHasAll {
|
2013-10-20 14:07:01 +00:00
|
|
|
err = errors.New("received unexpected bitfield")
|
|
|
|
break
|
|
|
|
}
|
2014-06-26 14:57:07 +00:00
|
|
|
if t.haveInfo() {
|
2015-02-09 13:12:29 +00:00
|
|
|
if len(msg.Bitfield) < t.numPieces() {
|
2014-06-26 14:57:07 +00:00
|
|
|
err = errors.New("received invalid bitfield")
|
|
|
|
break
|
|
|
|
}
|
2015-02-09 13:12:29 +00:00
|
|
|
msg.Bitfield = msg.Bitfield[:t.numPieces()]
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
|
|
|
c.PeerPieces = msg.Bitfield
|
2014-05-21 07:55:50 +00:00
|
|
|
for index, has := range c.PeerPieces {
|
2013-10-20 14:07:01 +00:00
|
|
|
if has {
|
2014-05-21 07:55:50 +00:00
|
|
|
me.peerGotPiece(t, c, index)
|
2013-10-02 07:57:59 +00:00
|
|
|
}
|
2013-09-30 11:51:08 +00:00
|
|
|
}
|
2015-03-12 09:06:23 +00:00
|
|
|
case pp.HaveAll:
|
|
|
|
if c.PeerPieces != nil || c.peerHasAll {
|
|
|
|
err = errors.New("unexpected have-all")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
me.peerHasAll(t, c)
|
|
|
|
case pp.HaveNone:
|
|
|
|
if c.peerHasAll || c.PeerPieces != nil {
|
|
|
|
err = errors.New("unexpected have-none")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
c.PeerPieces = make([]bool, func() int {
|
|
|
|
if t.haveInfo() {
|
|
|
|
return t.numPieces()
|
|
|
|
} else {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}())
|
2014-05-21 08:01:58 +00:00
|
|
|
case pp.Piece:
|
2014-05-21 07:55:50 +00:00
|
|
|
err = me.downloadedChunk(t, c, &msg)
|
2014-06-26 14:57:07 +00:00
|
|
|
case pp.Extended:
|
|
|
|
switch msg.ExtendedID {
|
|
|
|
case pp.HandshakeExtendedID:
|
2014-06-29 08:57:49 +00:00
|
|
|
// TODO: Create a bencode struct for this.
|
2014-06-26 14:57:07 +00:00
|
|
|
var d map[string]interface{}
|
|
|
|
err = bencode.Unmarshal(msg.ExtendedPayload, &d)
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("error decoding extended message payload: %s", err)
|
|
|
|
break
|
|
|
|
}
|
2014-11-17 05:27:01 +00:00
|
|
|
// log.Printf("got handshake from %q: %#v", c.Socket.RemoteAddr().String(), d)
|
2014-06-29 08:57:49 +00:00
|
|
|
if reqq, ok := d["reqq"]; ok {
|
|
|
|
if i, ok := reqq.(int64); ok {
|
|
|
|
c.PeerMaxRequests = int(i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if v, ok := d["v"]; ok {
|
|
|
|
c.PeerClientName = v.(string)
|
|
|
|
}
|
2014-06-26 14:57:07 +00:00
|
|
|
m, ok := d["m"]
|
|
|
|
if !ok {
|
|
|
|
err = errors.New("handshake missing m item")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
mTyped, ok := m.(map[string]interface{})
|
|
|
|
if !ok {
|
|
|
|
err = errors.New("handshake m value is not dict")
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if c.PeerExtensionIDs == nil {
|
2015-03-27 04:36:59 +00:00
|
|
|
c.PeerExtensionIDs = make(map[string]byte, len(mTyped))
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
|
|
|
for name, v := range mTyped {
|
|
|
|
id, ok := v.(int64)
|
|
|
|
if !ok {
|
|
|
|
log.Printf("bad handshake m item extension ID type: %T", v)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if id == 0 {
|
|
|
|
delete(c.PeerExtensionIDs, name)
|
|
|
|
} else {
|
2015-03-18 07:37:26 +00:00
|
|
|
if c.PeerExtensionIDs[name] == 0 {
|
|
|
|
supportedExtensionMessages.Add(name, 1)
|
|
|
|
}
|
2015-03-27 04:36:59 +00:00
|
|
|
c.PeerExtensionIDs[name] = byte(id)
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
metadata_sizeUntyped, ok := d["metadata_size"]
|
|
|
|
if ok {
|
|
|
|
metadata_size, ok := metadata_sizeUntyped.(int64)
|
|
|
|
if !ok {
|
|
|
|
log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
|
|
|
|
} else {
|
2015-03-27 04:36:59 +00:00
|
|
|
t.setMetadataSize(metadata_size, me)
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
|
|
|
|
me.requestPendingMetadata(t, c)
|
|
|
|
}
|
2015-03-25 04:42:14 +00:00
|
|
|
case metadataExtendedId:
|
2014-06-28 09:38:31 +00:00
|
|
|
err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
|
2014-06-30 14:05:28 +00:00
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("error handling metadata extension message: %s", err)
|
|
|
|
}
|
2015-03-25 04:42:14 +00:00
|
|
|
case pexExtendedId:
|
|
|
|
if me.config.DisablePEX {
|
|
|
|
break
|
|
|
|
}
|
2014-06-29 09:07:43 +00:00
|
|
|
var pexMsg peerExchangeMessage
|
|
|
|
err := bencode.Unmarshal(msg.ExtendedPayload, &pexMsg)
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("error unmarshalling PEX message: %s", err)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
go func() {
|
2015-03-08 06:28:14 +00:00
|
|
|
me.mu.Lock()
|
|
|
|
me.addPeers(t, func() (ret []Peer) {
|
2015-03-18 07:37:26 +00:00
|
|
|
for i, cp := range pexMsg.Added {
|
2014-06-29 09:07:43 +00:00
|
|
|
p := Peer{
|
2014-07-16 07:06:18 +00:00
|
|
|
IP: make([]byte, 4),
|
|
|
|
Port: int(cp.Port),
|
|
|
|
Source: peerSourcePEX,
|
2014-06-29 09:07:43 +00:00
|
|
|
}
|
2015-03-18 07:37:26 +00:00
|
|
|
if i < len(pexMsg.AddedFlags) && pexMsg.AddedFlags[i]&0x01 != 0 {
|
|
|
|
p.SupportsEncryption = true
|
2014-06-29 09:07:43 +00:00
|
|
|
}
|
2015-03-18 07:37:26 +00:00
|
|
|
CopyExact(p.IP, cp.IP[:])
|
2014-06-29 09:07:43 +00:00
|
|
|
ret = append(ret, p)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}())
|
2015-03-08 06:28:14 +00:00
|
|
|
me.mu.Unlock()
|
2014-08-21 15:33:13 +00:00
|
|
|
peersFoundByPEX.Add(int64(len(pexMsg.Added)))
|
2014-06-29 09:07:43 +00:00
|
|
|
}()
|
2014-06-28 09:38:31 +00:00
|
|
|
default:
|
2014-07-09 16:59:37 +00:00
|
|
|
err = fmt.Errorf("unexpected extended message ID: %v", msg.ExtendedID)
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
2014-08-21 08:12:49 +00:00
|
|
|
if err != nil {
|
2014-09-11 04:20:47 +00:00
|
|
|
// That client uses its own extension IDs for outgoing message
|
|
|
|
// types, which is incorrect.
|
2014-09-13 17:47:47 +00:00
|
|
|
if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) ||
|
|
|
|
strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
|
2014-09-11 04:20:47 +00:00
|
|
|
return nil
|
|
|
|
}
|
2014-08-21 08:12:49 +00:00
|
|
|
}
|
2014-08-25 12:12:16 +00:00
|
|
|
case pp.Port:
|
|
|
|
if me.dHT == nil {
|
|
|
|
break
|
|
|
|
}
|
2015-03-12 19:21:13 +00:00
|
|
|
pingAddr, err := net.ResolveUDPAddr("", c.remoteAddr().String())
|
2014-11-16 19:16:26 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
if msg.Port != 0 {
|
|
|
|
pingAddr.Port = int(msg.Port)
|
|
|
|
}
|
|
|
|
_, err = me.dHT.Ping(pingAddr)
|
2013-10-20 14:07:01 +00:00
|
|
|
default:
|
2014-05-21 07:42:06 +00:00
|
|
|
err = fmt.Errorf("received unknown message type: %#v", msg.Type)
|
2013-10-20 14:07:01 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2013-09-30 11:51:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-29 14:45:26 +00:00
|
|
|
// Returns true if connection is removed from torrent.Conns.
|
|
|
|
func (me *Client) deleteConnection(t *torrent, c *connection) bool {
|
|
|
|
for i0, _c := range t.Conns {
|
|
|
|
if _c != c {
|
2013-09-30 11:51:08 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
i1 := len(t.Conns) - 1
|
2013-09-30 11:51:08 +00:00
|
|
|
if i0 != i1 {
|
2015-06-29 14:45:26 +00:00
|
|
|
t.Conns[i0] = t.Conns[i1]
|
2013-09-30 11:51:08 +00:00
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
t.Conns = t.Conns[:i1]
|
|
|
|
return true
|
2013-09-30 11:51:08 +00:00
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
return false
|
2013-09-30 11:51:08 +00:00
|
|
|
}
|
|
|
|
|
2015-06-29 14:45:26 +00:00
|
|
|
func (me *Client) dropConnection(t *torrent, c *connection) {
|
|
|
|
me.event.Broadcast()
|
|
|
|
c.Close()
|
|
|
|
if me.deleteConnection(t, c) {
|
|
|
|
me.openNewConns(t)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns true if the connection is added.
|
2014-04-08 16:36:05 +00:00
|
|
|
func (me *Client) addConnection(t *torrent, c *connection) bool {
|
2014-06-26 08:06:33 +00:00
|
|
|
if me.stopped() {
|
|
|
|
return false
|
|
|
|
}
|
2014-08-27 23:39:27 +00:00
|
|
|
select {
|
|
|
|
case <-t.ceasingNetworking:
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
}
|
2014-12-03 07:07:50 +00:00
|
|
|
if !me.wantConns(t) {
|
|
|
|
return false
|
|
|
|
}
|
2014-03-16 15:30:10 +00:00
|
|
|
for _, c0 := range t.Conns {
|
2014-08-21 08:12:49 +00:00
|
|
|
if c.PeerID == c0.PeerID {
|
2014-05-21 07:42:06 +00:00
|
|
|
// Already connected to a client with that ID.
|
2015-03-18 07:37:26 +00:00
|
|
|
duplicateClientConns.Add(1)
|
2013-09-30 11:51:08 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
if len(t.Conns) >= socketsPerTorrent {
|
|
|
|
c := t.worstBadConn(me)
|
|
|
|
if c == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
log.Printf("%s: dropping connection to make room for new one: %s", t, c)
|
|
|
|
c.Close()
|
|
|
|
me.deleteConnection(t, c)
|
2014-08-28 00:06:36 +00:00
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
if len(t.Conns) >= socketsPerTorrent {
|
|
|
|
panic(len(t.Conns))
|
|
|
|
}
|
|
|
|
t.Conns = append(t.Conns, c)
|
2013-09-30 11:51:08 +00:00
|
|
|
return true
|
2013-09-28 22:11:24 +00:00
|
|
|
}
|
|
|
|
|
2014-12-03 07:07:50 +00:00
|
|
|
func (t *torrent) needData() bool {
|
|
|
|
if !t.haveInfo() {
|
|
|
|
return true
|
|
|
|
}
|
2015-07-17 11:07:01 +00:00
|
|
|
if len(t.urgent) != 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
for _, p := range t.Pieces {
|
|
|
|
if p.Priority != PiecePriorityNone {
|
2014-12-03 07:07:50 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2015-06-16 06:57:47 +00:00
|
|
|
func (cl *Client) usefulConn(t *torrent, c *connection) bool {
|
2015-06-29 14:45:26 +00:00
|
|
|
select {
|
|
|
|
case <-c.closing:
|
|
|
|
return false
|
|
|
|
default:
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
|
|
|
if !t.haveInfo() {
|
2015-06-29 14:45:26 +00:00
|
|
|
return c.supportsExtension("ut_metadata")
|
2015-06-16 06:57:47 +00:00
|
|
|
}
|
|
|
|
if cl.seeding(t) {
|
|
|
|
return c.PeerInterested
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
2015-06-16 06:57:47 +00:00
|
|
|
return t.connHasWantedPieces(c)
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) wantConns(t *torrent) bool {
|
2015-06-16 06:57:47 +00:00
|
|
|
if !me.seeding(t) && !t.needData() {
|
2014-12-03 07:07:50 +00:00
|
|
|
return false
|
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
if len(t.Conns) < socketsPerTorrent {
|
|
|
|
return true
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
2015-06-29 14:45:26 +00:00
|
|
|
return t.worstBadConn(me) != nil
|
2014-12-03 07:07:50 +00:00
|
|
|
}
|
|
|
|
|
2014-11-21 06:09:55 +00:00
|
|
|
func (me *Client) openNewConns(t *torrent) {
|
|
|
|
select {
|
|
|
|
case <-t.ceasingNetworking:
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
for len(t.Peers) != 0 {
|
2014-12-03 07:07:50 +00:00
|
|
|
if !me.wantConns(t) {
|
|
|
|
return
|
2014-08-27 23:39:27 +00:00
|
|
|
}
|
2014-12-23 04:20:28 +00:00
|
|
|
if len(t.HalfOpen) >= me.halfOpenLimit {
|
2014-12-03 07:07:50 +00:00
|
|
|
return
|
2014-11-21 06:09:55 +00:00
|
|
|
}
|
|
|
|
var (
|
|
|
|
k peersKey
|
|
|
|
p Peer
|
|
|
|
)
|
|
|
|
for k, p = range t.Peers {
|
|
|
|
break
|
2013-09-28 22:11:24 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
delete(t.Peers, k)
|
|
|
|
me.initiateConn(p, t)
|
2013-09-28 22:11:24 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
t.wantPeers.Broadcast()
|
2013-09-28 22:11:24 +00:00
|
|
|
}
|
|
|
|
|
2014-12-09 03:58:49 +00:00
|
|
|
func (me *Client) addPeers(t *torrent, peers []Peer) {
|
2015-03-08 06:28:14 +00:00
|
|
|
for _, p := range peers {
|
2015-03-18 07:29:51 +00:00
|
|
|
if me.dopplegangerAddr(net.JoinHostPort(p.IP.String(), strconv.FormatInt(int64(p.Port), 10))) {
|
|
|
|
continue
|
|
|
|
}
|
2015-03-08 06:28:14 +00:00
|
|
|
if me.ipBlockRange(p.IP) != nil {
|
2014-11-30 02:33:17 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-06-02 14:03:43 +00:00
|
|
|
if p.Port == 0 {
|
|
|
|
log.Printf("got bad peer: %v", p)
|
|
|
|
continue
|
|
|
|
}
|
2015-03-08 06:28:14 +00:00
|
|
|
t.addPeer(p)
|
2014-11-30 02:33:17 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
me.openNewConns(t)
|
2014-12-09 03:58:49 +00:00
|
|
|
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
func (cl *Client) cachedMetaInfoFilename(ih InfoHash) string {
|
2014-12-01 22:39:09 +00:00
|
|
|
return filepath.Join(cl.configDir(), "torrents", ih.HexString()+".torrent")
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cl *Client) saveTorrentFile(t *torrent) error {
|
2015-03-18 07:28:13 +00:00
|
|
|
path := cl.cachedMetaInfoFilename(t.InfoHash)
|
2014-12-01 22:39:09 +00:00
|
|
|
os.MkdirAll(filepath.Dir(path), 0777)
|
|
|
|
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error opening file: %s", err)
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
e := bencode.NewEncoder(f)
|
|
|
|
err = e.Encode(t.MetaInfo())
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error marshalling metainfo: %s", err)
|
|
|
|
}
|
2015-02-06 03:54:59 +00:00
|
|
|
mi, err := cl.torrentCacheMetaInfo(t.InfoHash)
|
|
|
|
if err != nil {
|
|
|
|
// For example, a script kiddy makes us load too many files, and we're
|
|
|
|
// able to save the torrent, but not load it again to check it.
|
|
|
|
return nil
|
|
|
|
}
|
2014-12-02 05:32:40 +00:00
|
|
|
if !bytes.Equal(mi.Info.Hash, t.InfoHash[:]) {
|
|
|
|
log.Fatalf("%x != %x", mi.Info.Hash, t.InfoHash[:])
|
|
|
|
}
|
2014-12-01 22:39:09 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-02-25 03:48:39 +00:00
|
|
|
// startTorrent begins piece verification for a torrent that already
// has both its info and its storage attached.
func (cl *Client) startTorrent(t *torrent) {
	if t.Info == nil || t.data == nil {
		// Caller must have set metadata and storage first.
		panic("nope")
	}
	// If the client intends to upload, it needs to know what state pieces are
	// in.
	if !cl.config.NoUpload {
		// Queue all pieces for hashing. This is done sequentially to avoid
		// spamming goroutines.
		for _, p := range t.Pieces {
			// NOTE(review): assumes t.Pieces elements are pointers so this
			// flag assignment sticks — confirm against the torrent type.
			p.QueuedForHash = true
		}
		go func() {
			// Verify each piece in order, off the caller's goroutine.
			for i := range t.Pieces {
				cl.verifyPiece(t, pp.Integer(i))
			}
		}()
	}
}
|
|
|
|
|
|
|
|
// Storage cannot be changed once it's set.
|
2015-03-08 06:28:14 +00:00
|
|
|
func (cl *Client) setStorage(t *torrent, td data.Data) (err error) {
|
2015-02-25 03:48:39 +00:00
|
|
|
err = t.setStorage(td)
|
|
|
|
cl.event.Broadcast()
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
cl.startTorrent(t)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:11:45 +00:00
|
|
|
// TorrentDataOpener returns the storage implementation to use for a
// torrent described by the given info.
type TorrentDataOpener func(*metainfo.Info) data.Data
|
2015-02-27 01:45:55 +00:00
|
|
|
|
2015-03-18 07:32:31 +00:00
|
|
|
// setMetaData installs the info for t, caches the metainfo to disk
// when caching is enabled, and then opens storage for the torrent.
func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err error) {
	// The client mutex is passed down — NOTE(review): presumably so
	// setMetadata can release it during validation; confirm against
	// torrent.setMetadata.
	err = t.setMetadata(md, bytes, &cl.mu)
	if err != nil {
		return
	}
	if !cl.config.DisableMetainfoCache {
		// Best effort: a caching failure is logged, not returned.
		if err := cl.saveTorrentFile(t); err != nil {
			log.Printf("error saving torrent file for %s: %s", t, err)
		}
	}
	cl.event.Broadcast()
	// Wake anything blocked waiting for the metainfo to arrive.
	close(t.gotMetainfo)
	td := cl.torrentDataOpener(md)
	err = cl.setStorage(t, td)
	return
}
|
|
|
|
|
|
|
|
// Prepare a Torrent without any attachment to a Client. That means we can
|
|
|
|
// initialize fields all fields that don't require the Client without locking
|
|
|
|
// it.
|
2015-03-18 07:28:13 +00:00
|
|
|
func newTorrent(ih InfoHash) (t *torrent, err error) {
|
2014-06-26 14:57:07 +00:00
|
|
|
t = &torrent{
|
2015-07-15 05:31:18 +00:00
|
|
|
InfoHash: ih,
|
|
|
|
chunkSize: defaultChunkSize,
|
|
|
|
Peers: make(map[peersKey]Peer),
|
2014-08-24 20:01:05 +00:00
|
|
|
|
2014-08-27 23:39:27 +00:00
|
|
|
closing: make(chan struct{}),
|
|
|
|
ceasingNetworking: make(chan struct{}),
|
2014-09-25 08:05:52 +00:00
|
|
|
|
2014-12-01 22:37:40 +00:00
|
|
|
gotMetainfo: make(chan struct{}),
|
2014-11-16 19:30:44 +00:00
|
|
|
|
2014-12-20 02:00:21 +00:00
|
|
|
HalfOpen: make(map[string]struct{}),
|
2014-06-26 14:57:07 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
t.wantPeers.L = &t.stateMu
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-12-01 09:32:17 +00:00
|
|
|
// Seed the default math/rand source once at package load.
func init() {
	// For shuffling the tracker tiers.
	mathRand.Seed(time.Now().Unix())
}
|
|
|
|
|
2014-11-21 06:09:55 +00:00
|
|
|
// The trackers within each tier must be shuffled before use.
|
|
|
|
// http://stackoverflow.com/a/12267471/149482
|
|
|
|
// http://www.bittorrent.org/beps/bep_0012.html#order-of-processing
|
|
|
|
func shuffleTier(tier []tracker.Client) {
|
|
|
|
for i := range tier {
|
|
|
|
j := mathRand.Intn(i + 1)
|
|
|
|
tier[i], tier[j] = tier[j], tier[i]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func copyTrackers(base [][]tracker.Client) (copy [][]tracker.Client) {
|
|
|
|
for _, tier := range base {
|
2014-11-21 06:54:19 +00:00
|
|
|
copy = append(copy, append([]tracker.Client{}, tier...))
|
2014-11-21 06:09:55 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func mergeTier(tier []tracker.Client, newURLs []string) []tracker.Client {
|
|
|
|
nextURL:
|
|
|
|
for _, url := range newURLs {
|
|
|
|
for _, tr := range tier {
|
|
|
|
if tr.URL() == url {
|
|
|
|
continue nextURL
|
2014-03-16 15:30:10 +00:00
|
|
|
}
|
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
tr, err := tracker.New(url)
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("error creating tracker client for %q: %s", url, err)
|
|
|
|
continue
|
2014-03-16 15:30:10 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
tier = append(tier, tr)
|
2014-03-16 15:30:10 +00:00
|
|
|
}
|
2014-11-21 06:09:55 +00:00
|
|
|
return tier
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *torrent) addTrackers(announceList [][]string) {
|
|
|
|
newTrackers := copyTrackers(t.Trackers)
|
|
|
|
for tierIndex, tier := range announceList {
|
|
|
|
if tierIndex < len(newTrackers) {
|
|
|
|
newTrackers[tierIndex] = mergeTier(newTrackers[tierIndex], tier)
|
|
|
|
} else {
|
|
|
|
newTrackers = append(newTrackers, mergeTier(nil, tier))
|
|
|
|
}
|
2014-11-21 06:32:27 +00:00
|
|
|
shuffleTier(newTrackers[tierIndex])
|
2014-11-21 06:09:55 +00:00
|
|
|
}
|
|
|
|
t.Trackers = newTrackers
|
2014-03-16 15:30:10 +00:00
|
|
|
}
|
|
|
|
|
2015-03-25 06:32:42 +00:00
|
|
|
// Don't call this before the info is available.
|
|
|
|
func (t *torrent) BytesCompleted() int64 {
|
|
|
|
if !t.haveInfo() {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return t.Info.TotalLength() - t.bytesLeft()
|
|
|
|
}
|
|
|
|
|
2015-03-07 06:11:02 +00:00
|
|
|
// A file-like handle to some torrent data resource.
type Handle interface {
	io.Reader
	io.Seeker
	io.Closer
	// Random access that doesn't disturb the seek offset.
	io.ReaderAt
}
|
|
|
|
|
2015-01-27 14:12:36 +00:00
|
|
|
// Returns handles to the files in the torrent. This requires the metainfo is
|
|
|
|
// available first.
|
|
|
|
func (t Torrent) Files() (ret []File) {
|
2015-03-01 03:32:54 +00:00
|
|
|
t.cl.mu.Lock()
|
2015-04-28 05:24:17 +00:00
|
|
|
info := t.Info()
|
2015-03-01 03:32:54 +00:00
|
|
|
t.cl.mu.Unlock()
|
|
|
|
if info == nil {
|
2015-02-25 03:51:56 +00:00
|
|
|
return
|
|
|
|
}
|
2015-01-27 14:12:36 +00:00
|
|
|
var offset int64
|
2015-03-01 03:32:54 +00:00
|
|
|
for _, fi := range info.UpvertedFiles() {
|
2015-01-27 14:12:36 +00:00
|
|
|
ret = append(ret, File{
|
|
|
|
t,
|
2015-03-01 03:32:54 +00:00
|
|
|
strings.Join(append([]string{info.Name}, fi.Path...), "/"),
|
2015-01-27 14:12:36 +00:00
|
|
|
offset,
|
|
|
|
fi.Length,
|
2015-02-09 13:18:59 +00:00
|
|
|
fi,
|
2015-01-27 14:12:36 +00:00
|
|
|
})
|
|
|
|
offset += fi.Length
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-06-01 08:17:14 +00:00
|
|
|
// Marks the pieces in the given region for download.
|
2015-01-27 14:12:36 +00:00
|
|
|
func (t Torrent) SetRegionPriority(off, len int64) {
|
|
|
|
t.cl.mu.Lock()
|
|
|
|
defer t.cl.mu.Unlock()
|
2015-02-25 04:42:47 +00:00
|
|
|
pieceSize := int64(t.usualPieceSize())
|
2015-01-27 14:12:36 +00:00
|
|
|
for i := off / pieceSize; i*pieceSize < off+len; i++ {
|
2015-06-01 08:22:12 +00:00
|
|
|
t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
|
2015-01-27 14:12:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-01 09:37:33 +00:00
|
|
|
func (t Torrent) AddPeers(pp []Peer) error {
|
2015-03-08 06:28:14 +00:00
|
|
|
cl := t.cl
|
|
|
|
cl.mu.Lock()
|
|
|
|
defer cl.mu.Unlock()
|
|
|
|
cl.addPeers(t.torrent, pp)
|
|
|
|
return nil
|
2014-12-01 09:37:33 +00:00
|
|
|
}
|
|
|
|
|
2015-04-28 05:24:17 +00:00
|
|
|
// Marks the entire torrent for download. Requires the info first, see
|
|
|
|
// GotInfo.
|
2014-12-01 09:37:33 +00:00
|
|
|
func (t Torrent) DownloadAll() {
|
|
|
|
t.cl.mu.Lock()
|
2015-03-19 23:52:01 +00:00
|
|
|
defer t.cl.mu.Unlock()
|
|
|
|
for i := range iter.N(t.numPieces()) {
|
2015-06-01 08:22:12 +00:00
|
|
|
t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
|
2014-12-01 09:37:33 +00:00
|
|
|
}
|
2015-03-19 23:52:01 +00:00
|
|
|
// Nice to have the first and last pieces sooner for various interactive
|
2014-12-05 06:56:28 +00:00
|
|
|
// purposes.
|
2015-06-01 08:22:12 +00:00
|
|
|
t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
|
|
|
|
t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
|
2014-12-01 09:37:33 +00:00
|
|
|
}
|
|
|
|
|
2015-03-18 07:28:13 +00:00
|
|
|
// Returns nil metainfo if it isn't in the cache. Checks that the retrieved
|
|
|
|
// metainfo has the correct infohash.
|
2014-12-02 05:32:40 +00:00
|
|
|
func (cl *Client) torrentCacheMetaInfo(ih InfoHash) (mi *metainfo.MetaInfo, err error) {
|
2015-02-25 03:48:39 +00:00
|
|
|
if cl.config.DisableMetainfoCache {
|
|
|
|
return
|
|
|
|
}
|
2015-03-18 07:28:13 +00:00
|
|
|
f, err := os.Open(cl.cachedMetaInfoFilename(ih))
|
2014-12-02 05:32:40 +00:00
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
err = nil
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
dec := bencode.NewDecoder(f)
|
|
|
|
err = dec.Decode(&mi)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !bytes.Equal(mi.Info.Hash, ih[:]) {
|
|
|
|
err = fmt.Errorf("cached torrent has wrong infohash: %x != %x", mi.Info.Hash, ih[:])
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-20 12:52:53 +00:00
|
|
|
// Specifies a new torrent for adding to a client. There are helpers for
// magnet URIs and torrent metainfo files.
type TorrentSpec struct {
	// The tiered tracker URIs.
	Trackers [][]string
	// The infohash identifying the torrent.
	InfoHash InfoHash
	// The torrent info, when already known.
	Info *metainfo.InfoEx
	// The name to use if the Name field from the Info isn't available.
	DisplayName string
	// The chunk size to use for outbound requests. Defaults to 16KiB if not
	// set.
	ChunkSize int
}
|
|
|
|
|
|
|
|
func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
|
2014-06-26 14:57:07 +00:00
|
|
|
m, err := ParseMagnetURI(uri)
|
2013-09-26 09:49:15 +00:00
|
|
|
if err != nil {
|
2014-06-26 14:57:07 +00:00
|
|
|
return
|
2013-09-26 09:49:15 +00:00
|
|
|
}
|
2015-03-18 07:32:31 +00:00
|
|
|
spec = &TorrentSpec{
|
|
|
|
Trackers: [][]string{m.Trackers},
|
|
|
|
DisplayName: m.DisplayName,
|
2015-04-01 03:34:57 +00:00
|
|
|
InfoHash: m.InfoHash,
|
2015-03-18 07:32:31 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func TorrentSpecFromMetaInfo(mi *metainfo.MetaInfo) (spec *TorrentSpec) {
|
|
|
|
spec = &TorrentSpec{
|
2015-03-27 06:16:50 +00:00
|
|
|
Trackers: mi.AnnounceList,
|
|
|
|
Info: &mi.Info,
|
|
|
|
DisplayName: mi.Info.Name,
|
2015-03-18 07:32:31 +00:00
|
|
|
}
|
|
|
|
CopyExact(&spec.InfoHash, &mi.Info.Hash)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-27 15:50:55 +00:00
|
|
|
// Add or merge a torrent spec. If the torrent is already present, the
// trackers will be merged with the existing ones. If the Info isn't yet
// known, it will be set. The display name is replaced if the new spec
// provides one. Returns new if the torrent wasn't already in the client.
func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (T Torrent, new bool, err error) {
	T.cl = cl
	cl.mu.Lock()
	defer cl.mu.Unlock()

	t, ok := cl.torrents[spec.InfoHash]
	if !ok {
		new = true

		// Refuse torrents the client has explicitly banned.
		if _, ok := cl.bannedTorrents[spec.InfoHash]; ok {
			err = errors.New("banned torrent")
			return
		}

		t, err = newTorrent(spec.InfoHash)
		if err != nil {
			return
		}
		if spec.ChunkSize != 0 {
			t.chunkSize = pp.Integer(spec.ChunkSize)
		}
	}
	if spec.DisplayName != "" {
		t.DisplayName = spec.DisplayName
	}
	// Try to merge in info we have on the torrent. Any err left will
	// terminate the function.
	if t.Info == nil {
		if spec.Info != nil {
			// The spec carries the info directly.
			err = cl.setMetaData(t, &spec.Info.Info, spec.Info.Bytes)
		} else {
			// Fall back to the on-disk metainfo cache.
			var mi *metainfo.MetaInfo
			mi, err = cl.torrentCacheMetaInfo(spec.InfoHash)
			if err != nil {
				// A cache failure is not fatal for adding the torrent.
				log.Printf("error getting cached metainfo: %s", err)
				err = nil
			} else if mi != nil {
				t.addTrackers(mi.AnnounceList)
				err = cl.setMetaData(t, &mi.Info.Info, mi.Info.Bytes)
			}
		}
	}
	if err != nil {
		return
	}
	t.addTrackers(spec.Trackers)

	cl.torrents[spec.InfoHash] = t
	T.torrent = t

	// From this point onwards, we can consider the torrent a part of the
	// client.
	if new {
		if !cl.config.DisableTrackers {
			go cl.announceTorrentTrackers(T.torrent)
		}
		if cl.dHT != nil {
			go cl.announceTorrentDHT(T.torrent, true)
		}
	}
	return
}
|
2014-05-21 07:37:31 +00:00
|
|
|
|
2015-02-06 03:54:59 +00:00
|
|
|
func (me *Client) dropTorrent(infoHash InfoHash) (err error) {
|
2014-07-22 15:54:11 +00:00
|
|
|
t, ok := me.torrents[infoHash]
|
|
|
|
if !ok {
|
|
|
|
err = fmt.Errorf("no such torrent")
|
|
|
|
return
|
|
|
|
}
|
2015-02-09 13:12:29 +00:00
|
|
|
err = t.close()
|
2014-07-22 15:54:11 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
delete(me.torrents, infoHash)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-11-21 06:09:55 +00:00
|
|
|
// Returns true when peers are required, or false if the torrent is closing.
func (cl *Client) waitWantPeers(t *torrent) bool {
	// Lock order is cl.mu before t.stateMu, both here and when
	// re-acquiring below.
	cl.mu.Lock()
	defer cl.mu.Unlock()
	t.stateMu.Lock()
	defer t.stateMu.Unlock()
	for {
		select {
		case <-t.ceasingNetworking:
			return false
		default:
		}
		if len(t.Peers) > torrentPeersLowWater {
			// Plenty of peers already; block until the level drops.
			goto wait
		}
		if t.needData() || cl.seeding(t) {
			return true
		}
	wait:
		// Release the client lock while blocked so other client work can
		// proceed. t.wantPeers is associated with t.stateMu, so Wait
		// releases and re-takes that mutex itself; we then drop it and
		// re-acquire both locks in the cl.mu-first order.
		cl.mu.Unlock()
		t.wantPeers.Wait()
		t.stateMu.Unlock()
		cl.mu.Lock()
		t.stateMu.Lock()
	}
}
|
|
|
|
|
2015-05-14 22:39:53 +00:00
|
|
|
// Returns whether the client should make effort to seed the torrent.
|
|
|
|
func (cl *Client) seeding(t *torrent) bool {
|
2015-06-16 06:57:47 +00:00
|
|
|
if cl.config.NoUpload {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if !cl.config.Seed {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if t.needData() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
2015-05-14 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2014-11-21 06:09:55 +00:00
|
|
|
// announceTorrentDHT repeatedly announces t to the DHT and feeds the
// discovered peers into the torrent's swarm, until the torrent no
// longer wants peers or ceases networking.
func (cl *Client) announceTorrentDHT(t *torrent, impliedPort bool) {
	for cl.waitWantPeers(t) {
		log.Printf("getting peers for %q from DHT", t)
		ps, err := cl.dHT.Announce(string(t.InfoHash[:]), cl.incomingPeerPort(), impliedPort)
		if err != nil {
			log.Printf("error getting peers from dht: %s", err)
			return
		}
		// Distinct peer addresses seen during this scrape, for logging.
		allAddrs := make(map[string]struct{})
	getPeers:
		for {
			select {
			case v, ok := <-ps.Peers:
				if !ok {
					// The announce stream is exhausted.
					break getPeers
				}
				peersFoundByDHT.Add(int64(len(v.Peers)))
				for _, p := range v.Peers {
					allAddrs[(&net.UDPAddr{
						IP:   p.IP[:],
						Port: int(p.Port),
					}).String()] = struct{}{}
				}
				// log.Printf("%s: %d new peers from DHT", t, len(v.Peers))
				cl.mu.Lock()
				cl.addPeers(t, func() (ret []Peer) {
					for _, cp := range v.Peers {
						ret = append(ret, Peer{
							IP:     cp.IP[:],
							Port:   int(cp.Port),
							Source: peerSourceDHT,
						})
					}
					return
				}())
				numPeers := len(t.Peers)
				cl.mu.Unlock()
				if numPeers >= torrentPeersHighWater {
					// Swarm is full enough; stop this scrape early.
					break getPeers
				}
			case <-t.ceasingNetworking:
				ps.Close()
				return
			}
		}
		ps.Close()
		log.Printf("finished DHT peer scrape for %s: %d peers", t, len(allAddrs))
	}
}
|
|
|
|
|
2015-03-10 15:41:41 +00:00
|
|
|
func (cl *Client) trackerBlockedUnlocked(tr tracker.Client) (blocked bool, err error) {
|
|
|
|
url_, err := url.Parse(tr.URL())
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
host, _, err := net.SplitHostPort(url_.Host)
|
|
|
|
if err != nil {
|
|
|
|
host = url_.Host
|
|
|
|
}
|
|
|
|
addr, err := net.ResolveIPAddr("ip", host)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
cl.mu.Lock()
|
|
|
|
if cl.ipBlockList != nil {
|
|
|
|
if cl.ipBlockRange(addr.IP) != nil {
|
|
|
|
blocked = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cl.mu.Unlock()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-12-01 09:33:52 +00:00
|
|
|
// announceTorrentSingleTracker performs one announce to tr for t, adds
// the returned peers to the swarm, then sleeps for the interval the
// tracker requested before returning.
func (cl *Client) announceTorrentSingleTracker(tr tracker.Client, req *tracker.AnnounceRequest, t *torrent) error {
	blocked, err := cl.trackerBlockedUnlocked(tr)
	if err != nil {
		return fmt.Errorf("error determining if tracker blocked: %s", err)
	}
	if blocked {
		return fmt.Errorf("tracker blocked: %s", tr)
	}
	if err := tr.Connect(); err != nil {
		return fmt.Errorf("error connecting: %s", err)
	}
	resp, err := tr.Announce(req)
	if err != nil {
		return fmt.Errorf("error announcing: %s", err)
	}
	// Convert the tracker's peer list to our Peer type.
	var peers []Peer
	for _, peer := range resp.Peers {
		peers = append(peers, Peer{
			IP:   peer.IP,
			Port: peer.Port,
		})
	}
	cl.mu.Lock()
	cl.addPeers(t, peers)
	cl.mu.Unlock()

	log.Printf("%s: %d new peers from %s", t, len(peers), tr)
	peersFoundByTracker.Add(int64(len(peers)))

	// Honour the tracker's requested announce interval before the caller
	// announces to this tracker again.
	time.Sleep(time.Second * time.Duration(resp.Interval))
	return nil
}
|
|
|
|
|
|
|
|
func (cl *Client) announceTorrentTrackersFastStart(req *tracker.AnnounceRequest, trackers [][]tracker.Client, t *torrent) (atLeastOne bool) {
|
|
|
|
oks := make(chan bool)
|
|
|
|
outstanding := 0
|
|
|
|
for _, tier := range trackers {
|
|
|
|
for _, tr := range tier {
|
|
|
|
outstanding++
|
|
|
|
go func(tr tracker.Client) {
|
|
|
|
err := cl.announceTorrentSingleTracker(tr, req, t)
|
|
|
|
oks <- err == nil
|
|
|
|
}(tr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for outstanding > 0 {
|
|
|
|
ok := <-oks
|
|
|
|
outstanding--
|
|
|
|
if ok {
|
|
|
|
atLeastOne = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-11-30 02:33:45 +00:00
|
|
|
// Announce torrent to its trackers. Runs as a long-lived loop, re-announcing
// whenever the torrent wants peers, and returns only when waitWantPeers
// reports the torrent/client is done.
func (cl *Client) announceTorrentTrackers(t *torrent) {
	// The first announce reports a "started" event. NumWant of -1 leaves
	// the number of returned peers up to the tracker.
	req := tracker.AnnounceRequest{
		Event: tracker.Started,
		NumWant: -1,
		Port: uint16(cl.incomingPeerPort()),
		PeerId: cl.peerID,
		InfoHash: t.InfoHash,
	}
	// Don't announce at all until the torrent actually wants peers.
	if !cl.waitWantPeers(t) {
		return
	}
	cl.mu.RLock()
	req.Left = uint64(t.bytesLeft())
	// Snapshot the tracker tiers; the list may be replaced while unlocked.
	trackers := t.Trackers
	cl.mu.RUnlock()
	// Hit every tracker concurrently once at startup. If any succeeded the
	// "started" event has been delivered, so later announces are ordinary.
	if cl.announceTorrentTrackersFastStart(&req, trackers, t) {
		req.Event = tracker.None
	}
newAnnounce:
	for cl.waitWantPeers(t) {
		cl.mu.RLock()
		// Refresh the bytes-left figure and tracker list for each round.
		req.Left = uint64(t.bytesLeft())
		trackers = t.Trackers
		cl.mu.RUnlock()
		numTrackersTried := 0
		// Walk tiers in order, trying each tracker until one succeeds.
		for _, tier := range trackers {
			for trIndex, tr := range tier {
				numTrackersTried++
				err := cl.announceTorrentSingleTracker(tr, &req, t)
				if err != nil {
					logonce.Stderr.Printf("%s: error announcing to %s: %s", t, tr, err)
					continue
				}
				// Float the successful announce to the top of the tier. If
				// the trackers list has been changed, we'll be modifying an
				// old copy so it won't matter.
				cl.mu.Lock()
				tier[0], tier[trIndex] = tier[trIndex], tier[0]
				cl.mu.Unlock()

				req.Event = tracker.None
				// A tracker accepted the announce; start the next round.
				continue newAnnounce
			}
		}
		if numTrackersTried != 0 {
			log.Printf("%s: all trackers failed", t)
		}
		// TODO: Wait until trackers are added if there are none.
		time.Sleep(10 * time.Second)
	}
}
|
|
|
|
|
|
|
|
func (cl *Client) allTorrentsCompleted() bool {
|
|
|
|
for _, t := range cl.torrents {
|
2014-09-14 17:25:53 +00:00
|
|
|
if !t.haveInfo() {
|
|
|
|
return false
|
|
|
|
}
|
2015-02-25 04:42:47 +00:00
|
|
|
if t.numPiecesCompleted() != t.numPieces() {
|
2014-03-16 15:30:10 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2014-04-08 16:36:05 +00:00
|
|
|
// Returns true when all torrents are completely downloaded and false if the
|
2014-06-29 14:22:05 +00:00
|
|
|
// client is stopped before that.
|
2014-04-08 16:36:05 +00:00
|
|
|
func (me *Client) WaitAll() bool {
|
2013-10-20 14:07:01 +00:00
|
|
|
me.mu.Lock()
|
2014-04-08 16:36:05 +00:00
|
|
|
defer me.mu.Unlock()
|
2014-03-16 15:30:10 +00:00
|
|
|
for !me.allTorrentsCompleted() {
|
2014-04-08 16:36:05 +00:00
|
|
|
if me.stopped() {
|
|
|
|
return false
|
|
|
|
}
|
2013-10-20 14:07:01 +00:00
|
|
|
me.event.Wait()
|
|
|
|
}
|
2014-04-08 16:36:05 +00:00
|
|
|
return true
|
2013-09-26 09:49:15 +00:00
|
|
|
}
|
|
|
|
|
2015-03-10 15:41:21 +00:00
|
|
|
func (me *Client) fillRequests(t *torrent, c *connection) {
|
|
|
|
if c.Interested {
|
|
|
|
if c.PeerChoked {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(c.Requests) > c.requestsLowWater {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
addRequest := func(req request) (again bool) {
|
2015-04-14 13:59:41 +00:00
|
|
|
// TODO: Couldn't this check also be done *after* the request?
|
2015-03-18 07:37:26 +00:00
|
|
|
if len(c.Requests) >= 64 {
|
2015-03-10 15:41:21 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
return c.Request(req)
|
|
|
|
}
|
2015-04-14 13:59:41 +00:00
|
|
|
for req := range t.urgent {
|
|
|
|
if !addRequest(req) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2015-03-10 15:41:21 +00:00
|
|
|
for e := c.pieceRequestOrder.First(); e != nil; e = e.Next() {
|
|
|
|
pieceIndex := e.Piece()
|
2015-03-12 09:06:23 +00:00
|
|
|
if !c.PeerHasPiece(pieceIndex) {
|
2015-03-10 15:41:21 +00:00
|
|
|
panic("piece in request order but peer doesn't have it")
|
|
|
|
}
|
|
|
|
if !t.wantPiece(pieceIndex) {
|
|
|
|
panic("unwanted piece in connection request order")
|
|
|
|
}
|
|
|
|
piece := t.Pieces[pieceIndex]
|
2015-07-15 05:31:18 +00:00
|
|
|
for _, cs := range piece.shuffledPendingChunkSpecs(t.pieceLength(pieceIndex), pp.Integer(t.chunkSize)) {
|
2015-03-10 15:41:21 +00:00
|
|
|
r := request{pp.Integer(pieceIndex), cs}
|
|
|
|
if !addRequest(r) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2014-05-21 07:40:54 +00:00
|
|
|
func (me *Client) replenishConnRequests(t *torrent, c *connection) {
|
2014-07-14 13:12:15 +00:00
|
|
|
if !t.haveInfo() {
|
|
|
|
return
|
|
|
|
}
|
2015-03-10 15:41:21 +00:00
|
|
|
me.fillRequests(t, c)
|
2014-06-28 09:38:31 +00:00
|
|
|
if len(c.Requests) == 0 && !c.PeerChoked {
|
2015-03-18 07:37:26 +00:00
|
|
|
// So we're not choked, but we don't want anything right now. We may
|
|
|
|
// have completed readahead, and the readahead window has not rolled
|
|
|
|
// over to the next piece. Better to stay interested in case we're
|
|
|
|
// going to want data in the near future.
|
|
|
|
c.SetInterested(!t.haveAllPieces())
|
2013-10-14 14:39:12 +00:00
|
|
|
}
|
2014-05-21 07:40:54 +00:00
|
|
|
}
|
|
|
|
|
2014-08-27 23:32:49 +00:00
|
|
|
// Handle a received chunk from a peer. Updates per-connection and global
// stats, writes the chunk to storage asynchronously, and updates piece /
// request bookkeeping across all connections. Always returns nil.
func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
	chunksReceived.Add(1)

	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))

	// Request has been satisfied.
	if me.connDeleteRequest(t, c, req) {
		defer me.replenishConnRequests(t, c)
	} else {
		// The chunk wasn't in this connection's outstanding request set.
		unexpectedChunksReceived.Add(1)
	}

	piece := t.Pieces[req.Index]

	// Do we actually want this chunk?
	if !t.wantChunk(req) {
		unwantedChunksReceived.Add(1)
		c.UnwantedChunksReceived++
		return nil
	}

	c.UsefulChunksReceived++
	c.lastUsefulChunkReceived = time.Now()

	// Receiving useful data is grounds to reconsider uploading to this peer.
	me.upload(t, c)

	// Write the chunk out asynchronously. pendingWrites tracks in-flight
	// writes for the piece (NOTE(review): presumably so hashing can wait
	// for them to land — confirm against the hashing path).
	piece.pendingWrites.Add(1)
	go func() {
		defer piece.pendingWrites.Done()
		// Write the chunk out.
		tr := perf.NewTimer()
		err := t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
		if err != nil {
			log.Printf("error writing chunk: %s", err)
			return
		}
		tr.Stop("write chunk")
	}()

	// log.Println("got chunk", req)
	piece.Event.Broadcast()
	// Record that we have the chunk.
	piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))
	// Whoever wanted this chunk urgently has now been satisfied.
	delete(t.urgent, req)
	// It's important that the piece is potentially queued before we check if
	// the piece is still wanted, because if it is queued, it won't be wanted.
	if piece.numPendingChunks() == 0 {
		me.queuePieceCheck(t, req.Index)
	}
	if !t.wantPiece(int(req.Index)) {
		// The piece is no longer wanted; drop it from every connection's
		// request order.
		for _, c := range t.Conns {
			c.pieceRequestOrder.DeletePiece(int(req.Index))
		}
	}

	// Cancel pending requests for this chunk.
	for _, c := range t.Conns {
		if me.connCancel(t, c, req) {
			me.replenishConnRequests(t, c)
		}
	}

	return nil
}
|
|
|
|
|
2014-05-21 08:01:58 +00:00
|
|
|
func (me *Client) pieceHashed(t *torrent, piece pp.Integer, correct bool) {
|
2013-10-20 14:07:01 +00:00
|
|
|
p := t.Pieces[piece]
|
2015-06-28 06:41:51 +00:00
|
|
|
if p.EverHashed {
|
|
|
|
if correct {
|
|
|
|
pieceHashedCorrect.Add(1)
|
|
|
|
} else {
|
|
|
|
log.Printf("%s: piece %d failed hash", t, piece)
|
|
|
|
pieceHashedNotCorrect.Add(1)
|
|
|
|
}
|
2014-09-13 17:57:51 +00:00
|
|
|
}
|
2013-10-20 14:07:01 +00:00
|
|
|
p.EverHashed = true
|
2015-02-27 01:45:55 +00:00
|
|
|
if correct {
|
2015-06-02 14:03:43 +00:00
|
|
|
err := t.data.PieceCompleted(int(piece))
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("error completing piece: %s", err)
|
|
|
|
correct = false
|
2015-02-27 01:45:55 +00:00
|
|
|
}
|
|
|
|
}
|
2015-03-10 15:41:21 +00:00
|
|
|
me.pieceChanged(t, int(piece))
|
|
|
|
}
|
|
|
|
|
2015-04-14 13:59:41 +00:00
|
|
|
// pieceChanged reacts to a change in a piece's completion state: it updates
// the piece's own bookkeeping, then fans the change out to every connection.
// TODO: Check this isn't called more than once for each piece being correct.
func (me *Client) pieceChanged(t *torrent, piece int) {
	correct := t.pieceComplete(piece)
	p := t.Pieces[piece]
	// Wake anyone waiting on this piece's state, whatever happens below.
	defer p.Event.Broadcast()
	if correct {
		// Piece is done: it needs no priority or pending chunks, and any
		// urgent requests into it are now moot.
		p.Priority = PiecePriorityNone
		p.PendingChunkSpecs = nil
		for req := range t.urgent {
			if int(req.Index) == piece {
				delete(t.urgent, req)
			}
		}
	} else {
		// Piece is incomplete (e.g. failed its hash). If nothing is marked
		// pending, re-pend every chunk so it can be downloaded again.
		if p.numPendingChunks() == 0 {
			t.pendAllChunkSpecs(int(piece))
		}
		if t.wantPiece(piece) {
			me.openNewConns(t)
		}
	}
	for _, conn := range t.Conns {
		if correct {
			// Advertise the newly completed piece to the peer.
			conn.Post(pp.Message{
				Type: pp.Have,
				Index: pp.Integer(piece),
			})
			// TODO: Cancel requests for this piece.
			for r := range conn.Requests {
				if int(r.Index) == piece {
					conn.Cancel(r)
				}
			}
			conn.pieceRequestOrder.DeletePiece(int(piece))
		} else if t.wantPiece(piece) && conn.PeerHasPiece(piece) {
			// Re-queue the piece on peers that can supply it.
			t.connPendPiece(conn, int(piece))
			me.replenishConnRequests(t, conn)
		}
	}
	me.event.Broadcast()
}
|
2013-09-30 11:51:08 +00:00
|
|
|
|
2014-05-21 08:01:58 +00:00
|
|
|
// verifyPiece hashes a piece and records via pieceHashed whether the digest
// matches the expected hash. The client lock is released for the duration
// of the hash computation itself.
func (cl *Client) verifyPiece(t *torrent, index pp.Integer) {
	cl.mu.Lock()
	defer cl.mu.Unlock()
	p := t.Pieces[index]
	// Wait until nobody else is hashing this piece and storage is attached.
	for p.Hashing || t.data == nil {
		cl.event.Wait()
	}
	p.QueuedForHash = false
	// Nothing to do if the torrent went away or the piece completed while
	// we were queued.
	if t.isClosed() || t.pieceComplete(int(index)) {
		return
	}
	p.Hashing = true
	// Release the client lock for the expensive hash computation; the
	// deferred Unlock pairs with the re-Lock below.
	cl.mu.Unlock()
	sum := t.hashPiece(index)
	cl.mu.Lock()
	// The torrent may have been closed while we were hashing unlocked.
	select {
	case <-t.closing:
		return
	default:
	}
	p.Hashing = false
	cl.pieceHashed(t, index, sum == p.Hash)
}
|
2013-10-06 07:01:39 +00:00
|
|
|
|
2015-03-08 06:28:14 +00:00
|
|
|
// Returns handles to all the torrents loaded in the Client.
|
2014-12-03 07:07:50 +00:00
|
|
|
func (me *Client) Torrents() (ret []Torrent) {
|
2013-10-20 14:07:01 +00:00
|
|
|
me.mu.Lock()
|
|
|
|
for _, t := range me.torrents {
|
2015-04-29 14:30:19 +00:00
|
|
|
ret = append(ret, Torrent{me, t})
|
2013-10-20 14:07:01 +00:00
|
|
|
}
|
|
|
|
me.mu.Unlock()
|
2013-10-06 07:01:39 +00:00
|
|
|
return
|
|
|
|
}
|
2015-03-18 07:32:31 +00:00
|
|
|
|
|
|
|
func (me *Client) AddMagnet(uri string) (T Torrent, err error) {
|
|
|
|
spec, err := TorrentSpecFromMagnetURI(uri)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
T, _, err = me.AddTorrentSpec(spec)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) AddTorrent(mi *metainfo.MetaInfo) (T Torrent, err error) {
|
|
|
|
T, _, err = me.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func (me *Client) AddTorrentFromFile(filename string) (T Torrent, err error) {
|
|
|
|
mi, err := metainfo.LoadFromFile(filename)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
T, _, err = me.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
|
|
|
|
return
|
|
|
|
}
|