2020-05-30 07:52:27 +00:00
|
|
|
package torrent
|
|
|
|
|
|
|
|
import (
|
2020-06-04 01:50:20 +00:00
|
|
|
"fmt"
|
2020-07-10 03:18:33 +00:00
|
|
|
"strings"
|
2021-01-29 05:01:35 +00:00
|
|
|
"sync"
|
2020-06-01 08:25:45 +00:00
|
|
|
|
2020-06-02 06:18:25 +00:00
|
|
|
"github.com/anacrolix/torrent/common"
|
|
|
|
"github.com/anacrolix/torrent/metainfo"
|
2020-06-01 08:41:21 +00:00
|
|
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
2020-06-01 08:25:45 +00:00
|
|
|
"github.com/anacrolix/torrent/segments"
|
|
|
|
"github.com/anacrolix/torrent/webseed"
|
2020-07-10 03:18:33 +00:00
|
|
|
"github.com/pkg/errors"
|
2020-05-30 07:52:27 +00:00
|
|
|
)
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
// webseedPeer adapts an HTTP webseed (BEP 19) endpoint to the local peer
// abstraction, so the torrent can download from it like any other peer.
type webseedPeer struct {
	// HTTP client state used to issue ranged requests against the seed URL.
	client webseed.Client
	// In-flight HTTP requests, keyed by the torrent-level request they serve.
	activeRequests map[Request]webseed.Request
	// Signalled when new requests may be pending; the requester goroutine
	// waits on this.
	requesterCond sync.Cond
	// The generic peer state this webseed presents to the rest of the code.
	peer Peer
}
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
var _ peerImpl = (*webseedPeer)(nil)
|
2020-06-01 08:25:45 +00:00
|
|
|
|
2020-09-29 06:21:54 +00:00
|
|
|
func (me *webseedPeer) connStatusString() string {
|
|
|
|
return me.client.Url
|
|
|
|
}
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
func (ws *webseedPeer) String() string {
|
2020-06-04 01:50:20 +00:00
|
|
|
return fmt.Sprintf("webseed peer for %q", ws.client.Url)
|
|
|
|
}
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
// onGotInfo is invoked once the torrent's metainfo becomes available. It
// builds the file-extent index the webseed client needs to translate piece
// requests into HTTP byte ranges, then hands the client the info itself.
func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
	ws.client.FileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	ws.client.Info = info
}
|
|
|
|
|
2021-01-28 03:23:22 +00:00
|
|
|
// _postCancel satisfies the peerImpl hook for posting a cancel message.
// There is no wire protocol here, so it just cancels any outstanding HTTP
// request for r.
func (ws *webseedPeer) _postCancel(r Request) {
	ws.cancel(r)
}
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
// writeInterested is a no-op for webseeds: HTTP servers have no
// choking/interest protocol, so expressing interest always "succeeds".
func (ws *webseedPeer) writeInterested(interested bool) bool {
	return true
}
|
|
|
|
|
2021-01-28 03:23:22 +00:00
|
|
|
func (ws *webseedPeer) cancel(r Request) bool {
|
2021-01-29 05:01:35 +00:00
|
|
|
active, ok := ws.activeRequests[r]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
active.Cancel()
|
2020-06-01 08:25:45 +00:00
|
|
|
return true
|
2020-05-30 07:52:27 +00:00
|
|
|
}
|
|
|
|
|
2021-01-28 03:23:22 +00:00
|
|
|
// intoSpec converts a torrent-level request into the byte-range spec the
// webseed client consumes. The unkeyed literal fields are, in order, the
// request's absolute byte offset within the torrent and its length.
// NOTE(review): unkeyed struct literal is fragile if RequestSpec's field
// order changes — consider keyed fields; field names not visible here.
func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
	return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
}
|
|
|
|
|
2021-01-28 03:23:22 +00:00
|
|
|
// request nudges the requester goroutine to pick up newly queued requests.
// The actual HTTP fetch happens asynchronously; returning true tells the
// caller the request was accepted.
func (ws *webseedPeer) request(r Request) bool {
	ws.requesterCond.Signal()
	return true
}
|
|
|
|
|
2021-01-29 05:01:35 +00:00
|
|
|
// doRequest issues the HTTP request for r and blocks until its result has
// been handled. Must be called with requesterCond.L held: the request is
// registered in activeRequests under the lock, the lock is released around
// the blocking result handling (so cancel/request can run concurrently),
// then reacquired before the entry is removed.
func (ws *webseedPeer) doRequest(r Request) {
	webseedRequest := ws.client.NewRequest(ws.intoSpec(r))
	ws.activeRequests[r] = webseedRequest
	ws.requesterCond.L.Unlock()
	ws.requestResultHandler(r, webseedRequest)
	ws.requesterCond.L.Lock()
	delete(ws.activeRequests, r)
}
|
|
|
|
|
|
|
|
func (ws *webseedPeer) requester() {
|
|
|
|
ws.requesterCond.L.Lock()
|
|
|
|
defer ws.requesterCond.L.Unlock()
|
|
|
|
start:
|
|
|
|
for !ws.peer.closed.IsSet() {
|
|
|
|
for r := range ws.peer.requests {
|
|
|
|
if _, ok := ws.activeRequests[r]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
ws.doRequest(r)
|
|
|
|
goto start
|
|
|
|
}
|
|
|
|
ws.requesterCond.Wait()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
// connectionFlags contributes to connection status output; "WS" marks this
// connection as a webseed.
func (ws *webseedPeer) connectionFlags() string {
	return "WS"
}
|
|
|
|
|
2021-01-04 04:51:23 +00:00
|
|
|
// drop is a no-op: webseed connections are not dropped like wire peers.
//
// TODO: This is called when banning peers. Perhaps we want to be able to ban webseeds too. We could
// return bool if this is even possible, and if it isn't, skip to the next drop candidate.
func (ws *webseedPeer) drop() {}
|
2020-05-30 07:52:27 +00:00
|
|
|
|
2020-06-04 01:58:18 +00:00
|
|
|
// updateRequests recomputes desired request state via the shared peer
// logic, which calls back into request/cancel on this type as needed.
func (ws *webseedPeer) updateRequests() {
	ws.peer.doRequestState()
}
|
2020-05-31 03:09:56 +00:00
|
|
|
|
2021-01-29 05:01:35 +00:00
|
|
|
// onClose wakes the requester goroutine so it can observe the peer's closed
// flag and exit its loop.
func (ws *webseedPeer) onClose() {
	ws.requesterCond.Broadcast()
}
|
2020-06-01 08:41:21 +00:00
|
|
|
|
2021-01-28 03:23:22 +00:00
|
|
|
// requestResultHandler blocks until the webseed HTTP request completes,
// then feeds the outcome back into the torrent under the client lock:
// either delivering the data as a synthesized Piece message, or treating
// the failure as a rejected request (closing the peer entirely for URLs
// that can never work).
func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) {
	result := <-webseedRequest.Result
	ws.peer.t.cl.lock()
	defer ws.peer.t.cl.unlock()
	if result.Err != nil {
		ws.peer.logger.Printf("Request %v rejected: %v", r, result.Err)
		// Ideally we'd close the peer on any persistent error, but filtering
		// out temporary errors is a nightmare in Go, so closeOnAllErrors stays
		// false and we only close for errors that can never succeed
		// ("unsupported protocol scheme"). Consequence: a bad webseed URL can
		// starve out the good ones due to the chunk selection algorithm.
		const closeOnAllErrors = false
		if closeOnAllErrors || strings.Contains(errors.Cause(result.Err).Error(), "unsupported protocol scheme") {
			ws.peer.close()
		} else {
			// Treat the failure like a peer-protocol Reject so the request
			// can be retried elsewhere.
			ws.peer.remoteRejectedRequest(r)
		}
	} else {
		err := ws.peer.receiveChunk(&pp.Message{
			Type: pp.Piece,
			Index: r.Index,
			Begin: r.Begin,
			Piece: result.Bytes,
		})
		if err != nil {
			// A chunk we requested failing to be accepted indicates a
			// programming error upstream.
			panic(err)
		}
	}
}
|