
Can now download from magnet links

Matt Joiner 2014-06-28 19:38:31 +10:00
parent 9d53e19547
commit c96293a111
11 changed files with 257 additions and 145 deletions
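A minimal usage sketch of the new magnet support, based only on the calls exercised by the updated torrentfs test (torrent.Client, Start, AddMagnet, Stop); the data directory and the info-hash in the magnet URI below are placeholders, and the error handling is illustrative rather than taken from this commit.

package main

import (
	"log"

	"bitbucket.org/anacrolix/go.torrent"
)

func main() {
	// Client configuration mirrors the leecher set up in the torrentfs test.
	cl := torrent.Client{
		DataDir: "/tmp/torrent-data", // placeholder download directory
	}
	cl.Start()
	defer cl.Stop()
	// Only the info-hash is needed up front; the info dictionary is then
	// fetched from peers via the ut_metadata extension wired up in this commit.
	err := cl.AddMagnet("magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567")
	if err != nil {
		log.Fatal(err)
	}
	// Block here; a real caller would read torrent data on demand instead,
	// e.g. through TorrentReadAt or the FUSE frontend.
	select {}
}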

client.go

@@ -22,7 +22,6 @@ import (
 	"crypto/sha1"
 	"errors"
 	"fmt"
-	"github.com/nsf/libtorgo/bencode"
 	"io"
 	"log"
 	mathRand "math/rand"
@@ -32,7 +31,8 @@ import (
 	"syscall"
 	"time"
 
-	metainfo "github.com/nsf/libtorgo/torrent"
+	"github.com/anacrolix/libtorgo/metainfo"
+	"github.com/nsf/libtorgo/bencode"
 	pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
 	"bitbucket.org/anacrolix/go.torrent/tracker"
@@ -59,7 +59,7 @@ func (me *Client) PrioritizeDataRegion(ih InfoHash, off, len_ int64) error {
 	if t == nil {
 		return errors.New("no such active torrent")
 	}
-	if t.Info == nil {
+	if !t.haveInfo() {
 		return errors.New("missing metadata")
 	}
 	newPriorities := make([]request, 0, (len_+chunkSize-1)/chunkSize)
@@ -133,7 +133,7 @@ func (cl *Client) TorrentReadAt(ih InfoHash, off int64, p []byte) (n int, err er
 		err = errors.New("unknown torrent")
 		return
 	}
-	index := pp.Integer(off / t.Info.PieceLength())
+	index := pp.Integer(off / int64(t.UsualPieceSize()))
 	// Reading outside the bounds of a file is an error.
 	if index < 0 {
 		err = os.ErrInvalid
@@ -354,11 +354,15 @@ func (me *Client) runConnection(sock net.Conn, torrent *torrent) (err error) {
 			Type: pp.Extended,
 			ExtendedID: pp.HandshakeExtendedID,
 			ExtendedPayload: func() []byte {
-				b, err := bencode.Marshal(map[string]interface{}{
+				d := map[string]interface{}{
 					"m": map[string]int{
 						"ut_metadata": 1,
 					},
-				})
+				}
+				if torrent.metadataSizeKnown() {
+					d["metadata_size"] = torrent.metadataSize()
+				}
+				b, err := bencode.Marshal(d)
 				if err != nil {
 					panic(err)
 				}
@@ -415,8 +419,8 @@ func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
 		return
 	}
 	var pending []int
-	for index, have := range t.MetaDataHave {
-		if !have {
+	for index := 0; index < t.MetadataPieceCount(); index++ {
+		if !t.HaveMetadataPiece(index) {
 			pending = append(pending, index)
 		}
 	}
@@ -438,6 +442,63 @@ func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
 	}
 }
 
+func (cl *Client) completedMetadata(t *torrent) {
+	h := sha1.New()
+	h.Write(t.MetaData)
+	var ih InfoHash
+	copy(ih[:], h.Sum(nil)[:])
+	if ih != t.InfoHash {
+		log.Print("bad metadata")
+		t.InvalidateMetadata()
+		return
+	}
+	var info metainfo.Info
+	err := bencode.Unmarshal(t.MetaData, &info)
+	if err != nil {
+		log.Printf("error unmarshalling metadata: %s", err)
+		t.InvalidateMetadata()
+		return
+	}
+	cl.setMetaData(t, info, t.MetaData)
+}
+
+func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
+	var d map[string]int
+	err = bencode.Unmarshal(payload, &d)
+	if err != nil {
+		err = fmt.Errorf("error unmarshalling payload: %s", err)
+		return
+	}
+	msgType, ok := d["msg_type"]
+	if !ok {
+		err = errors.New("missing msg_type field")
+		return
+	}
+	piece := d["piece"]
+	log.Println(piece, d["total_size"], len(payload))
+	switch msgType {
+	case pp.DataMetadataExtensionMsgType:
+		if t.haveInfo() {
+			break
+		}
+		t.SaveMetadataPiece(piece, payload[len(payload)-metadataPieceSize(d["total_size"], piece):])
+		if !t.HaveAllMetadataPieces() {
+			break
+		}
+		cl.completedMetadata(t)
+	case pp.RequestMetadataExtensionMsgType:
+		if !t.HaveMetadataPiece(piece) {
+			c.Post(t.NewMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
+			break
+		}
+		c.Post(t.NewMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[(1<<14)*piece:(1<<14)*piece+t.metadataPieceSize(piece)]))
+	case pp.RejectMetadataExtensionMsgType:
+	default:
+		err = errors.New("unknown msg_type value")
+	}
+	return
+}
+
 func (me *Client) connectionLoop(t *torrent, c *connection) error {
 	decoder := pp.Decoder{
 		R: bufio.NewReader(c.Socket),
@@ -565,37 +626,17 @@ func (me *Client) connectionLoop(t *torrent, c *connection) error {
 					log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
 				} else {
 					log.Printf("metadata_size: %d", metadata_size)
-					t.SetMetaDataSize(metadata_size)
+					t.SetMetadataSize(metadata_size)
 				}
 			}
+			log.Println(metadata_sizeUntyped, c.PeerExtensionIDs)
 			if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
 				me.requestPendingMetadata(t, c)
 			}
 		case 1:
-			var d map[string]int
-			err := bencode.Unmarshal(msg.ExtendedPayload, &d)
-			if err != nil {
-				err = fmt.Errorf("error unmarshalling extended payload: %s", err)
-				break
-			}
-			if d["msg_type"] != 1 {
-				break
-			}
-			piece := d["piece"]
-			log.Println(piece, d["total_size"], len(msg.ExtendedPayload))
-			copy(t.MetaData[(1<<14)*piece:], msg.ExtendedPayload[len(msg.ExtendedPayload)-metadataPieceSize(d["total_size"], piece):])
-			t.MetaDataHave[piece] = true
-			if !t.GotAllMetadataPieces() {
-				break
-			}
-			log.Printf("%q", t.MetaData)
-			h := sha1.New()
-			h.Write(t.MetaData)
-			var ih InfoHash
-			copy(ih[:], h.Sum(nil)[:])
-			if ih != t.InfoHash {
-				panic(ih)
-			}
+			err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
+		default:
+			err = fmt.Errorf("unexpected extended message ID: %s", msg.ExtendedID)
 		}
 	default:
 		err = fmt.Errorf("received unknown message type: %#v", msg.Type)
@@ -665,20 +706,11 @@ func (me *Client) AddPeers(infoHash InfoHash, peers []Peer) error {
 	return nil
 }
 
-func (cl *Client) setMetaData(t *torrent, md MetaData) (err error) {
-	t.Info = md
-	t.Data, err = mmapTorrentData(md, cl.DataDir)
+func (cl *Client) setMetaData(t *torrent, md metainfo.Info, bytes []byte) (err error) {
+	err = t.setMetadata(md, cl.DataDir, bytes)
 	if err != nil {
 		return
 	}
-	for _, hash := range md.PieceHashes() {
-		piece := &piece{}
-		copyHashSum(piece.Hash[:], []byte(hash))
-		t.Pieces = append(t.Pieces, piece)
-		t.pendAllChunkSpecs(pp.Integer(len(t.Pieces) - 1))
-	}
-	t.Priorities = list.New()
 	// Queue all pieces for hashing. This is done sequentially to avoid
 	// spamming goroutines.
 	for _, p := range t.Pieces {
@@ -767,7 +799,7 @@ func (me *Client) AddTorrent(metaInfo *metainfo.MetaInfo) (err error) {
 	if err != nil {
 		return
 	}
-	err = me.setMetaData(t, metaInfoMetaData{metaInfo})
+	err = me.setMetaData(t, metaInfo.Info, metaInfo.InfoBytes)
 	if err != nil {
 		return
 	}
@@ -921,7 +953,7 @@ func (s *DefaultDownloadStrategy) FillRequests(t *torrent, c *connection) {
 	}
 	ppbs := t.piecesByPendingBytes()
 	// Then finish off incomplete pieces in order of bytes remaining.
-	for _, heatThreshold := range []int{0, 4, 100} {
+	for _, heatThreshold := range []int{0, 1, 4, 100} {
 		for _, pieceIndex := range ppbs {
 			for _, chunkSpec := range t.Pieces[pieceIndex].shuffledPendingChunkSpecs() {
 				r := request{pieceIndex, chunkSpec}
@@ -973,6 +1005,10 @@ func (ResponsiveDownloadStrategy) DeleteRequest(*torrent, request) {}
 
 func (me *ResponsiveDownloadStrategy) FillRequests(t *torrent, c *connection) {
 	for e := t.Priorities.Front(); e != nil; e = e.Next() {
+		req := e.Value.(request)
+		if _, ok := t.Pieces[req.Index].PendingChunkSpecs[req.chunkSpec]; !ok {
+			panic(req)
+		}
 		if !c.Request(e.Value.(request)) {
 			return
 		}
@@ -1004,13 +1040,14 @@ func (me *ResponsiveDownloadStrategy) FillRequests(t *torrent, c *connection) {
 func (me *Client) replenishConnRequests(t *torrent, c *connection) {
 	me.DownloadStrategy.FillRequests(t, c)
 	me.assertRequestHeat()
-	if len(c.Requests) == 0 {
+	if len(c.Requests) == 0 && !c.PeerChoked {
 		c.SetInterested(false)
 	}
 }
 
 func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
 	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))
+	log.Println("got", req)
 
 	// Request has been satisfied.
 	me.connDeleteRequest(t, c, req)
@@ -1028,7 +1065,6 @@ func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) er
 	if err != nil {
 		return err
 	}
-	me.dataReady(dataSpec{t.InfoHash, req})
 
 	// Record that we have the chunk.
 	delete(t.Pieces[req.Index].PendingChunkSpecs, req.chunkSpec)
@@ -1053,6 +1089,7 @@ func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) er
 		}
 	}
 
+	me.dataReady(dataSpec{t.InfoHash, req})
 	return nil
 }


@@ -29,7 +29,11 @@ func TestPieceHashSize(t *testing.T) {
 func TestTorrentInitialState(t *testing.T) {
 	dir, mi := testutil.GreetingTestTorrent()
 	defer os.RemoveAll(dir)
-	tor, err := newTorrent(mi, dir)
+	tor, err := newTorrent(BytesInfoHash(mi.InfoHash), nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = tor.setMetadata(mi.Info, dir, mi.InfoBytes)
 	if err != nil {
 		t.Fatal(err)
 	}


@@ -10,7 +10,7 @@ import (
 	"os"
 	"strings"
 
-	metainfo "github.com/nsf/libtorgo/torrent"
+	"github.com/anacrolix/libtorgo/metainfo"
 	"bitbucket.org/anacrolix/go.torrent"
 )


@@ -17,7 +17,7 @@ import (
 	fusefs "bazil.org/fuse/fs"
 	"bitbucket.org/anacrolix/go.torrent"
 	"bitbucket.org/anacrolix/go.torrent/fs"
-	metainfo "github.com/nsf/libtorgo/torrent"
+	"github.com/anacrolix/libtorgo/metainfo"
 )
 
 var (


@@ -119,13 +119,13 @@ func (c *connection) Request(chunk request) bool {
 	if !c.PeerHasPiece(chunk.Index) {
 		return true
 	}
-	if c.RequestPending(chunk) {
-		return true
-	}
 	c.SetInterested(true)
 	if c.PeerChoked {
 		return false
 	}
+	if c.RequestPending(chunk) {
+		return true
+	}
 	if c.Requests == nil {
 		c.Requests = make(map[request]struct{}, c.PeerMaxRequests)
 	}
@@ -214,6 +214,7 @@ var (
 func (conn *connection) writer() {
 	for b := range conn.write {
 		_, err := conn.Socket.Write(b)
+		// log.Printf("wrote %q to %s", b, conn.Socket.RemoteAddr())
 		if err != nil {
 			if !conn.getClosed() {
 				log.Print(err)


@@ -1,14 +1,13 @@
 package torrentfs
 
 import (
-	"log"
-	"os"
-	"sync"
-
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
 	"bitbucket.org/anacrolix/go.torrent"
-	metainfo "github.com/nsf/libtorgo/torrent"
+	"github.com/anacrolix/libtorgo/metainfo"
+	"log"
+	"os"
+	"sync"
 )
 
 const (
@@ -31,7 +30,7 @@ type rootNode struct {
 
 type node struct {
 	path []string
-	metaInfo *metainfo.MetaInfo
+	metadata *metainfo.Info
 	FS *torrentFS
 	InfoHash torrent.InfoHash
 }
@@ -59,9 +58,9 @@ func (fn fileNode) Read(req *fuse.ReadRequest, resp *fuse.ReadResponse, intr fus
 	if size < 0 {
 		size = 0
 	}
-	infoHash := torrent.BytesInfoHash(fn.metaInfo.InfoHash)
+	infoHash := fn.InfoHash
 	torrentOff := fn.TorrentOffset + req.Offset
-	// log.Print(torrentOff, size, fn.TorrentOffset)
+	log.Print(torrentOff, size, fn.TorrentOffset)
 	if err := fn.FS.Client.PrioritizeDataRegion(infoHash, torrentOff, int64(size)); err != nil {
 		panic(err)
 	}
@@ -112,7 +111,7 @@ func isSubPath(parent, child []string) bool {
 
 func (dn dirNode) ReadDir(intr fusefs.Intr) (des []fuse.Dirent, err fuse.Error) {
 	names := map[string]bool{}
-	for _, fi := range dn.metaInfo.Files {
+	for _, fi := range dn.metadata.Files {
 		if !isSubPath(dn.path, fi.Path) {
 			continue
 		}
@@ -136,7 +135,7 @@ func (dn dirNode) ReadDir(intr fusefs.Intr) (des []fuse.Dirent, err fuse.Error)
 
 func (dn dirNode) Lookup(name string, intr fusefs.Intr) (_node fusefs.Node, err fuse.Error) {
 	var torrentOffset int64
-	for _, fi := range dn.metaInfo.Files {
+	for _, fi := range dn.metadata.Files {
 		if !isSubPath(dn.path, fi.Path) {
 			torrentOffset += fi.Length
 			continue
@@ -169,26 +168,26 @@ func (dn dirNode) Attr() (attr fuse.Attr) {
 	return
 }
 
-func isSingleFileTorrent(mi *metainfo.MetaInfo) bool {
-	return len(mi.Files) == 1 && mi.Files[0].Path == nil
+func isSingleFileTorrent(md *metainfo.Info) bool {
+	return len(md.Files) == 0
 }
 
 func (me rootNode) Lookup(name string, intr fusefs.Intr) (_node fusefs.Node, err fuse.Error) {
-	for _, _torrent := range me.fs.Client.Torrents() {
-		metaInfo := _torrent.MetaInfo
-		if metaInfo.Name == name {
-			__node := node{
-				metaInfo: metaInfo,
-				FS: me.fs,
-				InfoHash: torrent.BytesInfoHash(metaInfo.InfoHash),
-			}
-			if isSingleFileTorrent(metaInfo) {
-				_node = fileNode{__node, uint64(metaInfo.Files[0].Length), 0}
-			} else {
-				_node = dirNode{__node}
-			}
-			break
-		}
+	for _, t := range me.fs.Client.Torrents() {
+		if t.Name() != name {
+			continue
+		}
+		__node := node{
+			metadata: t.Info,
+			FS: me.fs,
+			InfoHash: t.InfoHash,
+		}
+		if isSingleFileTorrent(t.Info) {
+			_node = fileNode{__node, uint64(t.Info.Length), 0}
+		} else {
+			_node = dirNode{__node}
+		}
+		break
 	}
 	if _node == nil {
 		err = fuse.ENOENT
@@ -198,7 +197,7 @@ func (me rootNode) Lookup(name string, intr fusefs.Intr) (_node fusefs.Node, err
 
 func (me rootNode) ReadDir(intr fusefs.Intr) (dirents []fuse.Dirent, err fuse.Error) {
 	for _, _torrent := range me.fs.Client.Torrents() {
-		metaInfo := _torrent.MetaInfo
+		metaInfo := _torrent.Info
 		dirents = append(dirents, fuse.Dirent{
 			Name: metaInfo.Name,
 			Type: func() fuse.DirentType {


@@ -2,6 +2,7 @@ package torrentfs
 
 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"log"
 	"net"
@@ -15,7 +16,7 @@ import (
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
 	"bitbucket.org/anacrolix/go.torrent"
-	metainfo "github.com/nsf/libtorgo/torrent"
+	"github.com/anacrolix/libtorgo/metainfo"
 )
 
 func TestTCPAddrString(t *testing.T) {
@@ -83,6 +84,7 @@ func TestUnmountWedged(t *testing.T) {
 		DisableTrackers: true,
 	}
 	client.Start()
+	log.Printf("%+v", *layout.Metainfo)
 	client.AddTorrent(layout.Metainfo)
 	fs := New(&client)
 	fuseConn, err := fuse.Mount(layout.MountDir)
@@ -132,14 +134,19 @@ func TestDownloadOnDemand(t *testing.T) {
 			}
 			return conn
 		}(),
+		DisableTrackers: true,
 	}
 	defer seeder.Listener.Close()
 	seeder.Start()
 	defer seeder.Stop()
-	seeder.AddTorrent(layout.Metainfo)
+	err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%x", layout.Metainfo.InfoHash))
+	if err != nil {
+		t.Fatal(err)
+	}
 	leecher := torrent.Client{
 		DataDir: filepath.Join(layout.BaseDir, "download"),
 		DownloadStrategy: &torrent.ResponsiveDownloadStrategy{},
+		DisableTrackers: true,
 	}
 	leecher.Start()
 	defer leecher.Stop()
@@ -176,7 +183,9 @@ func TestDownloadOnDemand(t *testing.T) {
 	}
 	go func() {
 		time.Sleep(10 * time.Second)
-		fuse.Unmount(mountDir)
+		if err := fuse.Unmount(mountDir); err != nil {
+			t.Log(err)
+		}
 	}()
 	content, err := ioutil.ReadFile(filepath.Join(mountDir, "greeting"))
 	if err != nil {

misc.go

@@ -2,16 +2,15 @@ package torrent
 
 import (
 	"bitbucket.org/anacrolix/go.torrent/mmap_span"
-	"bitbucket.org/anacrolix/go.torrent/peer_protocol"
 	"crypto"
 	"errors"
-	metainfo "github.com/nsf/libtorgo/torrent"
-	"launchpad.net/gommap"
+	"github.com/anacrolix/libtorgo/metainfo"
 	"math/rand"
 	"os"
 	"path/filepath"
 	"time"
+
+	"bitbucket.org/anacrolix/go.torrent/peer_protocol"
+	"launchpad.net/gommap"
 )
 
 const (
@@ -103,44 +102,22 @@ var (
 	ErrDataNotReady = errors.New("data not ready")
 )
 
-type metaInfoMetaData struct {
-	mi *metainfo.MetaInfo
-}
-
-func (me metaInfoMetaData) Files() []metainfo.FileInfo { return me.mi.Files }
-func (me metaInfoMetaData) Name() string { return me.mi.Name }
-func (me metaInfoMetaData) PieceHashes() (ret []string) {
-	for i := 0; i < len(me.mi.Pieces); i += 20 {
-		ret = append(ret, string(me.mi.Pieces[i:i+20]))
-	}
-	return
-}
-func (me metaInfoMetaData) PieceLength() int64 { return me.mi.PieceLength }
-func (me metaInfoMetaData) PieceCount() int {
-	return len(me.mi.Pieces) / pieceHash.Size()
-}
-
-func NewMetaDataFromMetaInfo(mi *metainfo.MetaInfo) MetaData {
-	return metaInfoMetaData{mi}
-}
-
-type MetaData interface {
-	PieceHashes() []string
-	Files() []metainfo.FileInfo
-	Name() string
-	PieceLength() int64
-	PieceCount() int
-}
-
-func mmapTorrentData(md MetaData, location string) (mms mmap_span.MMapSpan, err error) {
+func upvertedSingleFileInfoFiles(info *metainfo.Info) []metainfo.FileInfo {
+	if len(info.Files) != 0 {
+		return info.Files
+	}
+	return []metainfo.FileInfo{{Length: info.Length, Path: nil}}
+}
+
+func mmapTorrentData(md *metainfo.Info, location string) (mms mmap_span.MMapSpan, err error) {
 	defer func() {
 		if err != nil {
 			mms.Close()
 			mms = nil
 		}
 	}()
-	for _, miFile := range md.Files() {
-		fileName := filepath.Join(append([]string{location, md.Name()}, miFile.Path...)...)
+	for _, miFile := range upvertedSingleFileInfoFiles(md) {
+		fileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)
 		err = os.MkdirAll(filepath.Dir(fileName), 0777)
 		if err != nil {
 			return


@@ -36,6 +36,10 @@ const (
 	Extended = 20
 
 	HandshakeExtendedID = 0
+
+	RequestMetadataExtensionMsgType = 0
+	DataMetadataExtensionMsgType = 1
+	RejectMetadataExtensionMsgType = 2
 )
 
 type Message struct {
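The three constants added above follow the ut_metadata extension (BEP 9) message types: request = 0, data = 1, reject = 2, with the info dictionary transferred in 16 KiB pieces, which is where the (1<<14) arithmetic in client.go and torrent.go comes from. The metadataPieceSize helper those files call is not shown in this diff; the sketch below is an assumed equivalent of that arithmetic, not the actual implementation.

package main

import "fmt"

const metadataPieceMax = 1 << 14 // 16 KiB metadata pieces, per BEP 9

// Assumed equivalents of the helpers referenced in the diff.
func metadataPieceCount(totalSize int) int {
	return (totalSize + metadataPieceMax - 1) / metadataPieceMax
}

func metadataPieceSize(totalSize, piece int) int {
	// Every piece is 16 KiB except the last, which carries the remainder.
	ret := totalSize - piece*metadataPieceMax
	if ret > metadataPieceMax {
		ret = metadataPieceMax
	}
	return ret
}

func main() {
	// A 20000-byte info dict splits into two pieces of 16384 and 3616 bytes.
	fmt.Println(metadataPieceCount(20000), metadataPieceSize(20000, 0), metadataPieceSize(20000, 1))
}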


@@ -6,14 +6,13 @@
 package testutil
 
 import (
+	"bytes"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 
-	metainfo "github.com/nsf/libtorgo/torrent"
-
-	"bytes"
+	"github.com/anacrolix/libtorgo/metainfo"
 )
 
 const GreetingFileContents = "hello, world\n"


@@ -1,16 +1,17 @@
 package torrent
 
 import (
-	"bitbucket.org/anacrolix/go.torrent/mmap_span"
-	pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
-	"bitbucket.org/anacrolix/go.torrent/tracker"
 	"container/list"
 	"fmt"
+	"github.com/anacrolix/libtorgo/bencode"
+	"github.com/anacrolix/libtorgo/metainfo"
 	"io"
 	"log"
 	"net"
 	"sort"
+
+	"bitbucket.org/anacrolix/go.torrent/mmap_span"
+	pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
+	"bitbucket.org/anacrolix/go.torrent/tracker"
 )
 
 func (t *torrent) PieceNumPendingBytes(index pp.Integer) (count pp.Integer) {
@@ -29,7 +30,7 @@ type torrent struct {
 	InfoHash InfoHash
 	Pieces []*piece
 	Data mmap_span.MMapSpan
-	Info MetaData
+	Info *metainfo.Info
 	Conns []*connection
 	Peers []Peer
 	Priorities *list.List
@@ -39,14 +40,73 @@ type torrent struct {
 	lastReadPiece int
 	DisplayName string
 	MetaData []byte
-	MetaDataHave []bool
+	metadataHave []bool
 }
 
-func (t *torrent) GotAllMetadataPieces() bool {
-	if t.MetaDataHave == nil {
+func (t *torrent) InvalidateMetadata() {
+	for i := range t.metadataHave {
+		t.metadataHave[i] = false
+	}
+	t.Info = nil
+}
+
+func (t *torrent) SaveMetadataPiece(index int, data []byte) {
+	if t.haveInfo() {
+		return
+	}
+	copy(t.MetaData[(1<<14)*index:], data)
+	t.metadataHave[index] = true
+}
+
+func (t *torrent) MetadataPieceCount() int {
+	return (len(t.MetaData) + (1 << 14) - 1) / (1 << 14)
+}
+
+func (t *torrent) HaveMetadataPiece(piece int) bool {
+	return t.haveInfo() || t.metadataHave[piece]
+}
+
+func (t *torrent) metadataSizeKnown() bool {
+	return t.MetaData != nil
+}
+
+func (t *torrent) metadataSize() int {
+	return len(t.MetaData)
+}
+
+func infoPieceHashes(info *metainfo.Info) (ret []string) {
+	for i := 0; i < len(info.Pieces); i += 20 {
+		ret = append(ret, string(info.Pieces[i:i+20]))
+	}
+	return
+}
+
+func (t *torrent) setMetadata(md metainfo.Info, dataDir string, infoBytes []byte) (err error) {
+	t.Info = &md
+	t.MetaData = infoBytes
+	t.metadataHave = nil
+	t.Data, err = mmapTorrentData(&md, dataDir)
+	if err != nil {
+		return
+	}
+	for _, hash := range infoPieceHashes(&md) {
+		piece := &piece{}
+		copyHashSum(piece.Hash[:], []byte(hash))
+		t.Pieces = append(t.Pieces, piece)
+		t.pendAllChunkSpecs(pp.Integer(len(t.Pieces) - 1))
+	}
+	t.Priorities = list.New()
+	return
+}
+
+func (t *torrent) HaveAllMetadataPieces() bool {
+	if t.haveInfo() {
+		return true
+	}
+	if t.metadataHave == nil {
 		return false
 	}
-	for _, have := range t.MetaDataHave {
+	for _, have := range t.metadataHave {
 		if !have {
 			return false
 		}
@@ -54,22 +114,19 @@ func (t *torrent) GotAllMetadataPieces() bool {
 	return true
 }
 
-func (t *torrent) SetMetaDataSize(bytes int64) {
+func (t *torrent) SetMetadataSize(bytes int64) {
 	if t.MetaData != nil {
-		if len(t.MetaData) != int(bytes) {
-			log.Printf("new metadata_size differs")
-		}
 		return
 	}
 	t.MetaData = make([]byte, bytes)
-	t.MetaDataHave = make([]bool, (bytes+(1<<14)-1)/(1<<14))
+	t.metadataHave = make([]bool, (bytes+(1<<14)-1)/(1<<14))
 }
 
 func (t *torrent) Name() string {
-	if t.Info == nil {
+	if !t.haveInfo() {
 		return t.DisplayName
 	}
-	return t.Info.Name()
+	return t.Info.Name
 }
@@ -88,6 +145,30 @@ func (t *torrent) pieceStatusChar(index int) byte {
 	}
 }
 
+func (t *torrent) metadataPieceSize(piece int) int {
+	return metadataPieceSize(len(t.MetaData), piece)
+}
+
+func (t *torrent) NewMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
+	d := map[string]int{
+		"msg_type": msgType,
+		"piece": piece,
+	}
+	if data != nil {
+		d["total_size"] = len(t.MetaData)
+	}
+	p, err := bencode.Marshal(d)
+	if err != nil {
+		panic(err)
+	}
+	return pp.Message{
+		Type: pp.Extended,
+		ExtendedID: byte(c.PeerExtensionIDs["ut_metadata"]),
+		ExtendedPayload: append(p, data...),
+	}
+}
+
 func (t *torrent) WriteStatus(w io.Writer) {
 	fmt.Fprint(w, "Pieces: ")
 	for index := range t.Pieces {
@@ -136,7 +217,7 @@ func (t *torrent) ChunkCount() (num int) {
 }
 
 func (t *torrent) UsualPieceSize() int {
-	return int(t.Info.PieceLength())
+	return int(t.Info.PieceLength)
 }
 
 func (t *torrent) LastPieceSize() int {
@@ -144,7 +225,7 @@ func (t *torrent) LastPieceSize() int {
 }
 
 func (t *torrent) NumPieces() int {
-	return t.Info.PieceCount()
+	return len(t.Info.Pieces) / 20
 }
 
 func (t *torrent) NumPiecesCompleted() (num int) {
@@ -208,16 +289,16 @@ func torrentRequestOffset(torrentLength, pieceSize int64, r request) (off int64)
 }
 
 func (t *torrent) requestOffset(r request) int64 {
-	return torrentRequestOffset(t.Length(), t.Info.PieceLength(), r)
+	return torrentRequestOffset(t.Length(), int64(t.UsualPieceSize()), r)
 }
 
 // Return the request that would include the given offset into the torrent data.
 func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
-	return torrentOffsetRequest(t.Length(), t.Info.PieceLength(), chunkSize, off)
+	return torrentOffsetRequest(t.Length(), t.Info.PieceLength, chunkSize, off)
 }
 
 func (t *torrent) WriteChunk(piece int, begin int64, data []byte) (err error) {
-	_, err = t.Data.WriteAt(data, int64(piece)*t.Info.PieceLength()+begin)
+	_, err = t.Data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
 	return
 }
@@ -233,7 +314,7 @@ func (t *torrent) pendAllChunkSpecs(index pp.Integer) {
 	if piece.PendingChunkSpecs == nil {
 		piece.PendingChunkSpecs = make(
 			map[chunkSpec]struct{},
-			(t.Info.PieceLength()+chunkSize-1)/chunkSize)
+			(t.Info.PieceLength+chunkSize-1)/chunkSize)
 	}
 	c := chunkSpec{
 		Begin: 0,
@@ -258,21 +339,22 @@ type Peer struct {
 func (t *torrent) PieceLength(piece pp.Integer) (len_ pp.Integer) {
 	if int(piece) == t.NumPieces()-1 {
-		len_ = pp.Integer(t.Data.Size() % t.Info.PieceLength())
+		len_ = pp.Integer(t.Data.Size() % t.Info.PieceLength)
 	}
 	if len_ == 0 {
-		len_ = pp.Integer(t.Info.PieceLength())
+		len_ = pp.Integer(t.Info.PieceLength)
 	}
 	return
 }
 
 func (t *torrent) HashPiece(piece pp.Integer) (ps pieceSum) {
 	hash := pieceHash.New()
-	n, err := t.Data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength(), t.Info.PieceLength())
+	n, err := t.Data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength, t.Info.PieceLength)
 	if err != nil {
 		panic(err)
 	}
 	if pp.Integer(n) != t.PieceLength(piece) {
+		log.Print(t.Info)
 		panic(fmt.Sprintf("hashed wrong number of bytes: expected %d; did %d; piece %d", t.PieceLength(piece), n, piece))
 	}
 	copyHashSum(ps[:], hash.Sum(nil))