New storage interface
This commit is contained in:
parent
4986c61138
commit
b97b50aca9
42
client.go
42
client.go
@ -32,12 +32,12 @@ import (
|
|||||||
"github.com/edsrzf/mmap-go"
|
"github.com/edsrzf/mmap-go"
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
"github.com/anacrolix/torrent/bencode"
|
||||||
filePkg "github.com/anacrolix/torrent/data/file"
|
|
||||||
"github.com/anacrolix/torrent/dht"
|
"github.com/anacrolix/torrent/dht"
|
||||||
"github.com/anacrolix/torrent/iplist"
|
"github.com/anacrolix/torrent/iplist"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
"github.com/anacrolix/torrent/mse"
|
"github.com/anacrolix/torrent/mse"
|
||||||
pp "github.com/anacrolix/torrent/peer_protocol"
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
||||||
|
"github.com/anacrolix/torrent/storage"
|
||||||
"github.com/anacrolix/torrent/tracker"
|
"github.com/anacrolix/torrent/tracker"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -153,7 +153,7 @@ type Client struct {
|
|||||||
// through legitimate channels.
|
// through legitimate channels.
|
||||||
dopplegangerAddrs map[string]struct{}
|
dopplegangerAddrs map[string]struct{}
|
||||||
|
|
||||||
torrentDataOpener TorrentDataOpener
|
defaultStorage storage.I
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
event sync.Cond
|
event sync.Cond
|
||||||
@ -376,20 +376,17 @@ func NewClient(cfg *Config) (cl *Client, err error) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
cl = &Client{
|
cl = &Client{
|
||||||
halfOpenLimit: socketsPerTorrent,
|
halfOpenLimit: socketsPerTorrent,
|
||||||
config: *cfg,
|
config: *cfg,
|
||||||
torrentDataOpener: func(md *metainfo.Info) Data {
|
defaultStorage: cfg.DefaultStorage,
|
||||||
return filePkg.TorrentData(md, cfg.DataDir)
|
|
||||||
},
|
|
||||||
dopplegangerAddrs: make(map[string]struct{}),
|
dopplegangerAddrs: make(map[string]struct{}),
|
||||||
torrents: make(map[InfoHash]*torrent),
|
torrents: make(map[InfoHash]*torrent),
|
||||||
}
|
}
|
||||||
CopyExact(&cl.extensionBytes, defaultExtensionBytes)
|
CopyExact(&cl.extensionBytes, defaultExtensionBytes)
|
||||||
cl.event.L = &cl.mu
|
cl.event.L = &cl.mu
|
||||||
if cfg.TorrentDataOpener != nil {
|
if cl.defaultStorage == nil {
|
||||||
cl.torrentDataOpener = cfg.TorrentDataOpener
|
cl.defaultStorage = storage.NewFile(cfg.DataDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
if cfg.IPBlocklist != nil {
|
if cfg.IPBlocklist != nil {
|
||||||
cl.ipBlockList = cfg.IPBlocklist
|
cl.ipBlockList = cfg.IPBlocklist
|
||||||
} else if !cfg.NoDefaultBlocklist {
|
} else if !cfg.NoDefaultBlocklist {
|
||||||
@ -1715,14 +1712,6 @@ func (cl *Client) saveTorrentFile(t *torrent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cl *Client) setStorage(t *torrent, td Data) (err error) {
|
|
||||||
t.setStorage(td)
|
|
||||||
cl.event.Broadcast()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type TorrentDataOpener func(*metainfo.Info) Data
|
|
||||||
|
|
||||||
func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err error) {
|
func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err error) {
|
||||||
err = t.setMetadata(md, bytes)
|
err = t.setMetadata(md, bytes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -1735,8 +1724,6 @@ func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err
|
|||||||
}
|
}
|
||||||
cl.event.Broadcast()
|
cl.event.Broadcast()
|
||||||
close(t.gotMetainfo)
|
close(t.gotMetainfo)
|
||||||
td := cl.torrentDataOpener(md)
|
|
||||||
err = cl.setStorage(t, td)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1903,6 +1890,7 @@ type TorrentSpec struct {
|
|||||||
// The chunk size to use for outbound requests. Defaults to 16KiB if not
|
// The chunk size to use for outbound requests. Defaults to 16KiB if not
|
||||||
// set.
|
// set.
|
||||||
ChunkSize int
|
ChunkSize int
|
||||||
|
Storage storage.I
|
||||||
}
|
}
|
||||||
|
|
||||||
func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
|
func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
|
||||||
@ -1948,6 +1936,8 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (T Torrent, new bool, err er
|
|||||||
if !ok {
|
if !ok {
|
||||||
new = true
|
new = true
|
||||||
|
|
||||||
|
// TODO: This doesn't belong in the core client, it's more of a
|
||||||
|
// helper.
|
||||||
if _, ok := cl.bannedTorrents[spec.InfoHash]; ok {
|
if _, ok := cl.bannedTorrents[spec.InfoHash]; ok {
|
||||||
err = errors.New("banned torrent")
|
err = errors.New("banned torrent")
|
||||||
return
|
return
|
||||||
@ -1959,6 +1949,10 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (T Torrent, new bool, err er
|
|||||||
if spec.ChunkSize != 0 {
|
if spec.ChunkSize != 0 {
|
||||||
t.chunkSize = pp.Integer(spec.ChunkSize)
|
t.chunkSize = pp.Integer(spec.ChunkSize)
|
||||||
}
|
}
|
||||||
|
t.storage = spec.Storage
|
||||||
|
if t.storage == nil {
|
||||||
|
t.storage = cl.defaultStorage
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if spec.DisplayName != "" {
|
if spec.DisplayName != "" {
|
||||||
t.setDisplayName(spec.DisplayName)
|
t.setDisplayName(spec.DisplayName)
|
||||||
@ -2299,7 +2293,9 @@ func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) {
|
|||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("error writing chunk: %s", err)
|
log.Printf("error writing chunk: %s", err)
|
||||||
|
// t.updatePieceCompletion(msg.Index)
|
||||||
t.pendRequest(req)
|
t.pendRequest(req)
|
||||||
|
// t.updatePiecePriority(msg.Index)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2346,9 +2342,9 @@ func (me *Client) pieceHashed(t *torrent, piece int, correct bool) {
|
|||||||
p.EverHashed = true
|
p.EverHashed = true
|
||||||
touchers := me.reapPieceTouches(t, int(piece))
|
touchers := me.reapPieceTouches(t, int(piece))
|
||||||
if correct {
|
if correct {
|
||||||
err := t.data.PieceCompleted(int(piece))
|
err := p.Storage().MarkComplete()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("%T: error completing piece %d: %s", t.data, piece, err)
|
log.Printf("%T: error completing piece %d: %s", t.storage, piece, err)
|
||||||
}
|
}
|
||||||
t.updatePieceCompletion(piece)
|
t.updatePieceCompletion(piece)
|
||||||
} else if len(touchers) != 0 {
|
} else if len(touchers) != 0 {
|
||||||
@ -2409,7 +2405,7 @@ func (cl *Client) verifyPiece(t *torrent, piece int) {
|
|||||||
cl.mu.Lock()
|
cl.mu.Lock()
|
||||||
defer cl.mu.Unlock()
|
defer cl.mu.Unlock()
|
||||||
p := &t.Pieces[piece]
|
p := &t.Pieces[piece]
|
||||||
for p.Hashing || t.data == nil {
|
for p.Hashing || t.storage == nil {
|
||||||
cl.event.Wait()
|
cl.event.Wait()
|
||||||
}
|
}
|
||||||
p.QueuedForHash = false
|
p.QueuedForHash = false
|
||||||
|
211
client_test.go
211
client_test.go
@ -19,19 +19,17 @@ import (
|
|||||||
_ "github.com/anacrolix/envpprof"
|
_ "github.com/anacrolix/envpprof"
|
||||||
"github.com/anacrolix/missinggo"
|
"github.com/anacrolix/missinggo"
|
||||||
. "github.com/anacrolix/missinggo"
|
. "github.com/anacrolix/missinggo"
|
||||||
"github.com/anacrolix/missinggo/filecache"
|
|
||||||
"github.com/anacrolix/utp"
|
"github.com/anacrolix/utp"
|
||||||
"github.com/bradfitz/iter"
|
"github.com/bradfitz/iter"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
"github.com/anacrolix/torrent/bencode"
|
||||||
"github.com/anacrolix/torrent/data/pieceStore"
|
|
||||||
"github.com/anacrolix/torrent/data/pieceStore/dataBackend/fileCache"
|
|
||||||
"github.com/anacrolix/torrent/dht"
|
"github.com/anacrolix/torrent/dht"
|
||||||
"github.com/anacrolix/torrent/internal/testutil"
|
"github.com/anacrolix/torrent/internal/testutil"
|
||||||
"github.com/anacrolix/torrent/iplist"
|
"github.com/anacrolix/torrent/iplist"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
|
"github.com/anacrolix/torrent/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@ -44,7 +42,7 @@ var TestingConfig = Config{
|
|||||||
DisableTrackers: true,
|
DisableTrackers: true,
|
||||||
NoDefaultBlocklist: true,
|
NoDefaultBlocklist: true,
|
||||||
DisableMetainfoCache: true,
|
DisableMetainfoCache: true,
|
||||||
DataDir: filepath.Join(os.TempDir(), "anacrolix"),
|
DataDir: "/dev/null",
|
||||||
DHTConfig: dht.ServerConfig{
|
DHTConfig: dht.ServerConfig{
|
||||||
NoDefaultBootstrap: true,
|
NoDefaultBootstrap: true,
|
||||||
},
|
},
|
||||||
@ -102,13 +100,12 @@ func TestTorrentInitialState(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}())
|
}())
|
||||||
tor.chunkSize = 2
|
tor.chunkSize = 2
|
||||||
|
tor.storage = storage.NewFile(dir)
|
||||||
|
// Needed to lock for asynchronous piece verification.
|
||||||
|
tor.cl = new(Client)
|
||||||
err := tor.setMetadata(&mi.Info.Info, mi.Info.Bytes)
|
err := tor.setMetadata(&mi.Info.Info, mi.Info.Bytes)
|
||||||
if err != nil {
|
require.NoError(t, err)
|
||||||
t.Fatal(err)
|
require.Len(t, tor.Pieces, 3)
|
||||||
}
|
|
||||||
if len(tor.Pieces) != 3 {
|
|
||||||
t.Fatal("wrong number of pieces")
|
|
||||||
}
|
|
||||||
tor.pendAllChunkSpecs(0)
|
tor.pendAllChunkSpecs(0)
|
||||||
assert.EqualValues(t, 3, tor.pieceNumPendingChunks(0))
|
assert.EqualValues(t, 3, tor.pieceNumPendingChunks(0))
|
||||||
assert.EqualValues(t, chunkSpec{4, 1}, chunkIndexSpec(2, tor.pieceLength(0), tor.chunkSize))
|
assert.EqualValues(t, chunkSpec{4, 1}, chunkIndexSpec(2, tor.pieceLength(0), tor.chunkSize))
|
||||||
@ -248,7 +245,9 @@ func TestAddDropManyTorrents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestClientTransferDefault(t *testing.T) {
|
func TestClientTransferDefault(t *testing.T) {
|
||||||
testClientTransfer(t, testClientTransferParams{})
|
testClientTransfer(t, testClientTransferParams{
|
||||||
|
ExportClientStatus: true,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClientTransferSmallCache(t *testing.T) {
|
func TestClientTransferSmallCache(t *testing.T) {
|
||||||
@ -301,21 +300,23 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
|
|||||||
if ps.ExportClientStatus {
|
if ps.ExportClientStatus {
|
||||||
testutil.ExportStatusWriter(seeder, "s")
|
testutil.ExportStatusWriter(seeder, "s")
|
||||||
}
|
}
|
||||||
seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
|
_, new, err := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, new)
|
||||||
leecherDataDir, err := ioutil.TempDir("", "")
|
leecherDataDir, err := ioutil.TempDir("", "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(leecherDataDir)
|
defer os.RemoveAll(leecherDataDir)
|
||||||
cfg.TorrentDataOpener = func() TorrentDataOpener {
|
// cfg.TorrentDataOpener = func() TorrentDataOpener {
|
||||||
fc, err := filecache.NewCache(leecherDataDir)
|
// fc, err := filecache.NewCache(leecherDataDir)
|
||||||
require.NoError(t, err)
|
// require.NoError(t, err)
|
||||||
if ps.SetLeecherStorageCapacity {
|
// if ps.SetLeecherStorageCapacity {
|
||||||
fc.SetCapacity(ps.LeecherStorageCapacity)
|
// fc.SetCapacity(ps.LeecherStorageCapacity)
|
||||||
}
|
// }
|
||||||
store := pieceStore.New(fileCacheDataBackend.New(fc))
|
// store := pieceStore.New(fileCacheDataBackend.New(fc))
|
||||||
return func(mi *metainfo.Info) Data {
|
// return func(mi *metainfo.Info) storage.I {
|
||||||
return store.OpenTorrentData(mi)
|
// return store.OpenTorrentData(mi)
|
||||||
}
|
// }
|
||||||
}()
|
// }()
|
||||||
leecher, err := NewClient(&cfg)
|
leecher, err := NewClient(&cfg)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer leecher.Close()
|
defer leecher.Close()
|
||||||
@ -325,6 +326,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
|
|||||||
leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
|
leecherGreeting, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
|
||||||
ret = TorrentSpecFromMetaInfo(mi)
|
ret = TorrentSpecFromMetaInfo(mi)
|
||||||
ret.ChunkSize = 2
|
ret.ChunkSize = 2
|
||||||
|
ret.Storage = storage.NewFile(leecherDataDir)
|
||||||
return
|
return
|
||||||
}())
|
}())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -372,7 +374,7 @@ func TestSeedAfterDownloading(t *testing.T) {
|
|||||||
defer leecher.Close()
|
defer leecher.Close()
|
||||||
testutil.ExportStatusWriter(leecher, "l")
|
testutil.ExportStatusWriter(leecher, "l")
|
||||||
cfg.Seed = false
|
cfg.Seed = false
|
||||||
cfg.TorrentDataOpener = nil
|
// cfg.TorrentDataOpener = nil
|
||||||
cfg.DataDir, err = ioutil.TempDir("", "")
|
cfg.DataDir, err = ioutil.TempDir("", "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(cfg.DataDir)
|
defer os.RemoveAll(cfg.DataDir)
|
||||||
@ -456,37 +458,41 @@ func TestMergingTrackersByAddingSpecs(t *testing.T) {
|
|||||||
assert.EqualValues(t, T.torrent.Trackers[1][0], "udp://b")
|
assert.EqualValues(t, T.torrent.Trackers[1][0], "udp://b")
|
||||||
}
|
}
|
||||||
|
|
||||||
type badData struct{}
|
type badStorage struct{}
|
||||||
|
|
||||||
func (me badData) Close() {}
|
func (me badStorage) Piece(p metainfo.Piece) storage.Piece {
|
||||||
|
return badStoragePiece{p}
|
||||||
|
}
|
||||||
|
|
||||||
func (me badData) WriteAt(b []byte, off int64) (int, error) {
|
type badStoragePiece struct {
|
||||||
|
p metainfo.Piece
|
||||||
|
}
|
||||||
|
|
||||||
|
func (me badStoragePiece) WriteAt(b []byte, off int64) (int, error) {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me badData) PieceComplete(piece int) bool {
|
func (me badStoragePiece) GetIsComplete() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me badData) PieceCompleted(piece int) error {
|
func (me badStoragePiece) MarkComplete() error {
|
||||||
return errors.New("psyyyyyyyche")
|
return errors.New("psyyyyyyyche")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me badData) randomlyTruncatedDataString() string {
|
func (me badStoragePiece) randomlyTruncatedDataString() string {
|
||||||
return "hello, world\n"[:rand.Intn(14)]
|
return "hello, world\n"[:rand.Intn(14)]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me badData) ReadAt(b []byte, off int64) (n int, err error) {
|
func (me badStoragePiece) ReadAt(b []byte, off int64) (n int, err error) {
|
||||||
r := strings.NewReader(me.randomlyTruncatedDataString())
|
r := strings.NewReader(me.randomlyTruncatedDataString())
|
||||||
return r.ReadAt(b, off)
|
return r.ReadAt(b, off+me.p.Offset())
|
||||||
}
|
}
|
||||||
|
|
||||||
// We read from a piece which is marked completed, but is missing data.
|
// We read from a piece which is marked completed, but is missing data.
|
||||||
func TestCompletedPieceWrongSize(t *testing.T) {
|
func TestCompletedPieceWrongSize(t *testing.T) {
|
||||||
cfg := TestingConfig
|
cfg := TestingConfig
|
||||||
cfg.TorrentDataOpener = func(*metainfo.Info) Data {
|
cfg.DefaultStorage = badStorage{}
|
||||||
return badData{}
|
|
||||||
}
|
|
||||||
cl, _ := NewClient(&cfg)
|
cl, _ := NewClient(&cfg)
|
||||||
defer cl.Close()
|
defer cl.Close()
|
||||||
tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
|
tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
|
||||||
@ -701,57 +707,55 @@ func TestTorrentDroppedBeforeGotInfo(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool) {
|
// func testAddTorrentPriorPieceCompletion(t *testing.T, alreadyCompleted bool) {
|
||||||
fileCacheDir, err := ioutil.TempDir("", "")
|
// fileCacheDir, err := ioutil.TempDir("", "")
|
||||||
require.NoError(t, err)
|
// require.NoError(t, err)
|
||||||
defer os.RemoveAll(fileCacheDir)
|
// defer os.RemoveAll(fileCacheDir)
|
||||||
fileCache, err := filecache.NewCache(fileCacheDir)
|
// fileCache, err := filecache.NewCache(fileCacheDir)
|
||||||
require.NoError(t, err)
|
// require.NoError(t, err)
|
||||||
greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
|
// greetingDataTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
|
||||||
defer os.RemoveAll(greetingDataTempDir)
|
// defer os.RemoveAll(greetingDataTempDir)
|
||||||
filePieceStore := pieceStore.New(fileCacheDataBackend.New(fileCache))
|
// filePieceStore := pieceStore.New(fileCacheDataBackend.New(fileCache))
|
||||||
greetingData := filePieceStore.OpenTorrentData(&greetingMetainfo.Info.Info)
|
// greetingData := filePieceStore.OpenTorrentData(&greetingMetainfo.Info.Info)
|
||||||
written, err := greetingData.WriteAt([]byte(testutil.GreetingFileContents), 0)
|
// written, err := greetingData.WriteAt([]byte(testutil.GreetingFileContents), 0)
|
||||||
require.Equal(t, len(testutil.GreetingFileContents), written)
|
// require.Equal(t, len(testutil.GreetingFileContents), written)
|
||||||
require.NoError(t, err)
|
// require.NoError(t, err)
|
||||||
for i := 0; i < greetingMetainfo.Info.NumPieces(); i++ {
|
// for i := 0; i < greetingMetainfo.Info.NumPieces(); i++ {
|
||||||
// p := greetingMetainfo.Info.Piece(i)
|
// // p := greetingMetainfo.Info.Piece(i)
|
||||||
if alreadyCompleted {
|
// if alreadyCompleted {
|
||||||
err := greetingData.PieceCompleted(i)
|
// err := greetingData.PieceCompleted(i)
|
||||||
assert.NoError(t, err)
|
// assert.NoError(t, err)
|
||||||
}
|
// }
|
||||||
}
|
// }
|
||||||
cfg := TestingConfig
|
// cfg := TestingConfig
|
||||||
// TODO: Disable network option?
|
// // TODO: Disable network option?
|
||||||
cfg.DisableTCP = true
|
// cfg.DisableTCP = true
|
||||||
cfg.DisableUTP = true
|
// cfg.DisableUTP = true
|
||||||
cfg.TorrentDataOpener = func(mi *metainfo.Info) Data {
|
// // cfg.DefaultStorage = filePieceStore
|
||||||
return filePieceStore.OpenTorrentData(mi)
|
// cl, err := NewClient(&cfg)
|
||||||
}
|
// require.NoError(t, err)
|
||||||
cl, err := NewClient(&cfg)
|
// defer cl.Close()
|
||||||
require.NoError(t, err)
|
// tt, err := cl.AddTorrent(greetingMetainfo)
|
||||||
defer cl.Close()
|
// require.NoError(t, err)
|
||||||
tt, err := cl.AddTorrent(greetingMetainfo)
|
// psrs := tt.PieceStateRuns()
|
||||||
require.NoError(t, err)
|
// assert.Len(t, psrs, 1)
|
||||||
psrs := tt.PieceStateRuns()
|
// assert.EqualValues(t, 3, psrs[0].Length)
|
||||||
assert.Len(t, psrs, 1)
|
// assert.Equal(t, alreadyCompleted, psrs[0].Complete)
|
||||||
assert.EqualValues(t, 3, psrs[0].Length)
|
// if alreadyCompleted {
|
||||||
assert.Equal(t, alreadyCompleted, psrs[0].Complete)
|
// r := tt.NewReader()
|
||||||
if alreadyCompleted {
|
// b, err := ioutil.ReadAll(r)
|
||||||
r := tt.NewReader()
|
// assert.NoError(t, err)
|
||||||
b, err := ioutil.ReadAll(r)
|
// assert.EqualValues(t, testutil.GreetingFileContents, b)
|
||||||
assert.NoError(t, err)
|
// }
|
||||||
assert.EqualValues(t, testutil.GreetingFileContents, b)
|
// }
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAddTorrentPiecesAlreadyCompleted(t *testing.T) {
|
// func TestAddTorrentPiecesAlreadyCompleted(t *testing.T) {
|
||||||
testAddTorrentPriorPieceCompletion(t, true)
|
// testAddTorrentPriorPieceCompletion(t, true)
|
||||||
}
|
// }
|
||||||
|
|
||||||
func TestAddTorrentPiecesNotAlreadyCompleted(t *testing.T) {
|
// func TestAddTorrentPiecesNotAlreadyCompleted(t *testing.T) {
|
||||||
testAddTorrentPriorPieceCompletion(t, false)
|
// testAddTorrentPriorPieceCompletion(t, false)
|
||||||
}
|
// }
|
||||||
|
|
||||||
func TestAddMetainfoWithNodes(t *testing.T) {
|
func TestAddMetainfoWithNodes(t *testing.T) {
|
||||||
cfg := TestingConfig
|
cfg := TestingConfig
|
||||||
@ -796,17 +800,18 @@ func testDownloadCancel(t *testing.T, ps testDownloadCancelParams) {
|
|||||||
leecherDataDir, err := ioutil.TempDir("", "")
|
leecherDataDir, err := ioutil.TempDir("", "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(leecherDataDir)
|
defer os.RemoveAll(leecherDataDir)
|
||||||
cfg.TorrentDataOpener = func() TorrentDataOpener {
|
// cfg.TorrentDataOpener = func() TorrentDataOpener {
|
||||||
fc, err := filecache.NewCache(leecherDataDir)
|
// fc, err := filecache.NewCache(leecherDataDir)
|
||||||
require.NoError(t, err)
|
// require.NoError(t, err)
|
||||||
if ps.SetLeecherStorageCapacity {
|
// if ps.SetLeecherStorageCapacity {
|
||||||
fc.SetCapacity(ps.LeecherStorageCapacity)
|
// fc.SetCapacity(ps.LeecherStorageCapacity)
|
||||||
}
|
// }
|
||||||
store := pieceStore.New(fileCacheDataBackend.New(fc))
|
// store := pieceStore.New(fileCacheDataBackend.New(fc))
|
||||||
return func(mi *metainfo.Info) Data {
|
// return func(mi *metainfo.Info) storage.I {
|
||||||
return store.OpenTorrentData(mi)
|
// return store.OpenTorrentData(mi)
|
||||||
}
|
// }
|
||||||
}()
|
// }()
|
||||||
|
cfg.DataDir = leecherDataDir
|
||||||
leecher, _ := NewClient(&cfg)
|
leecher, _ := NewClient(&cfg)
|
||||||
defer leecher.Close()
|
defer leecher.Close()
|
||||||
if ps.ExportClientStatus {
|
if ps.ExportClientStatus {
|
||||||
@ -834,10 +839,10 @@ func testDownloadCancel(t *testing.T, ps testDownloadCancelParams) {
|
|||||||
completes := make(map[int]bool, 3)
|
completes := make(map[int]bool, 3)
|
||||||
values:
|
values:
|
||||||
for {
|
for {
|
||||||
started := time.Now()
|
// started := time.Now()
|
||||||
select {
|
select {
|
||||||
case _v := <-psc.Values:
|
case _v := <-psc.Values:
|
||||||
log.Print(time.Since(started))
|
// log.Print(time.Since(started))
|
||||||
v := _v.(PieceStateChange)
|
v := _v.(PieceStateChange)
|
||||||
completes[v.Index] = v.Complete
|
completes[v.Index] = v.Complete
|
||||||
case <-time.After(100 * time.Millisecond):
|
case <-time.After(100 * time.Millisecond):
|
||||||
@ -885,3 +890,15 @@ func TestPeerInvalidHave(t *testing.T) {
|
|||||||
assert.NoError(t, cn.peerSentHave(0))
|
assert.NoError(t, cn.peerSentHave(0))
|
||||||
assert.Error(t, cn.peerSentHave(1))
|
assert.Error(t, cn.peerSentHave(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPieceCompletedInStorageButNotClient(t *testing.T) {
|
||||||
|
greetingTempDir, greetingMetainfo := testutil.GreetingTestTorrent()
|
||||||
|
defer os.RemoveAll(greetingTempDir)
|
||||||
|
cfg := TestingConfig
|
||||||
|
cfg.DataDir = greetingTempDir
|
||||||
|
seeder, err := NewClient(&TestingConfig)
|
||||||
|
require.NoError(t, err)
|
||||||
|
seeder.AddTorrentSpec(&TorrentSpec{
|
||||||
|
Info: &greetingMetainfo.Info,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
@ -121,7 +121,7 @@ func main() {
|
|||||||
tagflag.Parse(&opts, tagflag.SkipBadTypes())
|
tagflag.Parse(&opts, tagflag.SkipBadTypes())
|
||||||
clientConfig := opts.Config
|
clientConfig := opts.Config
|
||||||
if opts.Mmap {
|
if opts.Mmap {
|
||||||
clientConfig.TorrentDataOpener = func(info *metainfo.Info) torrent.Data {
|
clientConfig.TorrentDataOpener = func(info *metainfo.Info) torrent.Storage {
|
||||||
ret, err := mmap.TorrentData(info, "")
|
ret, err := mmap.TorrentData(info, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("error opening torrent data for %q: %s", info.Name, err)
|
log.Fatalf("error opening torrent data for %q: %s", info.Name, err)
|
||||||
|
@ -3,6 +3,7 @@ package torrent
|
|||||||
import (
|
import (
|
||||||
"github.com/anacrolix/torrent/dht"
|
"github.com/anacrolix/torrent/dht"
|
||||||
"github.com/anacrolix/torrent/iplist"
|
"github.com/anacrolix/torrent/iplist"
|
||||||
|
"github.com/anacrolix/torrent/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Override Client defaults.
|
// Override Client defaults.
|
||||||
@ -43,7 +44,7 @@ type Config struct {
|
|||||||
DisableMetainfoCache bool
|
DisableMetainfoCache bool
|
||||||
// Called to instantiate storage for each added torrent. Provided backends
|
// Called to instantiate storage for each added torrent. Provided backends
|
||||||
// are in $REPO/data. If not set, the "file" implementation is used.
|
// are in $REPO/data. If not set, the "file" implementation is used.
|
||||||
TorrentDataOpener
|
DefaultStorage storage.I
|
||||||
DisableEncryption bool `long:"disable-encryption"`
|
DisableEncryption bool `long:"disable-encryption"`
|
||||||
|
|
||||||
IPBlocklist *iplist.IPList
|
IPBlocklist *iplist.IPList
|
||||||
|
@ -194,7 +194,7 @@ func TestDownloadOnDemand(t *testing.T) {
|
|||||||
|
|
||||||
NoDefaultBlocklist: true,
|
NoDefaultBlocklist: true,
|
||||||
|
|
||||||
TorrentDataOpener: func(info *metainfo.Info) torrent.Data {
|
TorrentDataOpener: func(info *metainfo.Info) torrent.Storage {
|
||||||
ret, _ := mmap.TorrentData(info, filepath.Join(layout.BaseDir, "download"))
|
ret, _ := mmap.TorrentData(info, filepath.Join(layout.BaseDir, "download"))
|
||||||
return ret
|
return ret
|
||||||
},
|
},
|
||||||
|
@ -11,6 +11,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/anacrolix/missinggo"
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/bencode"
|
"github.com/anacrolix/torrent/bencode"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -170,8 +172,9 @@ func (me Piece) Offset() int64 {
|
|||||||
return int64(me.i) * me.Info.PieceLength
|
return int64(me.i) * me.Info.PieceLength
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me Piece) Hash() []byte {
|
func (me Piece) Hash() (ret InfoHash) {
|
||||||
return me.Info.Pieces[me.i*20 : (me.i+1)*20]
|
missinggo.CopyExact(&ret, me.Info.Pieces[me.i*20:(me.i+1)*20])
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me *Info) Piece(i int) Piece {
|
func (me *Info) Piece(i int) Piece {
|
||||||
@ -253,3 +256,5 @@ func (mi *MetaInfo) SetDefaults() {
|
|||||||
mi.CreationDate = time.Now().Unix()
|
mi.CreationDate = time.Now().Unix()
|
||||||
mi.Info.PieceLength = 256 * 1024
|
mi.Info.PieceLength = 256 * 1024
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type InfoHash [20]byte
|
||||||
|
10
piece.go
10
piece.go
@ -5,7 +5,9 @@ import (
|
|||||||
|
|
||||||
"github.com/anacrolix/missinggo/bitmap"
|
"github.com/anacrolix/missinggo/bitmap"
|
||||||
|
|
||||||
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
pp "github.com/anacrolix/torrent/peer_protocol"
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
||||||
|
"github.com/anacrolix/torrent/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Piece priority describes the importance of obtaining a particular piece.
|
// Piece priority describes the importance of obtaining a particular piece.
|
||||||
@ -45,6 +47,14 @@ type piece struct {
|
|||||||
noPendingWrites sync.Cond
|
noPendingWrites sync.Cond
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *piece) Info() metainfo.Piece {
|
||||||
|
return p.t.Info.Piece(p.index)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *piece) Storage() storage.Piece {
|
||||||
|
return p.t.storage.Piece(p.Info())
|
||||||
|
}
|
||||||
|
|
||||||
func (p *piece) pendingChunkIndex(chunkIndex int) bool {
|
func (p *piece) pendingChunkIndex(chunkIndex int) bool {
|
||||||
return !p.DirtyChunks.Contains(chunkIndex)
|
return !p.DirtyChunks.Contains(chunkIndex)
|
||||||
}
|
}
|
||||||
|
@ -1,36 +1,65 @@
|
|||||||
package file
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/anacrolix/missinggo"
|
||||||
|
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
)
|
)
|
||||||
|
|
||||||
type data struct {
|
type fileStorage struct {
|
||||||
info *metainfo.Info
|
baseDir string
|
||||||
loc string
|
completed map[[20]byte]bool
|
||||||
completed []bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TorrentData(md *metainfo.Info, location string) data {
|
func NewFile(baseDir string) *fileStorage {
|
||||||
return data{md, location, make([]bool, md.NumPieces())}
|
return &fileStorage{
|
||||||
|
baseDir: baseDir,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me data) Close() {}
|
func (me *fileStorage) Piece(p metainfo.Piece) Piece {
|
||||||
|
_io := &fileStorageTorrent{
|
||||||
func (me data) PieceComplete(piece int) bool {
|
p.Info,
|
||||||
return me.completed[piece]
|
me.baseDir,
|
||||||
|
}
|
||||||
|
return &fileStoragePiece{
|
||||||
|
me,
|
||||||
|
p,
|
||||||
|
missinggo.NewSectionWriter(_io, p.Offset(), p.Length()),
|
||||||
|
io.NewSectionReader(_io, p.Offset(), p.Length()),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me data) PieceCompleted(piece int) error {
|
type fileStoragePiece struct {
|
||||||
me.completed[piece] = true
|
*fileStorage
|
||||||
|
p metainfo.Piece
|
||||||
|
io.WriterAt
|
||||||
|
io.ReaderAt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (me *fileStoragePiece) GetIsComplete() bool {
|
||||||
|
return me.completed[me.p.Hash()]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (me *fileStoragePiece) MarkComplete() error {
|
||||||
|
if me.completed == nil {
|
||||||
|
me.completed = make(map[[20]byte]bool)
|
||||||
|
}
|
||||||
|
me.completed[me.p.Hash()] = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type fileStorageTorrent struct {
|
||||||
|
info *metainfo.Info
|
||||||
|
baseDir string
|
||||||
|
}
|
||||||
|
|
||||||
// Returns EOF on short or missing file.
|
// Returns EOF on short or missing file.
|
||||||
func (me data) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {
|
func (me *fileStorageTorrent) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err error) {
|
||||||
f, err := os.Open(me.fileInfoName(fi))
|
f, err := os.Open(me.fileInfoName(fi))
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
// File missing is treated the same as a short file.
|
// File missing is treated the same as a short file.
|
||||||
@ -59,7 +88,7 @@ func (me data) readFileAt(fi metainfo.FileInfo, b []byte, off int64) (n int, err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.
|
// Only returns EOF at the end of the torrent. Premature EOF is ErrUnexpectedEOF.
|
||||||
func (me data) ReadAt(b []byte, off int64) (n int, err error) {
|
func (me *fileStorageTorrent) ReadAt(b []byte, off int64) (n int, err error) {
|
||||||
for _, fi := range me.info.UpvertedFiles() {
|
for _, fi := range me.info.UpvertedFiles() {
|
||||||
for off < fi.Length {
|
for off < fi.Length {
|
||||||
n1, err1 := me.readFileAt(fi, b, off)
|
n1, err1 := me.readFileAt(fi, b, off)
|
||||||
@ -87,7 +116,7 @@ func (me data) ReadAt(b []byte, off int64) (n int, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me data) WriteAt(p []byte, off int64) (n int, err error) {
|
func (me *fileStorageTorrent) WriteAt(p []byte, off int64) (n int, err error) {
|
||||||
for _, fi := range me.info.UpvertedFiles() {
|
for _, fi := range me.info.UpvertedFiles() {
|
||||||
if off >= fi.Length {
|
if off >= fi.Length {
|
||||||
off -= fi.Length
|
off -= fi.Length
|
||||||
@ -119,6 +148,6 @@ func (me data) WriteAt(p []byte, off int64) (n int, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (me data) fileInfoName(fi metainfo.FileInfo) string {
|
func (me *fileStorageTorrent) fileInfoName(fi metainfo.FileInfo) string {
|
||||||
return filepath.Join(append([]string{me.loc, me.info.Name}, fi.Path...)...)
|
return filepath.Join(append([]string{me.baseDir, me.info.Name}, fi.Path...)...)
|
||||||
}
|
}
|
@ -1,4 +1,4 @@
|
|||||||
package file
|
package storage
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
@ -8,6 +8,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/anacrolix/missinggo"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
@ -18,15 +19,18 @@ func TestShortFile(t *testing.T) {
|
|||||||
td, err := ioutil.TempDir("", "")
|
td, err := ioutil.TempDir("", "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer os.RemoveAll(td)
|
defer os.RemoveAll(td)
|
||||||
data := TorrentData(&metainfo.Info{
|
data := NewFile(td)
|
||||||
Name: "a",
|
info := &metainfo.Info{
|
||||||
Length: 2,
|
Name: "a",
|
||||||
}, td)
|
Length: 2,
|
||||||
|
PieceLength: missinggo.MiB,
|
||||||
|
}
|
||||||
f, err := os.Create(filepath.Join(td, "a"))
|
f, err := os.Create(filepath.Join(td, "a"))
|
||||||
err = f.Truncate(1)
|
err = f.Truncate(1)
|
||||||
f.Close()
|
f.Close()
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
n, err := io.Copy(&buf, io.NewSectionReader(data, 0, 2))
|
p := info.Piece(0)
|
||||||
|
n, err := io.Copy(&buf, io.NewSectionReader(data.Piece(p), 0, p.Length()))
|
||||||
assert.EqualValues(t, 1, n)
|
assert.EqualValues(t, 1, n)
|
||||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||||
}
|
}
|
@ -1,19 +1,25 @@
|
|||||||
package torrent
|
package storage
|
||||||
|
|
||||||
import "io"
|
import (
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
|
)
|
||||||
|
|
||||||
// Represents data storage for a Torrent.
|
// Represents data storage for a Torrent.
|
||||||
type Data interface {
|
type I interface {
|
||||||
|
Piece(metainfo.Piece) Piece
|
||||||
|
}
|
||||||
|
|
||||||
|
type Piece interface {
|
||||||
// Should return io.EOF only at end of torrent. Short reads due to missing
|
// Should return io.EOF only at end of torrent. Short reads due to missing
|
||||||
// data should return io.ErrUnexpectedEOF.
|
// data should return io.ErrUnexpectedEOF.
|
||||||
io.ReaderAt
|
io.ReaderAt
|
||||||
io.WriterAt
|
io.WriterAt
|
||||||
// Bro, do you even io.Closer?
|
|
||||||
Close()
|
|
||||||
// Called when the client believes the piece data will pass a hash check.
|
// Called when the client believes the piece data will pass a hash check.
|
||||||
// The storage can move or mark the piece data as read-only as it sees
|
// The storage can move or mark the piece data as read-only as it sees
|
||||||
// fit.
|
// fit.
|
||||||
PieceCompleted(index int) error
|
MarkComplete() error
|
||||||
// Returns true if the piece is complete.
|
// Returns true if the piece is complete.
|
||||||
PieceComplete(index int) bool
|
GetIsComplete() bool
|
||||||
}
|
}
|
35
torrent.go
35
torrent.go
@ -23,6 +23,7 @@ import (
|
|||||||
"github.com/anacrolix/torrent/bencode"
|
"github.com/anacrolix/torrent/bencode"
|
||||||
"github.com/anacrolix/torrent/metainfo"
|
"github.com/anacrolix/torrent/metainfo"
|
||||||
pp "github.com/anacrolix/torrent/peer_protocol"
|
pp "github.com/anacrolix/torrent/peer_protocol"
|
||||||
|
"github.com/anacrolix/torrent/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (t *torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
|
func (t *torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
|
||||||
@ -53,7 +54,7 @@ type torrent struct {
|
|||||||
// get this from the info dict.
|
// get this from the info dict.
|
||||||
length int64
|
length int64
|
||||||
|
|
||||||
data Data
|
storage storage.I
|
||||||
|
|
||||||
// The info dict. Nil if we don't have it (yet).
|
// The info dict. Nil if we don't have it (yet).
|
||||||
Info *metainfo.Info
|
Info *metainfo.Info
|
||||||
@ -106,9 +107,7 @@ func (t *torrent) pieceComplete(piece int) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (t *torrent) pieceCompleteUncached(piece int) bool {
|
func (t *torrent) pieceCompleteUncached(piece int) bool {
|
||||||
// TODO: This is called when setting metadata, and before storage is
|
return t.Pieces[piece].Storage().GetIsComplete()
|
||||||
// assigned, which doesn't seem right.
|
|
||||||
return t.data != nil && t.data.PieceComplete(piece)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *torrent) numConnsUnchoked() (num int) {
|
func (t *torrent) numConnsUnchoked() (num int) {
|
||||||
@ -248,14 +247,6 @@ func (t *torrent) setMetadata(md *metainfo.Info, infoBytes []byte) (err error) {
|
|||||||
conn.Close()
|
conn.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *torrent) setStorage(td Data) {
|
|
||||||
if t.data != nil {
|
|
||||||
t.data.Close()
|
|
||||||
}
|
|
||||||
t.data = td
|
|
||||||
for i := range t.Pieces {
|
for i := range t.Pieces {
|
||||||
t.updatePieceCompletion(i)
|
t.updatePieceCompletion(i)
|
||||||
t.Pieces[i].QueuedForHash = true
|
t.Pieces[i].QueuedForHash = true
|
||||||
@ -265,6 +256,7 @@ func (t *torrent) setStorage(td Data) {
|
|||||||
t.verifyPiece(i)
|
t.verifyPiece(i)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *torrent) verifyPiece(piece int) {
|
func (t *torrent) verifyPiece(piece int) {
|
||||||
@ -553,7 +545,7 @@ func (t *torrent) close() (err error) {
|
|||||||
}
|
}
|
||||||
t.ceaseNetworking()
|
t.ceaseNetworking()
|
||||||
close(t.closing)
|
close(t.closing)
|
||||||
if c, ok := t.data.(io.Closer); ok {
|
if c, ok := t.storage.(io.Closer); ok {
|
||||||
c.Close()
|
c.Close()
|
||||||
}
|
}
|
||||||
for _, conn := range t.Conns {
|
for _, conn := range t.Conns {
|
||||||
@ -575,7 +567,8 @@ func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
|
|||||||
|
|
||||||
func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
|
func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
|
||||||
tr := perf.NewTimer()
|
tr := perf.NewTimer()
|
||||||
n, err := t.data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
|
|
||||||
|
n, err := t.Pieces[piece].Storage().WriteAt(data, begin)
|
||||||
if err == nil && n != len(data) {
|
if err == nil && n != len(data) {
|
||||||
err = io.ErrShortWrite
|
err = io.ErrShortWrite
|
||||||
}
|
}
|
||||||
@ -661,13 +654,13 @@ func (t *torrent) hashPiece(piece int) (ret pieceSum) {
|
|||||||
p.waitNoPendingWrites()
|
p.waitNoPendingWrites()
|
||||||
ip := t.Info.Piece(piece)
|
ip := t.Info.Piece(piece)
|
||||||
pl := ip.Length()
|
pl := ip.Length()
|
||||||
n, err := io.Copy(hash, io.NewSectionReader(t.data, ip.Offset(), pl))
|
n, err := io.Copy(hash, io.NewSectionReader(t.Pieces[piece].Storage(), 0, pl))
|
||||||
if n == pl {
|
if n == pl {
|
||||||
missinggo.CopyExact(&ret, hash.Sum(nil))
|
missinggo.CopyExact(&ret, hash.Sum(nil))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err != io.ErrUnexpectedEOF {
|
if err != io.ErrUnexpectedEOF {
|
||||||
log.Printf("unexpected error hashing piece with %T: %s", t.data, err)
|
log.Printf("unexpected error hashing piece with %T: %s", t.storage, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -1031,13 +1024,9 @@ func (t *torrent) updatePieceCompletion(piece int) {
|
|||||||
|
|
||||||
// Non-blocking read. Client lock is not required.
|
// Non-blocking read. Client lock is not required.
|
||||||
func (t *torrent) readAt(b []byte, off int64) (n int, err error) {
|
func (t *torrent) readAt(b []byte, off int64) (n int, err error) {
|
||||||
if off+int64(len(b)) > t.length {
|
p := &t.Pieces[off/t.Info.PieceLength]
|
||||||
b = b[:t.length-off]
|
p.waitNoPendingWrites()
|
||||||
}
|
return p.Storage().ReadAt(b, off-p.Info().Offset())
|
||||||
for pi := off / t.Info.PieceLength; pi*t.Info.PieceLength < off+int64(len(b)); pi++ {
|
|
||||||
t.Pieces[pi].waitNoPendingWrites()
|
|
||||||
}
|
|
||||||
return t.data.ReadAt(b, off)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *torrent) updateAllPieceCompletions() {
|
func (t *torrent) updateAllPieceCompletions() {
|
||||||
|
Loading…
x
Reference in New Issue
Block a user