Matt Joiner 2021-11-08 14:47:01 +11:00
parent f86af21cd2
commit c6ee03f449
47 changed files with 104 additions and 124 deletions

View File

@ -224,7 +224,7 @@ func getDictField(dict reflect.Type, key string) (_ dictField, err error) {
mapValue.Set(reflect.MakeMap(dict))
}
// Assigns the value into the map.
//log.Printf("map type: %v", mapValue.Type())
// log.Printf("map type: %v", mapValue.Type())
mapValue.SetMapIndex(reflect.ValueOf(key).Convert(dict.Key()), value)
}
},
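The hunk above adjusts a comment inside the decoder's reflect-driven map assignment. For reference, a minimal standalone sketch of the same stdlib reflect pattern; the map type and key here are illustrative, not the decoder's actual ones:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build a map[string]int dynamically, the way a reflect-based decoder
	// does when it only has a reflect.Type for the dict.
	dict := reflect.TypeOf(map[string]int(nil))
	mapValue := reflect.New(dict).Elem()
	if mapValue.IsNil() {
		mapValue.Set(reflect.MakeMap(dict))
	}
	// Convert the key to the map's key type before assigning, mirroring
	// the SetMapIndex call in the hunk.
	key, value := "answer", reflect.ValueOf(42)
	mapValue.SetMapIndex(reflect.ValueOf(key).Convert(dict.Key()), value)
	fmt.Println(mapValue.Interface()) // map[answer:42]
}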
@ -345,7 +345,7 @@ func (d *Decoder) parseDict(v reflect.Value) error {
continue
}
setValue := reflect.New(df.Type).Elem()
//log.Printf("parsing into %v", setValue.Type())
// log.Printf("parsing into %v", setValue.Type())
ok, err = d.parseValue(setValue)
if err != nil {
var target *UnmarshalTypeError
@ -468,7 +468,6 @@ func (d *Decoder) readOneValue() bool {
}
return true
}
func (d *Decoder) parseUnmarshaler(v reflect.Value) bool {

View File

@ -23,8 +23,10 @@ var random_decode_tests = []random_decode_test{
{"5:hello", "hello"},
{"29:unicode test проверка", "unicode test проверка"},
{"d1:ai5e1:b5:helloe", map[string]interface{}{"a": int64(5), "b": "hello"}},
{"li5ei10ei15ei20e7:bencodee",
[]interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"}},
{
"li5ei10ei15ei20e7:bencodee",
[]interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"},
},
{"ldedee", []interface{}{map[string]interface{}{}, map[string]interface{}{}}},
{"le", []interface{}{}},
{"i604919719469385652980544193299329427705624352086e", func() *big.Int {
@ -135,7 +137,6 @@ func TestUnmarshalerBencode(t *testing.T) {
assert_equal(t, ss[0].x, "5:hello")
assert_equal(t, ss[1].x, "5:fruit")
assert_equal(t, ss[2].x, "3:way")
}
func TestIgnoreUnmarshalTypeError(t *testing.T) {

View File

@ -101,7 +101,6 @@ func (e *Encoder) reflectMarshaler(v reflect.Value) bool {
var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem()
func (e *Encoder) reflectValue(v reflect.Value) {
if e.reflectMarshaler(v) {
return
}

View File

@ -477,7 +477,6 @@ func (cl *Client) rejectAccepted(conn net.Conn) error {
}
if cl.config.DisableIPv4 && len(rip) == net.IPv4len {
return errors.New("ipv4 disabled")
}
if cl.config.DisableIPv6 && len(rip) == net.IPv6len && rip.To4() == nil {
return errors.New("ipv6 disabled")
@ -735,7 +734,7 @@ func (cl *Client) establishOutgoingConn(t *Torrent, addr PeerRemoteAddr) (c *Pee
torrent.Add("initiated conn with preferred header obfuscation", 1)
return
}
//cl.logger.Printf("error establishing connection to %s (obfuscatedHeader=%t): %v", addr, obfuscatedHeaderFirst, err)
// cl.logger.Printf("error establishing connection to %s (obfuscatedHeader=%t): %v", addr, obfuscatedHeaderFirst, err)
if cl.config.HeaderObfuscationPolicy.RequirePreferred {
// We should have just tried with the preferred header obfuscation. If it was required,
// there's nothing else to try.
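The surrounding function tries the preferred header obfuscation first and only falls back when the policy permits. A minimal sketch of that two-phase dial, assuming a hypothetical dial function rather than the client's actual API:

package sketch

import "net"

// dialWithFallback captures the retry shape described above: try the
// preferred header obfuscation, and only if it fails and isn't required,
// retry once with the opposite setting.
func dialWithFallback(
	dial func(obfuscated bool) (net.Conn, error),
	preferObfuscated, requirePreferred bool,
) (net.Conn, error) {
	c, err := dial(preferObfuscated)
	if err == nil || requirePreferred {
		return c, err
	}
	return dial(!preferObfuscated)
}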
@ -746,7 +745,7 @@ func (cl *Client) establishOutgoingConn(t *Torrent, addr PeerRemoteAddr) (c *Pee
if err == nil {
torrent.Add("initiated conn with fallback header obfuscation", 1)
}
//cl.logger.Printf("error establishing fallback connection to %v: %v", addr, err)
// cl.logger.Printf("error establishing fallback connection to %v: %v", addr, err)
return
}

View File

@ -648,7 +648,7 @@ func TestSetMaxEstablishedConn(t *testing.T) {
// Creates a file containing its own name as data, makes a metainfo from that, adds it to the given
// client, and returns a magnet link.
func makeMagnet(t *testing.T, cl *Client, dir string, name string) string {
os.MkdirAll(dir, 0770)
os.MkdirAll(dir, 0o770)
file, err := os.Create(filepath.Join(dir, name))
require.NoError(t, err)
file.Write([]byte(name))
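As the doc comment says, the helper seeds dir/name into the given client and hands back a magnet link. A hedged usage sketch, assuming it sits in the same test package (the helper and config names come from the diff; the test itself is made up):

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestMakeMagnetSketch(t *testing.T) {
	cl, err := NewClient(TestingConfig(t))
	require.NoError(t, err)
	defer cl.Close()
	magnet := makeMagnet(t, cl, t.TempDir(), "hello.txt")
	// The returned string should be a magnet URI for the new torrent.
	require.True(t, strings.HasPrefix(magnet, "magnet:?"))
}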
@ -688,7 +688,7 @@ func testSeederLeecherPair(t *testing.T, seeder func(*ClientConfig), leecher fun
cfg := TestingConfig(t)
cfg.Seed = true
cfg.DataDir = filepath.Join(cfg.DataDir, "server")
os.Mkdir(cfg.DataDir, 0755)
os.Mkdir(cfg.DataDir, 0o755)
seeder(cfg)
server, err := NewClient(cfg)
require.NoError(t, err)

View File

@ -10,13 +10,11 @@ import (
"github.com/anacrolix/torrent/metainfo"
)
var (
builtinAnnounceList = [][]string{
{"http://p4p.arenabg.com:1337/announce"},
{"udp://tracker.opentrackr.org:1337/announce"},
{"udp://tracker.openbittorrent.com:6969/announce"},
}
)
var builtinAnnounceList = [][]string{
{"http://p4p.arenabg.com:1337/announce"},
{"udp://tracker.opentrackr.org:1337/announce"},
{"udp://tracker.openbittorrent.com:6969/announce"},
}
func main() {
log.SetFlags(log.Flags() | log.Lshortfile)

View File

@ -78,7 +78,7 @@ func dstFileName(picked string) string {
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
var rootGroup = struct {
rootGroup := struct {
Client *torrent.ClientConfig `group:"Client Options"`
TestPeers []string `long:"test-peer" description:"address of peer to inject to every torrent"`
Pick string `long:"pick" description:"filename to pick"`

View File

@ -64,7 +64,7 @@ func verifyTorrent(info *metainfo.Info, root string) error {
func main() {
log.SetFlags(log.Flags() | log.Lshortfile)
var flags = struct {
flags := struct {
DataDir string
tagflag.StartPos
TorrentFile string

View File

@ -256,7 +256,7 @@ func downloadErr(flags downloadFlags) error {
if err != nil {
return fmt.Errorf("creating client: %w", err)
}
var clientClose sync.Once //In certain situations, close was being called more than once.
var clientClose sync.Once // In certain situations, close was being called more than once.
defer clientClose.Do(func() { client.Close() })
go exitSignalHandlers(&stop)
go func() {

View File

@ -24,28 +24,26 @@ import (
"github.com/anacrolix/torrent/util/dirwatch"
)
var (
args = struct {
MetainfoDir string `help:"torrent files in this location describe the contents of the mounted filesystem"`
DownloadDir string `help:"location to save torrent data"`
MountDir string `help:"location the torrent contents are made available"`
var args = struct {
MetainfoDir string `help:"torrent files in this location describe the contents of the mounted filesystem"`
DownloadDir string `help:"location to save torrent data"`
MountDir string `help:"location the torrent contents are made available"`
DisableTrackers bool
TestPeer *net.TCPAddr
ReadaheadBytes tagflag.Bytes
ListenAddr *net.TCPAddr
}{
MetainfoDir: func() string {
_user, err := user.Current()
if err != nil {
panic(err)
}
return filepath.Join(_user.HomeDir, ".config/transmission/torrents")
}(),
ReadaheadBytes: 10 << 20,
ListenAddr: &net.TCPAddr{},
}
)
DisableTrackers bool
TestPeer *net.TCPAddr
ReadaheadBytes tagflag.Bytes
ListenAddr *net.TCPAddr
}{
MetainfoDir: func() string {
_user, err := user.Current()
if err != nil {
panic(err)
}
return filepath.Join(_user.HomeDir, ".config/transmission/torrents")
}(),
ReadaheadBytes: 10 << 20,
ListenAddr: &net.TCPAddr{},
}
func exitSignalHandlers(fs *torrentfs.TorrentFS) {
c := make(chan os.Signal, 1)

View File

@ -202,8 +202,8 @@ func NewDefaultClientConfig() *ClientConfig {
Extensions: defaultPeerExtensionBytes(),
AcceptPeerConnections: true,
}
//cc.ConnTracker.SetNoMaxEntries()
//cc.ConnTracker.Timeout = func(conntrack.Entry) time.Duration { return 0 }
// cc.ConnTracker.SetNoMaxEntries()
// cc.ConnTracker.Timeout = func(conntrack.Entry) time.Duration { return 0 }
return cc
}

View File

@ -40,7 +40,6 @@ func (me testFileBytesLeft) Run(t *testing.T) {
}
func TestFileBytesLeft(t *testing.T) {
testFileBytesLeft{
usualPieceSize: 3,
firstPieceIndex: 1,

View File

@ -45,7 +45,7 @@ func (me fileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse
me.fn.FS.mu.Unlock()
var n int
r := missinggo.ContextedReader{r, ctx}
//log.Printf("reading %v bytes at %v", len(resp.Data), req.Offset)
// log.Printf("reading %v bytes at %v", len(resp.Data), req.Offset)
if true {
// A user reported that on freebsd 12.2, the system requires that reads are
// completely filled. Their system only asks for 64KiB at a time. I've seen systems that
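The comment explains a FUSE quirk: on that system, read replies must completely fill the requested buffer. A minimal stdlib sketch of the fill-or-EOF behaviour, with the FUSE plumbing elided:

package sketch

import "io"

// fillRead reads until buf is full or the reader is exhausted. A short
// read at end-of-file is reported through the count, not as an error,
// which is what a completely-filled FUSE reply needs.
func fillRead(r io.Reader, buf []byte) (int, error) {
	n, err := io.ReadFull(r, buf)
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	return n, err
}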

View File

@ -14,9 +14,7 @@ type fileNode struct {
f *torrent.File
}
var (
_ fusefs.NodeOpener = fileNode{}
)
var _ fusefs.NodeOpener = fileNode{}
func (fn fileNode) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Size = uint64(fn.f.Length())

View File

@ -15,12 +15,10 @@ import (
)
const (
defaultMode = 0555
defaultMode = 0o555
)
var (
torrentfsReadRequests = expvar.NewInt("torrentfsReadRequests")
)
var torrentfsReadRequests = expvar.NewInt("torrentfsReadRequests")
type TorrentFS struct {
Client *torrent.Client
@ -55,9 +53,7 @@ type dirNode struct {
node
}
var (
_ fusefs.HandleReadDirAller = dirNode{}
)
var _ fusefs.HandleReadDirAller = dirNode{}
func isSubPath(parent, child string) bool {
if parent == "" {

View File

@ -68,9 +68,9 @@ func newGreetingLayout() (tl testLayout, err error) {
return
}
tl.Completed = filepath.Join(tl.BaseDir, "completed")
os.Mkdir(tl.Completed, 0777)
os.Mkdir(tl.Completed, 0o777)
tl.MountDir = filepath.Join(tl.BaseDir, "mnt")
os.Mkdir(tl.MountDir, 0777)
os.Mkdir(tl.MountDir, 0o777)
testutil.CreateDummyTorrentData(tl.Completed)
tl.Metainfo = testutil.GreetingMetaInfo()
return

View File

@ -12,7 +12,7 @@ import (
)
func main() {
var flags = struct {
flags := struct {
tagflag.StartPos
Ips []net.IP
}{}

View File

@ -59,6 +59,7 @@ var (
func (h *Hash) UnmarshalText(b []byte) error {
return h.FromHexString(string(b))
}
func (h Hash) MarshalText() (text []byte, err error) {
return []byte(h.HexString()), nil
}

View File

@ -67,7 +67,6 @@ func TestParseMagnetURI(t *testing.T) {
if err == nil {
t.Errorf("Failed to detect broken Magnet URI: %v", uri)
}
}
func TestMagnetize(t *testing.T) {

View File

@ -10,9 +10,7 @@ import (
type Node string
var (
_ bencode.Unmarshaler = (*Node)(nil)
)
var _ bencode.Unmarshaler = (*Node)(nil)
func (n *Node) UnmarshalBencode(b []byte) (err error) {
var iface interface{}

View File

@ -6,9 +6,7 @@ import (
type UrlList []string
var (
_ bencode.Unmarshaler = (*UrlList)(nil)
)
var _ bencode.Unmarshaler = (*UrlList)(nil)
func (me *UrlList) UnmarshalBencode(b []byte) error {
if len(b) == 0 {

View File

@ -44,11 +44,11 @@ func (me *MMapSpan) InitIndex() {
i++
return l, true
})
//log.Printf("made mmapspan index: %v", me.segmentLocater)
// log.Printf("made mmapspan index: %v", me.segmentLocater)
}
func (ms *MMapSpan) ReadAt(p []byte, off int64) (n int, err error) {
//log.Printf("reading %v bytes at %v", len(p), off)
// log.Printf("reading %v bytes at %v", len(p), off)
ms.mu.RLock()
defer ms.mu.RUnlock()
n = ms.locateCopy(func(a, b []byte) (_, _ []byte) { return a, b }, p, off)
@ -65,7 +65,7 @@ func copyBytes(dst, src []byte) int {
func (ms *MMapSpan) locateCopy(copyArgs func(remainingArgument, mmapped []byte) (dst, src []byte), p []byte, off int64) (n int) {
ms.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
mMapBytes := ms.mMaps[i][e.Start:]
//log.Printf("got segment %v: %v, copying %v, %v", i, e, len(p), len(mMapBytes))
// log.Printf("got segment %v: %v, copying %v, %v", i, e, len(p), len(mMapBytes))
_n := copyBytes(copyArgs(p, mMapBytes))
p = p[_n:]
n += _n

View File

@ -20,7 +20,7 @@ func main() {
}
func mainErr() error {
var args = struct {
args := struct {
CryptoMethod mse.CryptoMethod
Dial *struct {
Network string `arg:"positional"`

View File

@ -260,7 +260,7 @@ func BenchmarkSkeysReceive(b *testing.B) {
}
fillRand(b, skeys...)
initSkey := skeys[len(skeys)/2]
//c := qt.New(b)
// c := qt.New(b)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i += 1 {

View File

@ -481,7 +481,6 @@ func (cn *Peer) totalExpectingTime() (ret time.Duration) {
ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
}
return
}
func (cn *PeerConn) onPeerSentCancel(r Request) {
@ -961,7 +960,7 @@ func (c *PeerConn) onReadRequest(r Request) error {
value := &peerRequestState{}
c.peerRequests[r] = value
go c.peerRequestDataReader(r, value)
//c.tickleWriter()
// c.tickleWriter()
return nil
}
@ -1235,7 +1234,7 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
cb(c, &d)
}
//c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
// c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
if d.Reqq != 0 {
c.PeerMaxRequests = d.Reqq
}
@ -1346,7 +1345,7 @@ func (c *Peer) receiveChunk(msg *pp.Message) error {
// Do we actually want this chunk?
if t.haveChunk(ppReq) {
//panic(fmt.Sprintf("%+v", ppReq))
// panic(fmt.Sprintf("%+v", ppReq))
chunksReceived.Add("wasted", 1)
c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
return nil

View File

@ -28,7 +28,7 @@ func TestSendBitfieldThenHave(t *testing.T) {
t.Log(err)
}
r, w := io.Pipe()
//c.r = r
// c.r = r
c.w = w
c.startWriter()
c.locker().Lock()
@ -155,7 +155,7 @@ func TestConnPexPeerFlags(t *testing.T) {
tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
)
var testcases = []struct {
testcases := []struct {
conn *PeerConn
f pp.PexPeerFlags
}{
@ -181,7 +181,7 @@ func TestConnPexEvent(t *testing.T) {
dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
)
var testcases = []struct {
testcases := []struct {
t pexEventType
c *PeerConn
e pexEvent

View File

@ -164,10 +164,10 @@ func (p *Piece) VerifyData() {
if p.hashing {
target++
}
//log.Printf("target: %d", target)
// log.Printf("target: %d", target)
p.t.queuePieceCheck(p.index)
for {
//log.Printf("got %d verifies", p.numVerifies)
// log.Printf("got %d verifies", p.numVerifies)
if p.numVerifies >= target {
break
}

View File

@ -330,7 +330,7 @@ func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) {
sortPeersForPiece := func(req *RequestIndex) {
peersForPieceSorter.req = req
sort.Sort(&peersForPieceSorter)
//ensureValidSortedPeersForPieceRequests(&peersForPieceSorter)
// ensureValidSortedPeersForPieceRequests(&peersForPieceSorter)
}
// Chunks can be preassigned several times if peers haven't been able to update their "actual"
// with "next" request state before another request strategy run occurs.

View File

@ -247,7 +247,8 @@ func TestDontStealUnnecessarily(t *testing.T) {
Request: true,
NumPendingChunks: 9,
IterPendingChunks: chunkIterRange(9),
}},
},
},
Peers: []Peer{
firstStealer,
stealee,

View File

@ -116,8 +116,10 @@ func (p *peerId) GobDecode(b []byte) error {
return nil
}
type RequestIndex = request_strategy.RequestIndex
type chunkIndexType = request_strategy.ChunkIndex
type (
RequestIndex = request_strategy.RequestIndex
chunkIndexType = request_strategy.ChunkIndex
)
type peerRequests struct {
requestIndexes []RequestIndex

View File

@ -28,7 +28,6 @@ func TestLogExampleRequestMapOrdering(t *testing.T) {
for k := range makeTypicalRequests() {
t.Log(k)
}
}
func TestRequestMapOrderingPersistent(t *testing.T) {

View File

@ -67,7 +67,8 @@ func testLocater(t *testing.T, newLocater newLocater) {
{0, 1554},
{0, 1618},
{0, 1546},
{0, 8500}})
{0, 8500},
})
assertLocate(t, newLocater,
[]Length{1652, 1514, 1554, 1618, 1546, 129241752, 1537, 1536, 1551}, // 128737588
Extent{129236992, 16384},
@ -76,7 +77,8 @@ func testLocater(t *testing.T, newLocater newLocater) {
{129229108, 12644},
{0, 1537},
{0, 1536},
{0, 667}})
{0, 667},
})
}
func TestScan(t *testing.T) {

View File

@ -19,9 +19,7 @@ const (
boltDbIncompleteValue = "i"
)
var (
completionBucketKey = []byte("completion")
)
var completionBucketKey = []byte("completion")
type boltPieceCompletion struct {
db *bbolt.DB
@ -30,9 +28,9 @@ type boltPieceCompletion struct {
var _ PieceCompletion = (*boltPieceCompletion)(nil)
func NewBoltPieceCompletion(dir string) (ret PieceCompletion, err error) {
os.MkdirAll(dir, 0750)
os.MkdirAll(dir, 0o750)
p := filepath.Join(dir, ".torrent.bolt.db")
db, err := bbolt.Open(p, 0660, &bbolt.Options{
db, err := bbolt.Open(p, 0o660, &bbolt.Options{
Timeout: time.Second,
})
if err != nil {

View File

@ -51,6 +51,7 @@ func (me *boltPiece) MarkComplete() error {
func (me *boltPiece) MarkNotComplete() error {
return me.pc().Set(me.pk(), false)
}
func (me *boltPiece) ReadAt(b []byte, off int64) (n int, err error) {
err = me.db.View(func(tx *bbolt.Tx) error {
db := tx.Bucket(dataBucketKey)

View File

@ -30,7 +30,7 @@ type boltTorrent struct {
}
func NewBoltDB(filePath string) ClientImplCloser {
db, err := bbolt.Open(filepath.Join(filePath, "bolt.db"), 0600, &bbolt.Options{
db, err := bbolt.Open(filepath.Join(filePath, "bolt.db"), 0o600, &bbolt.Options{
Timeout: time.Second,
})
expect.Nil(err)

View File

@ -126,7 +126,7 @@ func (fs *fileTorrentImpl) Close() error {
// writes will ever occur to them (no torrent data is associated with a zero-length file). The
// caller should make sure the file name provided is safe/sanitized.
func CreateNativeZeroLengthFile(name string) error {
os.MkdirAll(filepath.Dir(name), 0777)
os.MkdirAll(filepath.Dir(name), 0o777)
var f io.Closer
f, err := os.Create(name)
if err != nil {
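CreateNativeZeroLengthFile is exported from the storage package, so a hedged usage sketch is possible; the path below is purely illustrative:

package main

import (
	"log"
	"path/filepath"

	"github.com/anacrolix/torrent/storage"
)

func main() {
	// Materialise a zero-length file entry on disk; per the doc comment,
	// the caller must pass a safe, sanitized name.
	name := filepath.Join("data", "example-torrent", "empty.txt")
	if err := storage.CreateNativeZeroLengthFile(name); err != nil {
		log.Fatalf("creating zero-length file %q: %v", name, err)
	}
}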
@ -185,18 +185,18 @@ func (fst fileTorrentImplIO) ReadAt(b []byte, off int64) (n int, err error) {
}
func (fst fileTorrentImplIO) WriteAt(p []byte, off int64) (n int, err error) {
//log.Printf("write at %v: %v bytes", off, len(p))
// log.Printf("write at %v: %v bytes", off, len(p))
fst.fts.segmentLocater.Locate(segments.Extent{off, int64(len(p))}, func(i int, e segments.Extent) bool {
name := fst.fts.files[i].path
os.MkdirAll(filepath.Dir(name), 0777)
os.MkdirAll(filepath.Dir(name), 0o777)
var f *os.File
f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0666)
f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0o666)
if err != nil {
return false
}
var n1 int
n1, err = f.WriteAt(p[:e.Length], e.Start)
//log.Printf("%v %v wrote %v: %v", i, e, n1, err)
// log.Printf("%v %v wrote %v: %v", i, e, n1, err)
closeErr := f.Close()
n += n1
p = p[n1:]

View File

@ -132,13 +132,13 @@ func mMapTorrent(md *metainfo.Info, location string) (mms *mmap_span.MMapSpan, e
func mmapFile(name string, size int64) (ret mmap.MMap, err error) {
dir := filepath.Dir(name)
err = os.MkdirAll(dir, 0750)
err = os.MkdirAll(dir, 0o750)
if err != nil {
err = fmt.Errorf("making directory %q: %s", dir, err)
return
}
var file *os.File
file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0666)
file, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0o666)
if err != nil {
return
}

View File

@ -24,8 +24,10 @@ var safeFilePathTests = []struct {
{input: []string{"a", filepath.FromSlash(`b/..`)}, expectErr: false},
{input: []string{"a", filepath.FromSlash(`b/../../..`)}, expectErr: true},
{input: []string{"a", filepath.FromSlash(`b/../.././..`)}, expectErr: true},
{input: []string{
filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`)},
{
input: []string{
filepath.FromSlash(`NewSuperHeroMovie-2019-English-720p.avi /../../../../../Roaming/Microsoft/Windows/Start Menu/Programs/Startup/test3.exe`),
},
expectErr: true,
},
}

View File

@ -49,7 +49,7 @@ func BenchmarkMarkComplete(b *testing.B) {
var opts NewDirectStorageOpts
opts.Memory = memory
opts.Capacity = capacity
//opts.GcBlobs = true
// opts.GcBlobs = true
opts.BlobFlushInterval = time.Second
opts.NoTriggers = noTriggers
directBench := func(b *testing.B) {

t.go
View File

@ -232,7 +232,6 @@ func (t *Torrent) initFiles() {
})
offset += fi.Length
}
}
// Returns handles to the files in the torrent. This requires that the Info is

View File

@ -98,7 +98,7 @@ func testReceiveChunkStorageFailure(t *testing.T, seederFast bool) {
}
}
// TODO: Check that PeerConns fastEnabled matches seederFast?
//select {}
// select {}
}
type pieceState struct {

View File

@ -68,7 +68,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
defer os.RemoveAll(greetingTempDir)
// Create seeder and a Torrent.
cfg := torrent.TestingConfig(t)
//cfg.Debug = true
// cfg.Debug = true
cfg.Seed = true
// Some test instances don't like this being on, even when there's no cache involved.
cfg.DropMutuallyCompletePeers = false
@ -116,7 +116,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
}
cfg.Seed = false
//cfg.Debug = true
// cfg.Debug = true
if ps.ConfigureLeecher.Config != nil {
ps.ConfigureLeecher.Config(cfg)
}
@ -180,7 +180,7 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
leecherPeerConns := leecherTorrent.PeerConns()
if cfg.DropMutuallyCompletePeers {
// I don't think we can assume it will be empty already, due to timing.
//assert.Empty(t, leecherPeerConns)
// assert.Empty(t, leecherPeerConns)
} else {
assert.NotEmpty(t, leecherPeerConns)
}
@ -296,8 +296,8 @@ func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int
ConfigureLeecher: ConfigureClient{
Config: func(cfg *torrent.ClientConfig) {
cfg.DropDuplicatePeerIds = true
//cfg.DisableIPv6 = true
//cfg.DisableUTP = true
// cfg.DisableIPv6 = true
// cfg.DisableUTP = true
},
},
})

View File

@ -209,7 +209,6 @@ func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
// Add active peers to the list
for conn := range t.conns {
ks = append(ks, PeerInfo{
Id: conn.PeerID,
Addr: conn.RemoteAddr,
@ -888,10 +887,10 @@ func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
p.waitNoPendingWrites()
storagePiece := t.pieces[piece].Storage()
//Does the backend want to do its own hashing?
// Does the backend want to do its own hashing?
if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
var sum metainfo.Hash
//log.Printf("A piece decided to self-hash: %d", piece)
// log.Printf("A piece decided to self-hash: %d", piece)
sum, err = i.SelfHash()
missinggo.CopyExact(&ret, sum)
return
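This hunk shows the hash loop deferring to storage.SelfHashing when the piece implementation opts in. A hedged sketch of such an implementation; the struct is hypothetical, its SelfHash signature is inferred from the call site above, and the rest of the piece interface is elided:

package sketch

import (
	"crypto/sha1"

	"github.com/anacrolix/torrent/metainfo"
)

// selfHashingPiece keeps its bytes in memory and hashes them itself, so
// the client can skip reading the data back out of storage.
type selfHashingPiece struct {
	data []byte
}

func (p selfHashingPiece) SelfHash() (metainfo.Hash, error) {
	// metainfo.Hash is a 20-byte array, so a SHA-1 sum converts directly.
	return metainfo.Hash(sha1.Sum(p.data)), nil
}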
@ -1506,7 +1505,6 @@ func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
}
}()
return wst
}
func (t *Torrent) startScrapingTracker(_url string) {

View File

@ -58,7 +58,8 @@ func TestSetAnnounceInfohashParamWithSpaces(t *testing.T) {
someUrl := &url.URL{}
ihBytes := [20]uint8{
0x2b, 0x76, 0xa, 0xa1, 0x78, 0x93, 0x20, 0x30, 0xc8, 0x47,
0xdc, 0xdf, 0x8e, 0xae, 0xbf, 0x56, 0xa, 0x1b, 0xd1, 0x6c}
0xdc, 0xdf, 0x8e, 0xae, 0xbf, 0x56, 0xa, 0x1b, 0xd1, 0x6c,
}
setAnnounceParams(
someUrl,
&udp.AnnounceRequest{

View File

@ -28,9 +28,7 @@ type Peer = trHttp.Peer
type AnnounceEvent = udp.AnnounceEvent
var (
ErrBadScheme = errors.New("unknown scheme")
)
var ErrBadScheme = errors.New("unknown scheme")
type Announce struct {
TrackerUrl string

View File

@ -112,7 +112,6 @@ func (me *trackerScraper) trackerUrl(ip net.IP) string {
// Return how long to wait before trying again. For most errors, we return 5
// minutes, a relatively quick turnaround for DNS changes.
func (me *trackerScraper) announce(ctx context.Context, event tracker.AnnounceEvent) (ret trackerAnnounceResult) {
defer func() {
ret.Completed = time.Now()
}()
@ -190,7 +189,6 @@ func (me *trackerScraper) canIgnoreInterval(notify *<-chan struct{}) bool {
}
func (me *trackerScraper) Run() {
defer me.announceStopped()
ctx, cancel := context.WithCancel(context.Background())

View File

@ -150,7 +150,6 @@ func (tc *TrackerClient) Close() error {
}
func (tc *TrackerClient) announceOffers() {
// tc.Announce grabs a lock on tc.outboundOffers. It also handles the case where outboundOffers
// is nil. Take ownership of outboundOffers here.
tc.mu.Lock()
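The comment describes a standard ownership handoff: swap the shared collection out under the lock, then work on it unlocked. A generic sketch with assumed field names, not the TrackerClient's actual ones:

package sketch

import "sync"

type client struct {
	mu             sync.Mutex
	outboundOffers map[string]struct{}
}

// takeOffers takes ownership of the shared map under the lock so the
// caller can range over it without holding mu.
func (c *client) takeOffers() map[string]struct{} {
	c.mu.Lock()
	defer c.mu.Unlock()
	offers := c.outboundOffers
	c.outboundOffers = nil
	return offers
}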
@ -256,7 +255,7 @@ func (tc *TrackerClient) trackerReadLoop(tracker *websocket.Conn) error {
if err != nil {
return fmt.Errorf("read message error: %w", err)
}
//tc.Logger.WithDefaultLevel(log.Debug).Printf("received message from tracker: %q", message)
// tc.Logger.WithDefaultLevel(log.Debug).Printf("received message from tracker: %q", message)
var ar AnnounceResponse
if err := json.Unmarshal(message, &ar); err != nil {
@ -337,7 +336,7 @@ func (tc *TrackerClient) handleAnswer(offerId string, answer webrtc.SessionDescr
tc.Logger.WithDefaultLevel(log.Warning).Printf("could not find offer for id %+q", offerId)
return
}
//tc.Logger.WithDefaultLevel(log.Debug).Printf("offer %q got answer %v", offerId, answer)
// tc.Logger.WithDefaultLevel(log.Debug).Printf("offer %q got answer %v", offerId, answer)
metrics.Add("outbound offers answered", 1)
err := offer.setAnswer(answer, func(dc datachannel.ReadWriteCloser) {
metrics.Add("outbound offers answered with datachannel", 1)