torrent/storage/test/bench-piece-mark-complete.go
YenForYang a8db640c62
Drop bradfitz/iter dependency (#605)
* Drop bradfitz/iter dependency

`range iter.N` looks nice and doesn't allocate, but unfortunately using a `range` expression blocks a function from being inlined wherever it's used (for now). It's not that we need inlining in all cases, but I do think a C-style for loop looks just as nice and is probably clearer to the majority. There also aren't any clear disadvantages to the change (unless you just happen to dislike the look of C).
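
For illustration, this is the shape of the change applied throughout; a minimal sketch where `n` and the loop body are placeholders, not code from this PR:

package main

import (
	"fmt"

	"github.com/bradfitz/iter"
)

func main() {
	const n = 3

	// Before: iter.N(n) is a length-n slice of empty structs, so ranging
	// over it doesn't allocate, but the range expression currently blocks
	// inlining of the enclosing function.
	for i := range iter.N(n) {
		fmt.Println(i)
	}

	// After: a plain C-style loop, equivalent and inlining-friendly.
	for i := 0; i < n; i += 1 {
		fmt.Println(i)
	}
}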

* Update misc_test.go

* Update rlreader_test.go

* Update torrent_test.go

* Update bench_test.go

* Update client_test.go

* Update iplist_test.go

* Update mse_test.go

* Update peerconn_test.go

* Update peerconn.go

* Update order_test.go

* Update decoder_test.go

* Update main.go

* Update bench-piece-mark-complete.go

* Update main.go

* Update torrent.go

* Update iplist_test.go

* Update main.go
2021-09-14 13:46:50 +10:00


package test_storage

import (
	"bytes"
	"math/rand"
	"sync"
	"testing"

	"github.com/anacrolix/torrent/metainfo"
	"github.com/anacrolix/torrent/storage"
	qt "github.com/frankban/quicktest"
)

const (
	// 16 KiB, the conventional BitTorrent request/chunk size.
	ChunkSize        = 1 << 14
	DefaultPieceSize = 2 << 20
	DefaultNumPieces = 16
)

// BenchmarkPieceMarkComplete writes every piece concurrently in ChunkSize
// increments, marks it complete, then reads it back and verifies the data.
func BenchmarkPieceMarkComplete(
	b *testing.B, ci storage.ClientImpl,
	pieceSize int64, numPieces int,
	// This drives any special handling around capacity that may be configured into the storage
	// implementation.
	capacity int64,
) {
	c := qt.New(b)
	info := &metainfo.Info{
		Pieces:      make([]byte, numPieces*metainfo.HashSize),
		PieceLength: pieceSize,
		Length:      pieceSize * int64(numPieces),
		Name:        "TorrentName",
	}
	ti, err := ci.OpenTorrent(info, metainfo.Hash{})
	c.Assert(err, qt.IsNil)
	tw := storage.Torrent{ti}
	defer tw.Close()
	rand.Read(info.Pieces)
	data := make([]byte, pieceSize)
	readData := make([]byte, pieceSize)
	b.SetBytes(int64(numPieces) * pieceSize)
	oneIter := func() {
		for pieceIndex := 0; pieceIndex < numPieces; pieceIndex += 1 {
			pi := tw.Piece(info.Piece(pieceIndex))
			rand.Read(data)
			b.StartTimer()
			// Write the piece concurrently, one goroutine per chunk.
			var wg sync.WaitGroup
			for off := int64(0); off < int64(len(data)); off += ChunkSize {
				wg.Add(1)
				go func(off int64) {
					defer wg.Done()
					n, err := pi.WriteAt(data[off:off+ChunkSize], off)
					if err != nil {
						panic(err)
					}
					if n != ChunkSize {
						panic(n)
					}
				}(off)
			}
			wg.Wait()
			if capacity == 0 {
				pi.MarkNotComplete()
			}
			// This might not apply if users of this benchmark don't cache with the expected capacity.
			c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: false, Ok: true})
			c.Assert(pi.MarkComplete(), qt.IsNil)
			c.Assert(pi.Completion(), qt.Equals, storage.Completion{Complete: true, Ok: true})
			// Read the piece back and check it round-trips unchanged.
			n, err := pi.WriteTo(bytes.NewBuffer(readData[:0]))
			b.StopTimer()
			c.Assert(err, qt.IsNil)
			c.Assert(n, qt.Equals, int64(len(data)))
			c.Assert(bytes.Equal(readData[:n], data), qt.IsTrue)
		}
	}
	// Fill the cache: enough whole-torrent passes to reach capacity (ceiling division).
	if capacity > 0 {
		iterN := int((capacity + info.TotalLength() - 1) / info.TotalLength())
		for i := 0; i < iterN; i += 1 {
			oneIter()
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i += 1 {
		oneIter()
	}
}
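
For reference, a storage backend would drive this helper along these lines. This is a minimal, hypothetical sketch assuming the repo's file-backed storage (storage.NewFile, which returns a closeable ClientImpl); the wiring is illustrative, not a benchmark that exists in the repo:

package storage_test

import (
	"testing"

	"github.com/anacrolix/torrent/storage"
	test_storage "github.com/anacrolix/torrent/storage/test"
)

// Runs the shared benchmark against file-backed storage with no capacity
// limit (capacity == 0, so each piece is marked not-complete again before
// the completion assertions).
func BenchmarkFilePieceMarkComplete(b *testing.B) {
	ci := storage.NewFile(b.TempDir())
	defer ci.Close()
	test_storage.BenchmarkPieceMarkComplete(
		b, ci, test_storage.DefaultPieceSize, test_storage.DefaultNumPieces, 0,
	)
}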