Make type piece public

commit b3137b1ede
parent f7e07237ed
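In short, a summary inferred from the diff below rather than part of the original commit message: the internal piece struct becomes the exported Piece, its bookkeeping fields (hash, dirtyChunks, hashing, queuedForHash, everHashed, publicPieceState) become unexported, and Torrent.Piece now returns *Piece. A minimal sketch of what this means for code outside the package, assuming the import path github.com/anacrolix/torrent:

// Hypothetical external caller, not part of this commit. Before this change
// the value returned by Torrent.Piece had an unexported type and could not be
// named outside the package; now it can be stored and passed around, and its
// state is reached through the exported methods rather than public fields.
package example

import "github.com/anacrolix/torrent" // assumed import path

func firstPiece(t *torrent.Torrent) *torrent.Piece {
	return t.Piece(0)
}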
@@ -122,7 +122,7 @@ func BenchmarkConnectionMainReadLoop(b *testing.B) {
 	ts.writeSem.Lock()
 	for range iter.N(b.N) {
 		cl.mu.Lock()
-		t.pieces[0].DirtyChunks.Clear()
+		t.pieces[0].dirtyChunks.Clear()
 		cl.mu.Unlock()
 		n, err := w.Write(wb)
 		require.NoError(b, err)
piece.go (72 lines changed)

@@ -29,21 +29,21 @@ const (
 	PiecePriorityNow // A Reader is reading in this piece.
 )
 
-type piece struct {
+type Piece struct {
 	// The completed piece SHA1 hash, from the metainfo "pieces" field.
-	Hash metainfo.Hash
+	hash metainfo.Hash
 	t     *Torrent
 	index int
 	// Chunks we've written to since the last check. The chunk offset and
 	// length can be determined by the request chunkSize in use.
-	DirtyChunks bitmap.Bitmap
+	dirtyChunks bitmap.Bitmap
 
-	Hashing       bool
-	QueuedForHash bool
-	EverHashed    bool
+	hashing       bool
+	queuedForHash bool
+	everHashed    bool
 	numVerifies int64
 
-	PublicPieceState PieceState
+	publicPieceState PieceState
 	priority piecePriority
 
 	pendingWritesMutex sync.Mutex
@@ -51,55 +51,55 @@ type piece struct {
 	noPendingWrites sync.Cond
 }
 
-func (p *piece) Info() metainfo.Piece {
+func (p *Piece) Info() metainfo.Piece {
 	return p.t.info.Piece(p.index)
 }
 
-func (p *piece) Storage() storage.Piece {
+func (p *Piece) Storage() storage.Piece {
 	return p.t.storage.Piece(p.Info())
 }
 
-func (p *piece) pendingChunkIndex(chunkIndex int) bool {
-	return !p.DirtyChunks.Contains(chunkIndex)
+func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
+	return !p.dirtyChunks.Contains(chunkIndex)
 }
 
-func (p *piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
+func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
 	return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
 }
 
-func (p *piece) hasDirtyChunks() bool {
-	return p.DirtyChunks.Len() != 0
+func (p *Piece) hasDirtyChunks() bool {
+	return p.dirtyChunks.Len() != 0
 }
 
-func (p *piece) numDirtyChunks() (ret int) {
-	return p.DirtyChunks.Len()
+func (p *Piece) numDirtyChunks() (ret int) {
+	return p.dirtyChunks.Len()
 }
 
-func (p *piece) unpendChunkIndex(i int) {
-	p.DirtyChunks.Add(i)
+func (p *Piece) unpendChunkIndex(i int) {
+	p.dirtyChunks.Add(i)
 }
 
-func (p *piece) pendChunkIndex(i int) {
-	p.DirtyChunks.Remove(i)
+func (p *Piece) pendChunkIndex(i int) {
+	p.dirtyChunks.Remove(i)
 }
 
-func (p *piece) numChunks() int {
+func (p *Piece) numChunks() int {
 	return p.t.pieceNumChunks(p.index)
 }
 
-func (p *piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
-	ret = p.DirtyChunks.Copy()
+func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
+	ret = p.dirtyChunks.Copy()
 	ret.FlipRange(0, p.numChunks())
 	return
 }
 
-func (p *piece) incrementPendingWrites() {
+func (p *Piece) incrementPendingWrites() {
 	p.pendingWritesMutex.Lock()
 	p.pendingWrites++
 	p.pendingWritesMutex.Unlock()
 }
 
-func (p *piece) decrementPendingWrites() {
+func (p *Piece) decrementPendingWrites() {
 	p.pendingWritesMutex.Lock()
 	if p.pendingWrites == 0 {
 		panic("assertion")
@@ -111,7 +111,7 @@ func (p *piece) decrementPendingWrites() {
 	p.pendingWritesMutex.Unlock()
 }
 
-func (p *piece) waitNoPendingWrites() {
+func (p *Piece) waitNoPendingWrites() {
 	p.pendingWritesMutex.Lock()
 	for p.pendingWrites != 0 {
 		p.noPendingWrites.Wait()
@@ -119,15 +119,15 @@ func (p *piece) waitNoPendingWrites() {
 	p.pendingWritesMutex.Unlock()
 }
 
-func (p *piece) chunkIndexDirty(chunk int) bool {
-	return p.DirtyChunks.Contains(chunk)
+func (p *Piece) chunkIndexDirty(chunk int) bool {
+	return p.dirtyChunks.Contains(chunk)
 }
 
-func (p *piece) chunkIndexSpec(chunk int) chunkSpec {
+func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
 	return chunkIndexSpec(chunk, p.length(), p.chunkSize())
 }
 
-func (p *piece) numDirtyBytes() (ret pp.Integer) {
+func (p *Piece) numDirtyBytes() (ret pp.Integer) {
 	defer func() {
 		if ret > p.length() {
 			panic("too many dirty bytes")
@@ -142,30 +142,30 @@ func (p *piece) numDirtyBytes() (ret pp.Integer) {
 	return
 }
 
-func (p *piece) length() pp.Integer {
+func (p *Piece) length() pp.Integer {
 	return p.t.pieceLength(p.index)
 }
 
-func (p *piece) chunkSize() pp.Integer {
+func (p *Piece) chunkSize() pp.Integer {
 	return p.t.chunkSize
 }
 
-func (p *piece) lastChunkIndex() int {
+func (p *Piece) lastChunkIndex() int {
 	return p.numChunks() - 1
 }
 
-func (p *piece) bytesLeft() (ret pp.Integer) {
+func (p *Piece) bytesLeft() (ret pp.Integer) {
 	if p.t.pieceComplete(p.index) {
 		return 0
 	}
 	return p.length() - p.numDirtyBytes()
 }
 
-func (p *piece) VerifyData() {
+func (p *Piece) VerifyData() {
 	p.t.cl.mu.Lock()
 	defer p.t.cl.mu.Unlock()
 	target := p.numVerifies + 1
-	if p.Hashing {
+	if p.hashing {
 		target++
 	}
 	p.t.queuePieceCheck(p.index)
t.go (2 lines changed)

@@ -210,7 +210,7 @@ func (t *Torrent) AddTrackers(announceList [][]string) {
 	t.addTrackers(announceList)
 }
 
-func (t *Torrent) Piece(i int) *piece {
+func (t *Torrent) Piece(i int) *Piece {
 	t.cl.mu.Lock()
 	defer t.cl.mu.Unlock()
 	return &t.pieces[i]
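With the accessor above now returning the exported type, callers reach piece data through methods rather than struct fields. A short usage sketch, hypothetical caller code rather than anything from this commit, assuming the import path github.com/anacrolix/torrent and a torrent whose info is already available:

package example

import (
	"fmt"

	"github.com/anacrolix/torrent" // assumed import path
)

// inspectFirstPiece prints the metainfo length of piece 0 and takes its
// storage handle, using only methods that stay exported after this commit.
func inspectFirstPiece(tor *torrent.Torrent) {
	p := tor.Piece(0)
	fmt.Println("piece 0 length:", p.Info().Length()) // length from metainfo.Piece
	_ = p.Storage()                                   // storage.Piece backing this piece
}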
torrent.go (40 lines changed)

@@ -47,7 +47,7 @@ type Torrent struct {
 
 	closed   missinggo.Event
 	infoHash metainfo.Hash
-	pieces   []piece
+	pieces   []Piece
 	// Values are the piece indices that changed.
 	pieceStateChanges *pubsub.PubSub
 	// The size of chunks to request from peers over the wire. This is
@@ -233,13 +233,13 @@ func infoPieceHashes(info *metainfo.Info) (ret []string) {
 
 func (t *Torrent) makePieces() {
 	hashes := infoPieceHashes(t.info)
-	t.pieces = make([]piece, len(hashes))
+	t.pieces = make([]Piece, len(hashes))
 	for i, hash := range hashes {
 		piece := &t.pieces[i]
 		piece.t = t
 		piece.index = i
 		piece.noPendingWrites.L = &piece.pendingWritesMutex
-		missinggo.CopyExact(piece.Hash[:], hash)
+		missinggo.CopyExact(piece.hash[:], hash)
 	}
 }
 
@@ -344,7 +344,7 @@ func (t *Torrent) pieceState(index int) (ret PieceState) {
 	if t.pieceComplete(index) {
 		ret.Complete = true
 	}
-	if p.QueuedForHash || p.Hashing {
+	if p.queuedForHash || p.hashing {
 		ret.Checking = true
 	}
 	if !ret.Complete && t.piecePartiallyDownloaded(index) {
@@ -598,7 +598,7 @@ func (t *Torrent) pieceNumChunks(piece int) int {
 }
 
 func (t *Torrent) pendAllChunkSpecs(pieceIndex int) {
-	t.pieces[pieceIndex].DirtyChunks.Clear()
+	t.pieces[pieceIndex].dirtyChunks.Clear()
 }
 
 type Peer struct {
@@ -691,10 +691,10 @@ func (t *Torrent) wantPieceIndex(index int) bool {
 		return false
 	}
 	p := &t.pieces[index]
-	if p.QueuedForHash {
+	if p.queuedForHash {
 		return false
 	}
-	if p.Hashing {
+	if p.hashing {
 		return false
 	}
 	if t.pieceComplete(index) {
@@ -737,8 +737,8 @@ type PieceStateChange struct {
 func (t *Torrent) publishPieceChange(piece int) {
 	cur := t.pieceState(piece)
 	p := &t.pieces[piece]
-	if cur != p.PublicPieceState {
-		p.PublicPieceState = cur
+	if cur != p.publicPieceState {
+		p.publicPieceState = cur
 		t.pieceStateChanges.Publish(PieceStateChange{
 			piece,
 			cur,
@@ -754,7 +754,7 @@ func (t *Torrent) pieceNumPendingChunks(piece int) int {
 }
 
 func (t *Torrent) pieceAllDirty(piece int) bool {
-	return t.pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)
+	return t.pieces[piece].dirtyChunks.Len() == t.pieceNumChunks(piece)
 }
 
 func (t *Torrent) readersChanged() {
@@ -1383,17 +1383,17 @@ func (t *Torrent) pieceHashed(piece int, correct bool) {
 	}
 	p := &t.pieces[piece]
 	touchers := t.reapPieceTouchers(piece)
-	if p.EverHashed {
+	if p.everHashed {
 		// Don't score the first time a piece is hashed, it could be an
 		// initial check.
 		if correct {
 			pieceHashedCorrect.Add(1)
 		} else {
-			log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.Hash, len(touchers))
+			log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.hash, len(touchers))
			pieceHashedNotCorrect.Add(1)
 		}
 	}
-	p.EverHashed = true
+	p.everHashed = true
 	if correct {
 		for _, c := range touchers {
 			c.goodPiecesDirtied++
@@ -1472,22 +1472,22 @@ func (t *Torrent) verifyPiece(piece int) {
 	cl.mu.Lock()
 	defer cl.mu.Unlock()
 	p := &t.pieces[piece]
-	for p.Hashing || t.storage == nil {
+	for p.hashing || t.storage == nil {
 		cl.event.Wait()
 	}
-	p.QueuedForHash = false
+	p.queuedForHash = false
 	if t.closed.IsSet() || t.pieceComplete(piece) {
 		t.updatePiecePriority(piece)
 		return
 	}
-	p.Hashing = true
+	p.hashing = true
 	t.publishPieceChange(piece)
 	cl.mu.Unlock()
 	sum := t.hashPiece(piece)
 	cl.mu.Lock()
 	p.numVerifies++
-	p.Hashing = false
-	t.pieceHashed(piece, sum == p.Hash)
+	p.hashing = false
+	t.pieceHashed(piece, sum == p.hash)
 }
 
 // Return the connections that touched a piece, and clear the entry while
@@ -1512,10 +1512,10 @@ func (t *Torrent) connsAsSlice() (ret []*connection) {
 // Currently doesn't really queue, but should in the future.
 func (t *Torrent) queuePieceCheck(pieceIndex int) {
 	piece := &t.pieces[pieceIndex]
-	if piece.QueuedForHash {
+	if piece.queuedForHash {
 		return
 	}
-	piece.QueuedForHash = true
+	piece.queuedForHash = true
 	t.publishPieceChange(pieceIndex)
 	go t.verifyPiece(pieceIndex)
 }
@@ -149,7 +149,7 @@ func TestPieceHashFailed(t *testing.T) {
 	}
 	require.NoError(t, tt.setInfoBytes(mi.InfoBytes))
 	tt.cl.mu.Lock()
-	tt.pieces[1].DirtyChunks.AddRange(0, 3)
+	tt.pieces[1].dirtyChunks.AddRange(0, 3)
 	require.True(t, tt.pieceAllDirty(1))
 	tt.pieceHashed(1, false)
 	// Dirty chunks should be cleared so we can try again.
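The test above exercises the hash-failure path by calling pieceHashed directly; from outside the package the entry point is the exported VerifyData, which goes through queuePieceCheck as shown in the torrent.go hunks. A sketch of re-checking every piece that way, a hypothetical helper that assumes Torrent.NumPieces is exported (it is not shown in this diff):

package example

import "github.com/anacrolix/torrent" // assumed import path

// reverifyAll queues a hash check for every piece of the torrent through the
// exported API. Assumes tor's info is already available.
func reverifyAll(tor *torrent.Torrent) {
	for i := 0; i < tor.NumPieces(); i++ {
		tor.Piece(i).VerifyData()
	}
}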