Optimize nextRequestState to return cancel and new requests, and reinstate requestsLowWater

Matt Joiner 2017-09-02 10:36:43 +10:00
parent bad6f07f5e
commit 4e8f6b8e5b
1 changed file with 24 additions and 29 deletions


@@ -332,14 +332,13 @@ func (cn *connection) SetInterested(interested bool, msg func(pp.Message) bool)
 
 func (cn *connection) fillWriteBuffer(msg func(pp.Message) bool) {
 	numFillBuffers.Add(1)
-	rs, i := cn.desiredRequestState()
+	cancel, new, i := cn.desiredRequestState()
 	if !cn.SetInterested(i, msg) {
 		return
 	}
-	sentCancels := false
-	for r := range cn.requests {
-		if _, ok := rs[r]; !ok {
-			sentCancels = true
+	if cancel && len(cn.requests) != 0 {
+		fillBufferSentCancels.Add(1)
+		for r := range cn.requests {
 			cn.deleteRequest(r)
 			// log.Printf("%p: cancelling request: %v", cn, r)
 			if !msg(pp.Message{
@@ -352,17 +351,13 @@ func (cn *connection) fillWriteBuffer(msg func(pp.Message) bool) {
 			}
 		}
 	}
-	if sentCancels {
-		fillBufferSentCancels.Add(1)
-	}
-	sentRequests := false
-	for r := range rs {
-		if _, ok := cn.requests[r]; !ok {
+	if len(new) != 0 {
+		fillBufferSentRequests.Add(1)
+		for _, r := range new {
 			if cn.requests == nil {
 				cn.requests = make(map[request]struct{}, cn.nominalMaxRequests())
 			}
 			cn.requests[r] = struct{}{}
-			sentRequests = true
 			// log.Printf("%p: requesting %v", cn, r)
 			if !msg(pp.Message{
 				Type: pp.Request,
@@ -373,9 +368,10 @@ func (cn *connection) fillWriteBuffer(msg func(pp.Message) bool) {
 				return
 			}
 		}
-	}
-	if sentRequests {
-		fillBufferSentRequests.Add(1)
+		// If we didn't completely top up the requests, we shouldn't mark the
+		// low water, since we'll want to top up the requests as soon as we
+		// have more write buffer space.
+		cn.requestsLowWater = len(cn.requests) / 2
 	}
 }
 
@@ -471,37 +467,36 @@ func nextRequestState(
 	requestsLowWater int,
 	requestsHighWater int,
 ) (
-	requests map[request]struct{},
+	cancelExisting bool,
+	newRequests []request,
 	interested bool,
 ) {
 	if !networkingEnabled || nextPieces.IsEmpty() {
-		return nil, false
+		return true, nil, false
 	}
 	if peerChoking || len(currentRequests) > requestsLowWater {
-		return currentRequests, true
-	}
-	requests = make(map[request]struct{}, requestsHighWater)
-	for r := range currentRequests {
-		requests[r] = struct{}{}
+		return false, nil, !nextPieces.IsEmpty()
 	}
 	nextPieces.IterTyped(func(piece int) bool {
 		return pendingChunks(piece, func(cs chunkSpec) bool {
-			if len(requests) >= requestsHighWater {
-				return false
-			}
 			r := request{pp.Integer(piece), cs}
-			requests[r] = struct{}{}
-			return true
+			if _, ok := currentRequests[r]; !ok {
+				if newRequests == nil {
+					newRequests = make([]request, 0, requestsHighWater-len(currentRequests))
+				}
+				newRequests = append(newRequests, r)
+			}
+			return len(currentRequests)+len(newRequests) < requestsHighWater
 		})
 	})
-	return requests, true
+	return false, newRequests, true
 }
 
 func (cn *connection) updateRequests() {
 	cn.tickleWriter()
 }
 
-func (cn *connection) desiredRequestState() (map[request]struct{}, bool) {
+func (cn *connection) desiredRequestState() (bool, []request, bool) {
 	return nextRequestState(
 		cn.t.networkingEnabled,
 		cn.requests,
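
As a rough, editor-added illustration of the contract this commit introduces (not part of the change itself): nextRequestState no longer returns a replacement request set; it reports whether all existing requests should be cancelled, which new requests to issue on top of the current ones, and whether to stay interested. The sketch below is minimal, runnable Go under simplifying assumptions: the request type, its piece/begin fields, and the pending slice are hypothetical stand-ins for the real request, chunkSpec, nextPieces, and pendingChunks in this package; only the return shape and the low/high water behaviour mirror the diff above.

package main

import "fmt"

// request is a simplified stand-in for the real request type, which pairs a
// piece index with a chunkSpec.
type request struct {
	piece, begin int
}

// nextRequestState, reduced to the contract introduced by this commit: report
// whether existing requests should all be cancelled, which new requests to
// add, and whether the peer is still interesting.
func nextRequestState(
	networkingEnabled bool,
	currentRequests map[request]struct{},
	peerChoking bool,
	pending []request, // stands in for the nextPieces/pendingChunks iteration
	requestsLowWater, requestsHighWater int,
) (cancelExisting bool, newRequests []request, interested bool) {
	if !networkingEnabled || len(pending) == 0 {
		// Nothing is wanted: cancel everything and drop interest.
		return true, nil, false
	}
	if peerChoking || len(currentRequests) > requestsLowWater {
		// Choked, or still above the low-water mark: keep what we have.
		return false, nil, true
	}
	// Top up towards the high-water mark with requests we don't already have.
	for _, r := range pending {
		if len(currentRequests)+len(newRequests) >= requestsHighWater {
			break
		}
		if _, ok := currentRequests[r]; !ok {
			newRequests = append(newRequests, r)
		}
	}
	return false, newRequests, true
}

func main() {
	current := map[request]struct{}{{0, 0}: {}}
	pending := []request{{0, 0}, {0, 1}, {1, 0}}
	cancel, newReqs, interested := nextRequestState(true, current, false, pending, 2, 4)
	fmt.Println(cancel, newReqs, interested) // false [{0 1} {1 0}] true
}

A caller in the shape of fillWriteBuffer would then cancel every entry in its current request map when cancelExisting is true, and otherwise send a Request message for each entry of newRequests, which is what the diff above does.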