// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"container/list"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

// requestDistributor implements a mechanism that distributes requests to
// suitable peers, obeying flow control rules and prioritizing them in creation
// order (even when a resend is necessary).
type requestDistributor struct {
	clock        mclock.Clock
	reqQueue     *list.List
	lastReqOrder uint64
	peers        map[distPeer]struct{}
	peerLock     sync.RWMutex
	loopChn      chan struct{}
	loopNextSent bool
	lock         sync.Mutex

	closeCh chan struct{}
	wg      sync.WaitGroup
}

// distPeer is an LES server peer interface for the request distributor.
// waitBefore returns either the necessary waiting time before sending a request
// with the given upper estimated cost or the estimated remaining relative buffer
// value after sending such a request (in which case the request can be sent
// immediately). At least one of these values is always zero.
type distPeer interface {
	waitBefore(uint64) (time.Duration, float64)
	canQueue() bool
	queueSend(f func())
}
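
// sketchPeer is an illustrative distPeer implementation added for this write-up
// (it is not part of the original file): it always reports a full remaining
// buffer and executes queued sends synchronously, which is enough to exercise
// the distributor in isolation.
type sketchPeer struct{}

func (sketchPeer) waitBefore(cost uint64) (time.Duration, float64) {
	// No waiting needed and the whole relative buffer remains available.
	return 0, 1
}
func (sketchPeer) canQueue() bool     { return true }
func (sketchPeer) queueSend(f func()) { f() }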

// distReq is the request abstraction used by the distributor. It is based on
// three callback functions:
// - getCost returns the upper estimate of the cost of sending the request to a given peer
// - canSend tells if the server peer is suitable to serve the request
// - request prepares sending the request to the given peer and returns a function that
// does the actual sending. Request order should be preserved but the callback itself should not
// block until it is sent because other peers might still be able to receive requests while
// one of them is blocking. Instead, the returned function is put in the peer's send queue.
type distReq struct {
	getCost func(distPeer) uint64
	canSend func(distPeer) bool
	request func(distPeer) func()

	reqOrder     uint64
	sentChn      chan distPeer
	element      *list.Element
	waitForPeers mclock.AbsTime
	enterQueue   mclock.AbsTime
}
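
// An illustrative way a caller might fill in a distReq; the constant cost and
// empty send body below are placeholders, not the real LES message encoding:
//
//	req := &distReq{
//		getCost: func(dp distPeer) uint64 {
//			return 100 // upper cost estimate for serving this request
//		},
//		canSend: func(dp distPeer) bool {
//			return true // e.g. check that dp has the requested data
//		},
//		request: func(dp distPeer) func() {
//			return func() {
//				// encode and write the actual request message to dp here
//			}
//		},
//	}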

// newRequestDistributor creates a new request distributor
func newRequestDistributor(peers *peerSet, clock mclock.Clock) *requestDistributor {
	d := &requestDistributor{
		clock:    clock,
		reqQueue: list.New(),
		loopChn:  make(chan struct{}, 2),
		closeCh:  make(chan struct{}),
		peers:    make(map[distPeer]struct{}),
	}
	if peers != nil {
		peers.notify(d)
	}
	d.wg.Add(1)
	go d.loop()
	return d
}
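
// A typical lifecycle, sketched (peers and req are assumed to exist; the real
// callers in this package go through retrieveManager rather than using the
// distributor directly):
//
//	dist := newRequestDistributor(peers, &mclock.System{})
//	defer dist.close()
//
//	sentCh := dist.queue(req)
//	if p, ok := <-sentCh; ok {
//		_ = p // the send callback has been handed to peer p's send queue
//	}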

// registerPeer implements peerSetNotify
func (d *requestDistributor) registerPeer(p *peer) {
	d.peerLock.Lock()
	d.peers[p] = struct{}{}
	d.peerLock.Unlock()
}

// unregisterPeer implements peerSetNotify
func (d *requestDistributor) unregisterPeer(p *peer) {
	d.peerLock.Lock()
	delete(d.peers, p)
	d.peerLock.Unlock()
}

// registerTestPeer adds a new test peer
func (d *requestDistributor) registerTestPeer(p distPeer) {
	d.peerLock.Lock()
	d.peers[p] = struct{}{}
	d.peerLock.Unlock()
}

// distMaxWait is the maximum waiting time after which further necessary waiting
// times are recalculated based on new feedback from the servers
const distMaxWait = time.Millisecond * 50

// waitForPeers is the time window in which a request does not fail even if it
// has no suitable peers to send to at the moment
const waitForPeers = time.Second * 3
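
// Worked example of how these two constants interact: a request queued at time
// T with no suitable peer is kept alive until T+3s before its channel is
// closed, and while any wait is pending the loop below re-evaluates at least
// every 50ms so that fresh flow-control feedback can shorten the wait.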

// loop is the distributor's main event loop: it wakes up whenever a new
// request is queued or a previously computed waiting time may have elapsed,
// sends every request that can be sent immediately, and schedules a timed
// re-check for the shortest remaining waiting time.
func (d *requestDistributor) loop() {
	defer d.wg.Done()
	for {
		select {
		case <-d.closeCh:
			d.lock.Lock()
			elem := d.reqQueue.Front()
			for elem != nil {
				req := elem.Value.(*distReq)
				close(req.sentChn)
				req.sentChn = nil
				elem = elem.Next()
			}
			d.lock.Unlock()
			return
		case <-d.loopChn:
			d.lock.Lock()
			d.loopNextSent = false
		loop:
			for {
				peer, req, wait := d.nextRequest()
				if req != nil && wait == 0 {
					chn := req.sentChn // save sentChn because remove sets it to nil
					d.remove(req)
					send := req.request(peer)
					if send != nil {
						peer.queueSend(send)
						requestSendDelay.Update(time.Duration(d.clock.Now() - req.enterQueue))
					}
					chn <- peer
					close(chn)
				} else {
					if wait == 0 {
						// no request to send and nothing to wait for; the next
						// queued request will wake up the loop
						break loop
					}
					d.loopNextSent = true // a "next" signal has been sent, do not send another one until this one has been received
					if wait > distMaxWait {
						// waiting times may be reduced by incoming request replies;
						// if the wait is too long, recalculate it periodically
						wait = distMaxWait
					}
					go func() {
						d.clock.Sleep(wait)
						d.loopChn <- struct{}{}
					}()
					break loop
				}
			}
			d.lock.Unlock()
		}
	}
}

// selectPeerItem represents a peer to be selected for a request by weightedRandomSelect
type selectPeerItem struct {
	peer   distPeer
	req    *distReq
	weight int64
}

// Weight implements wrsItem interface
func (sp selectPeerItem) Weight() int64 {
	return sp.weight
}

// nextRequest returns the next possible request from any peer, along with the
// associated peer and necessary waiting time
func (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {
	checkedPeers := make(map[distPeer]struct{})
	elem := d.reqQueue.Front()
	var (
		bestWait time.Duration
		sel      *weightedRandomSelect
	)

	d.peerLock.RLock()
	defer d.peerLock.RUnlock()

	peerCount := len(d.peers)
	for (len(checkedPeers) < peerCount || elem == d.reqQueue.Front()) && elem != nil {
		req := elem.Value.(*distReq)
		canSend := false
		now := d.clock.Now()
		if req.waitForPeers > now {
			canSend = true
			wait := time.Duration(req.waitForPeers - now)
			if bestWait == 0 || wait < bestWait {
				bestWait = wait
			}
		}
		for peer := range d.peers {
			if _, ok := checkedPeers[peer]; !ok && peer.canQueue() && req.canSend(peer) {
				canSend = true
				cost := req.getCost(peer)
				wait, bufRemain := peer.waitBefore(cost)
				if wait == 0 {
					if sel == nil {
						sel = newWeightedRandomSelect()
					}
					sel.update(selectPeerItem{peer: peer, req: req, weight: int64(bufRemain*1000000) + 1})
				} else {
					if bestWait == 0 || wait < bestWait {
						bestWait = wait
					}
				}
				checkedPeers[peer] = struct{}{}
			}
		}
		next := elem.Next()
		if !canSend && elem == d.reqQueue.Front() {
			close(req.sentChn)
			d.remove(req)
		}
		elem = next
	}

	if sel != nil {
		c := sel.choose().(selectPeerItem)
		return c.peer, c.req, 0
	}
	return nil, nil, bestWait
}
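
// Note on the selection weight above: peers that can serve immediately enter a
// weighted random draw with weight int64(bufRemain*1000000)+1, so a peer with
// an estimated 50% of its flow-control buffer left gets weight 500001 versus
// 100001 for one with 10% left, biasing traffic toward less loaded servers
// while the +1 keeps every eligible peer selectable.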

// queue adds a request to the distribution queue and returns a channel on which
// the receiving peer is sent once the request has been sent (i.e. the request
// callback has returned). If the request is cancelled or times out without a
// suitable peer, the channel is closed without sending any peer reference to it.
func (d *requestDistributor) queue(r *distReq) chan distPeer {
	d.lock.Lock()
	defer d.lock.Unlock()

	if r.reqOrder == 0 {
		d.lastReqOrder++
		r.reqOrder = d.lastReqOrder
		r.waitForPeers = d.clock.Now() + mclock.AbsTime(waitForPeers)
	}
	// Assign the queueing timestamp whether the request is a new one or a
	// re-queued one.
	r.enterQueue = d.clock.Now()

	back := d.reqQueue.Back()
	if back == nil || r.reqOrder > back.Value.(*distReq).reqOrder {
		r.element = d.reqQueue.PushBack(r)
	} else {
		before := d.reqQueue.Front()
		for before.Value.(*distReq).reqOrder < r.reqOrder {
			before = before.Next()
		}
		r.element = d.reqQueue.InsertBefore(r, before)
	}

	if !d.loopNextSent {
		d.loopNextSent = true
		d.loopChn <- struct{}{}
	}

	r.sentChn = make(chan distPeer, 1)
	return r.sentChn
}
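
// Sketch of a caller waiting for delivery with a timeout (hypothetical glue
// code; in this package retrieveManager wraps this pattern):
//
//	sentCh := dist.queue(req)
//	select {
//	case p, ok := <-sentCh:
//		if ok {
//			_ = p // sent to p; the reply arrives on the peer's message stream
//		} // !ok: no suitable peer within waitForPeers, or distributor closed
//	case <-time.After(time.Second):
//		if dist.cancel(req) {
//			// removed before sending; the callbacks will not run anymore
//		}
//	}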

// cancel removes a request from the queue if it has not been sent yet (returns
// false if it has been sent already). It is guaranteed that the callback functions
// will not be called after cancel returns.
func (d *requestDistributor) cancel(r *distReq) bool {
	d.lock.Lock()
	defer d.lock.Unlock()

	if r.sentChn == nil {
		return false
	}

	close(r.sentChn)
	d.remove(r)
	return true
}

// remove removes a request from the queue
func (d *requestDistributor) remove(r *distReq) {
	r.sentChn = nil
	if r.element != nil {
		d.reqQueue.Remove(r.element)
		r.element = nil
	}
}
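
// close shuts the distributor down: the event loop closes the channels of all
// still-queued requests and exits, and close blocks until it has done so.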
func (d *requestDistributor) close() {
	close(d.closeCh)
	d.wg.Wait()
}