Upgrade linter to 1.50.0

Andrea Maria Piana 2023-01-13 17:12:46 +00:00
parent ccbd2866fe
commit ed9ca8392c
23 changed files with 90 additions and 103 deletions

View File

@@ -145,9 +145,8 @@ func Decode(bytesString string, types []string) ([]interface{}, error) {
 		return nil, fmt.Errorf("invalid ABI definition %s: %v", def, err)
 	}
-	if strings.HasPrefix(bytesString, "0x") {
-		bytesString = bytesString[2:]
-	}
+	bytesString = strings.TrimPrefix(bytesString, "0x")
 	bytes, err := hex.DecodeString(bytesString)
 	if err != nil {
 		return nil, fmt.Errorf("invalid hex %s: %v", bytesString, err)
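This is the gosimple S1017 pattern: strings.TrimPrefix already returns the string unchanged when the prefix is absent, so the HasPrefix guard plus manual slicing is redundant. A minimal, self-contained sketch of the equivalence (the inputs are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, s := range []string{"0xdeadbeef", "deadbeef"} {
		// TrimPrefix is a no-op when s does not start with "0x".
		fmt.Println(strings.TrimPrefix(s, "0x"))
	}
}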

View File

@@ -241,7 +241,7 @@ func Utf8decode(str string) ([]byte, error) {
 	byteCount := len(byteArray)
 	byteIndex := 0
 	var codePoints []rune
-	for true {
+	for {
 		codePoint, goOn, err := decodeSymbol(byteArray, byteCount, &byteIndex)
 		if err != nil {
 			return nil, err
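gosimple S1006: an infinite loop in Go is written `for { ... }`; `for true { ... }` behaves the same but is flagged as redundant. A tiny runnable sketch of the idiom (the loop body is illustrative only):

package main

import "fmt"

func main() {
	n := 0
	for { // equivalent to the old "for true", but idiomatic
		n++
		if n == 3 {
			break
		}
	}
	fmt.Println(n) // 3
}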

View File

@@ -3,6 +3,7 @@ package metrics
 import (
 	"fmt"
 	"net/http"
+	"time"

 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -23,8 +24,9 @@ func NewMetricsServer(port int, r metrics.Registry) *Server {
 	mux.Handle("/metrics", Handler(r))
 	p := Server{
 		server: &http.Server{
-			Addr:    fmt.Sprintf(":%d", port),
-			Handler: mux,
+			Addr:              fmt.Sprintf(":%d", port),
+			ReadHeaderTimeout: 5 * time.Second,
+			Handler:           mux,
 		},
 	}
 	return &p
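The added ReadHeaderTimeout presumably addresses gosec G112: an http.Server with no header read timeout can be held open indefinitely by slow clients (Slowloris-style connection exhaustion). A minimal sketch of a server configured this way (the port and handler are placeholders, not the committed code):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	srv := &http.Server{
		Addr:              ":8080",
		ReadHeaderTimeout: 5 * time.Second, // bound how long a client may take to send request headers
		Handler:           mux,
	}
	_ = srv.ListenAndServe()
}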

View File

@@ -75,15 +75,15 @@ func (kp *KeyPairs) processResult(rows *sql.Rows, groupByKeycard bool) ([]*KeyPa
 func (kp *KeyPairs) getAllRows(groupByKeycard bool) ([]*KeyPair, error) {
 	rows, err := kp.db.Query(`
-        SELECT
-            keycard_uid,
-            keycard_name,
-            keycard_locked,
-            account_address,
-            key_uid
-        FROM
-            keypairs
-        ORDER BY
-            key_uid
+		SELECT
+			keycard_uid,
+			keycard_name,
+			keycard_locked,
+			account_address,
+			key_uid
+		FROM
+			keypairs
+		ORDER BY
+			key_uid
 	`)
 	if err != nil {
@@ -104,17 +104,17 @@ func (kp *KeyPairs) GetAllMigratedKeyPairs() ([]*KeyPair, error) {
 func (kp *KeyPairs) GetMigratedKeyPairByKeyUID(keyUID string) ([]*KeyPair, error) {
 	rows, err := kp.db.Query(`
-        SELECT
-            keycard_uid,
-            keycard_name,
-            keycard_locked,
-            account_address,
-            key_uid
-        FROM
-            keypairs
-        WHERE
-            key_uid = ?
-        ORDER BY
-            keycard_uid
+		SELECT
+			keycard_uid,
+			keycard_name,
+			keycard_locked,
+			account_address,
+			key_uid
+		FROM
+			keypairs
+		WHERE
+			key_uid = ?
+		ORDER BY
+			keycard_uid
 	`, keyUID)
 	if err != nil {
@@ -143,15 +143,15 @@ func (kp *KeyPairs) AddMigratedKeyPair(kcUID string, kpName string, KeyUID strin
 	}()

 	insert, err = tx.Prepare(`
-        INSERT INTO
-            keypairs
-            (
-                keycard_uid,
-                keycard_name,
-                keycard_locked,
-                account_address,
-                key_uid
-            )
-        VALUES
-            (?, ?, ?, ?, ?);
+		INSERT INTO
+			keypairs
+			(
+				keycard_uid,
+				keycard_name,
+				keycard_locked,
+				account_address,
+				key_uid
+			)
+		VALUES
+			(?, ?, ?, ?, ?);
 	`)
@@ -174,10 +174,10 @@ func (kp *KeyPairs) AddMigratedKeyPair(kcUID string, kpName string, KeyUID strin
 func (kp *KeyPairs) RemoveMigratedAccountsForKeycard(kcUID string, accountAddresses []types.Address) (err error) {
 	inVector := strings.Repeat(",?", len(accountAddresses)-1)
 	query := `
-        DELETE
-        FROM
-            keypairs
-        WHERE
-            keycard_uid = ?
-        AND
-            account_address IN (?` + inVector + `)
+		DELETE
+		FROM
+			keypairs
+		WHERE
+			keycard_uid = ?
+		AND
+			account_address IN (?` + inVector + `)
@@ -202,11 +202,11 @@ func (kp *KeyPairs) RemoveMigratedAccountsForKeycard(kcUID string, accountAddres
 func (kp *KeyPairs) SetKeycardName(kcUID string, kpName string) (err error) {
 	update, err := kp.db.Prepare(`
-        UPDATE
-            keypairs
-        SET
-            keycard_name = ?
-        WHERE
-            keycard_uid = ?
+		UPDATE
+			keypairs
+		SET
+			keycard_name = ?
+		WHERE
+			keycard_uid = ?
 	`)
 	if err != nil {
@@ -220,8 +220,7 @@ func (kp *KeyPairs) SetKeycardName(kcUID string, kpName string) (err error) {
 }

 func (kp *KeyPairs) execUpdateQuery(kcUID string, field string, value interface{}) (err error) {
-	var sql string
-	sql = fmt.Sprintf(`UPDATE keypairs SET %s = ? WHERE keycard_uid = ?`, field)
+	sql := fmt.Sprintf(`UPDATE keypairs SET %s = ? WHERE keycard_uid = ?`, field) // nolint: gosec

 	update, err := kp.db.Prepare(sql)
@@ -249,10 +248,10 @@ func (kp *KeyPairs) UpdateKeycardUID(oldKcUID string, newKcUID string) (err erro
 func (kp *KeyPairs) DeleteKeycard(kcUID string) (err error) {
 	delete, err := kp.db.Prepare(`
-        DELETE
-        FROM
-            keypairs
-        WHERE
-            keycard_uid = ?
+		DELETE
+		FROM
+			keypairs
+		WHERE
+			keycard_uid = ?
 	`)
 	if err != nil {
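The execUpdateQuery change folds the declaration and assignment into one statement (gosimple S1021) and suppresses gosec's warning about SQL built with Sprintf; the interpolated value is a column name supplied by internal callers, while the data still travels through ? placeholders. A hedged sketch of one way to keep that property explicit with an allow-list, in case the column name ever came from outside (the package, allowedFields and buildUpdate are hypothetical, not part of this commit):

package keypairs

import "fmt"

// allowedFields is a hypothetical allow-list of columns that may be updated.
var allowedFields = map[string]bool{"keycard_name": true, "keycard_locked": true}

func buildUpdate(field string) (string, error) {
	if !allowedFields[field] {
		return "", fmt.Errorf("unexpected column %q", field)
	}
	// Only the vetted column name is interpolated; values still go through ? placeholders.
	return fmt.Sprintf(`UPDATE keypairs SET %s = ? WHERE keycard_uid = ?`, field), nil
}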

View File

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"net/http"
 	hpprof "net/http/pprof"
+	"time"

 	"github.com/ethereum/go-ethereum/log"
 )
@@ -24,8 +25,9 @@ func NewProfiler(port int) *Profiler {
 	mux.HandleFunc("/debug/pprof/trace", hpprof.Trace)
 	p := Profiler{
 		server: &http.Server{
-			Addr:    fmt.Sprintf(":%d", port),
-			Handler: mux,
+			Addr:              fmt.Sprintf(":%d", port),
+			ReadHeaderTimeout: 5 * time.Second,
+			Handler:           mux,
 		},
 	}
 	return &p

View File

@@ -1303,14 +1303,15 @@ func (o *Community) ToBytes() ([]byte, error) {
 func (o *Community) Chats() map[string]*protobuf.CommunityChat {
 	response := make(map[string]*protobuf.CommunityChat)

-	if o != nil {
-		o.mutex.Lock()
-		defer o.mutex.Unlock()
-	} else {
+	// Why are we checking here for nil, it should be the responsibility of the caller
+	if o == nil {
 		return response
 	}

-	if o != nil && o.config != nil && o.config.CommunityDescription != nil {
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.config != nil && o.config.CommunityDescription != nil {
 		for k, v := range o.config.CommunityDescription.Chats {
 			response[k] = v
 		}
@@ -1322,14 +1323,15 @@ func (o *Community) Chats() map[string]*protobuf.CommunityChat {
 func (o *Community) Images() map[string]*protobuf.IdentityImage {
 	response := make(map[string]*protobuf.IdentityImage)

-	if o != nil {
-		o.mutex.Lock()
-		defer o.mutex.Unlock()
-	} else {
+	// Why are we checking here for nil, it should be the responsibility of the caller
+	if o == nil {
 		return response
 	}

-	if o != nil && o.config != nil && o.config.CommunityDescription != nil && o.config.CommunityDescription.Identity != nil {
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.config != nil && o.config.CommunityDescription != nil && o.config.CommunityDescription.Identity != nil {
 		for k, v := range o.config.CommunityDescription.Identity.Images {
 			response[k] = v
 		}
@@ -1341,14 +1343,14 @@ func (o *Community) Images() map[string]*protobuf.IdentityImage {
 func (o *Community) Categories() map[string]*protobuf.CommunityCategory {
 	response := make(map[string]*protobuf.CommunityCategory)

-	if o != nil {
-		o.mutex.Lock()
-		defer o.mutex.Unlock()
-	} else {
+	if o == nil {
 		return response
 	}

-	if o != nil && o.config != nil && o.config.CommunityDescription != nil {
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.config != nil && o.config.CommunityDescription != nil {
 		for k, v := range o.config.CommunityDescription.Categories {
 			response[k] = v
 		}
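These three accessors now use a guard clause: the nil receiver returns early before the mutex is taken, which removes the if/else nesting and the later `o != nil` re-check that the linter flagged as redundant. A small self-contained sketch of the same shape (the Cache type is hypothetical and only illustrates the pattern):

package cache

import "sync"

type Cache struct {
	mu    sync.Mutex
	items map[string]int
}

// Snapshot copies the cache; a nil receiver yields an empty map.
func (c *Cache) Snapshot() map[string]int {
	out := make(map[string]int)
	if c == nil { // guard clause: return before touching the mutex
		return out
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	for k, v := range c.items {
		out[k] = v
	}
	return out
}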

View File

@@ -10,7 +10,7 @@ func (o *Community) ChatsByCategoryID(categoryID string) []string {
 	o.mutex.Lock()
 	defer o.mutex.Unlock()

 	var chatIDs []string
-	if o == nil || o.config == nil || o.config.CommunityDescription == nil {
+	if o.config == nil || o.config.CommunityDescription == nil {
 		return chatIDs
 	}

View File

@@ -321,7 +321,7 @@ func (db sqlitePersistence) tableUserMessagesScanAllFields(row scanner, message
 	}

 	if quotedText.Valid {
-		if quotedDeleted.Bool == true {
+		if quotedDeleted.Bool {
 			message.QuotedMessage = &common.QuotedMessage{
 				ID:      quotedID.String,
 				Deleted: quotedDeleted.Bool,
@@ -819,12 +819,12 @@ func (db sqlitePersistence) LatestContactRequestIDs() (map[string]common.Contact
 		LIMIT 20
 	`, cursor), protobuf.ChatMessage_CONTACT_REQUEST)

-	defer rows.Close()
 	if err != nil {
 		return res, err
 	}
+	defer rows.Close()

 	for rows.Next() {
 		var id string
 		var contactRequestState sql.NullInt64
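Moving the defer below the error check matches staticcheck SA5001: when Query fails, rows is nil, and deferring rows.Close() before testing err can panic or mask the failure. A minimal sketch of the safe ordering (countRows, db, query and args are placeholders, not code from this commit):

package example

import "database/sql"

func countRows(db *sql.DB, query string, args ...interface{}) (int, error) {
	rows, err := db.Query(query, args...)
	if err != nil {
		return 0, err
	}
	defer rows.Close() // deferred only after the error check, so rows is known to be valid

	n := 0
	for rows.Next() {
		n++
	}
	return n, rows.Err()
}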

View File

@@ -79,7 +79,6 @@ func (s *MessengerDeleteMessageForEveryoneSuite) TestDeleteMessageForEveryone()
 	})
 	s.Require().NoError(err)
 	s.Require().Len(response.Communities(), 1)
-	community = response.Communities()[0]

 	_, err = WaitOnMessengerResponse(s.moderator, func(response *MessengerResponse) bool {
 		return len(response.Communities()) > 0
@@ -180,7 +179,7 @@ func (s *MessengerDeleteMessageForEveryoneSuite) inviteAndJoin(community *commun
 	s.Require().NoError(target.SaveChat(response.Chats()[0]))

-	response, err = WaitOnMessengerResponse(target, func(response *MessengerResponse) bool {
+	_, err = WaitOnMessengerResponse(target, func(response *MessengerResponse) bool {
 		return len(response.Messages()) > 0
 	}, "message 'You have been invited to community' not received")
 	s.Require().NoError(err)

View File

@@ -1422,7 +1422,7 @@ func (m *Messenger) HandleDeleteMessage(state *ReceivedMessageState, deleteMessa
 			return err
 		}

-		if chat.LastMessage != nil && chat.LastMessage.Seen == false && chat.OneToOne() && !chat.Active {
+		if chat.LastMessage != nil && !chat.LastMessage.Seen && chat.OneToOne() && !chat.Active {
 			m.createMessageNotification(chat, state)
 		}
 	}

View File

@@ -305,9 +305,6 @@ func (m *Messenger) timeoutAutomaticStatusUpdates() {
 				nextClock = tempNextClock
 				// Extra 5 sec wait (broadcast receiving delay)
 				waitDuration = tempNextClock + fiveMinutes + 5 - uint64(time.Now().Unix())
-				if waitDuration < 0 {
-					waitDuration = 0
-				}
 			} else {
 				m.timeoutStatusUpdates(referenceClock, tempNextClock)
 				waitDuration = 0
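The deleted guard could never fire: waitDuration is unsigned, so `waitDuration < 0` is always false (the kind of comparison staticcheck flags, likely SA4003). If the subtraction ever "went negative" it would wrap around to a huge value rather than drop below zero, which this small sketch illustrates:

package main

import "fmt"

func main() {
	var a, b uint64 = 3, 5
	d := a - b         // unsigned subtraction wraps around instead of going negative
	fmt.Println(d)     // 18446744073709551614
	fmt.Println(d < 0) // always false for an unsigned value
}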

View File

@@ -1261,7 +1261,7 @@ func (db *sqlitePersistence) AddBookmark(bookmark browsers.Bookmark) (browsers.B
 	icons, iconError := finder.FetchIcons(bookmark.URL)
 	if iconError == nil && len(icons) > 0 {
-		icon := finder.IconInSizeRange(besticon.SizeRange{48, 48, 100})
+		icon := finder.IconInSizeRange(besticon.SizeRange{Min: 48, Perfect: 48, Max: 100})
 		if icon != nil {
 			bookmark.ImageURL = icon.URL
 		} else {
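This and the similar changes below respond to go vet's composites check: unkeyed composite literals of structs from other packages break silently if the struct gains or reorders fields, so the fields are named explicitly. A tiny sketch of the keyed form (the local SizeRange type only mirrors besticon.SizeRange for illustration):

package main

import "fmt"

type SizeRange struct {
	Min, Perfect, Max int
}

func main() {
	// Keyed literal: still correct if SizeRange ever gains or reorders fields.
	r := SizeRange{Min: 48, Perfect: 48, Max: 100}
	fmt.Println(r)
}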

View File

@@ -6,7 +6,7 @@ import (
 type BackedUpProfile struct {
 	DisplayName string                 `json:"displayName,omitempty"`
-	Images      []images.IdentityImage `json:"images,omitempty,omitempty"`
+	Images      []images.IdentityImage `json:"images,omitempty"`
 }

 func (sfwr *WakuBackedUpDataResponse) AddDisplayName(displayName string) {

View File

@@ -38,7 +38,7 @@ func (s *PairingServerSuite) TestMultiBackgroundForeground() {
 	s.PS.ToBackground()
 	s.PS.ToForeground()
 	s.PS.ToForeground()
-	s.Require().Regexp(regexp.MustCompile("(https://\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5})"), s.PS.MakeBaseURL().String())
+	s.Require().Regexp(regexp.MustCompile("(https://\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5})"), s.PS.MakeBaseURL().String()) // nolint: gosimple
 }

 func (s *PairingServerSuite) TestPairingServer_StartPairing() {
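The nolint here most likely silences gosimple S1007, which suggests a raw string literal so the backslashes in the pattern need not be doubled. The unsuppressed alternative would look roughly like this sketch (a standalone example, not the committed code):

package main

import (
	"fmt"
	"regexp"
)

// Raw string literal: no doubled backslashes needed.
var baseURLRe = regexp.MustCompile(`(https://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})`)

func main() {
	fmt.Println(baseURLRe.MatchString("https://192.168.1.10:6666")) // true
}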

View File

@@ -54,7 +54,7 @@ func (s *ServerURLSuite) SetupTest() {
 // This is caused by the ServerURLSuite.SetupTest waiting waitTime before unlocking the portWait sync.Mutex
 func (s *ServerURLSuite) testNoPort(expected string, actual string) {
 	s.Require().Equal(expected, actual)
-	s.Require().Greater(time.Now().Sub(s.testStart), waitTime)
+	s.Require().Greater(time.Since(s.testStart), waitTime)
 }

 func (s *ServerURLSuite) TestServer_MakeBaseURL() {
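gosimple S1012: time.Since(x) is the standard shorthand for time.Now().Sub(x). A tiny runnable sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	// time.Since(start) is equivalent to time.Now().Sub(start).
	fmt.Println(time.Since(start) > 5*time.Millisecond) // true
}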

View File

@@ -75,7 +75,7 @@ func (db *Database) StoreBookmark(bookmark Bookmark) (Bookmark, error) {
 	icons, iconError := finder.FetchIcons(bookmark.URL)
 	if iconError == nil && len(icons) > 0 {
-		icon := finder.IconInSizeRange(besticon.SizeRange{48, 48, 100})
+		icon := finder.IconInSizeRange(besticon.SizeRange{Min: 48, Perfect: 48, Max: 100})
 		if icon != nil {
 			bookmark.ImageURL = icon.URL
 		} else {

View File

@@ -436,18 +436,6 @@ func (r *Router) getBalance(ctx context.Context, network *params.Network, token
 	return r.s.tokenManager.GetBalance(ctx, clients[0], account, token.Address)
 }

-func (r *Router) estimateTimes(ctx context.Context, network *params.Network, gasFees *SuggestedFees, gasFeeMode GasFeeMode) TransactionEstimation {
-	if gasFeeMode == GasFeeLow {
-		return r.s.feesManager.transactionEstimatedTime(ctx, network.ChainID, gasFees.MaxFeePerGasLow)
-	}
-	if gasFeeMode == GasFeeMedium {
-		return r.s.feesManager.transactionEstimatedTime(ctx, network.ChainID, gasFees.MaxFeePerGasMedium)
-	}
-
-	return r.s.feesManager.transactionEstimatedTime(ctx, network.ChainID, gasFees.MaxFeePerGasHigh)
-}
-
 func (r *Router) suggestedRoutes(
 	ctx context.Context,
 	sendType SendType,

View File

@@ -14,7 +14,6 @@ import (
 	"github.com/status-im/status-go/rpc"
 	"github.com/status-im/status-go/services/ens"
 	"github.com/status-im/status-go/services/stickers"
-	"github.com/status-im/status-go/services/wallet/async"
 	"github.com/status-im/status-go/services/wallet/token"
 	"github.com/status-im/status-go/services/wallet/transfer"
 	"github.com/status-im/status-go/services/wallet/walletevent"
@@ -85,7 +84,6 @@ type Service struct {
 	ens      *ens.Service
 	stickers *stickers.Service
 	feed     *event.Feed
-	group    *async.Group
 	signals  *walletevent.SignalsTransmitter
 	reader   *Reader
 }

View File

@@ -141,9 +141,8 @@ func (tm *Manager) GetAllTokens() ([]*Token, error) {
 	if err != nil {
 		return nil, err
 	}

-	for _, token := range tokens {
-		result = append(result, token)
-	}
+	result = append(result, tokens...)

 	return result, nil
 }
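gosimple S1011: a whole slice can be appended in one call, replacing the element-by-element loop. A short runnable sketch (the slice contents are made up):

package main

import "fmt"

func main() {
	result := []string{"eth"}
	tokens := []string{"snt", "dai"}
	// Single append with the spread operator instead of a range loop.
	result = append(result, tokens...)
	fmt.Println(result) // [eth snt dai]
}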

View File

@@ -36,9 +36,9 @@ func TestPendingTransactions(t *testing.T) {
 		To:             common.Address{2},
 		Type:           RegisterENS,
 		AdditionalData: "someuser.stateofus.eth",
-		Value:          bigint.BigInt{big.NewInt(123)},
-		GasLimit:       bigint.BigInt{big.NewInt(21000)},
-		GasPrice:       bigint.BigInt{big.NewInt(1)},
+		Value:          bigint.BigInt{Int: big.NewInt(123)},
+		GasLimit:       bigint.BigInt{Int: big.NewInt(21000)},
+		GasPrice:       bigint.BigInt{Int: big.NewInt(1)},
 		ChainID:        777,
 	}

View File

@@ -37,7 +37,7 @@ func blocksToViews(blocks map[common.Address]*LastKnownBlock) []LastKnownBlockVi
 		view := LastKnownBlockView{
 			Address: address,
 			Number:  block.Number,
-			Balance: bigint.BigInt{block.Balance},
+			Balance: bigint.BigInt{Int: block.Balance},
 			Nonce:   block.Nonce,
 		}
 		blocksViews = append(blocksViews, view)

View File

@@ -139,7 +139,7 @@ func (s *WakuTestSuite) testConfirmationsHandshake(expectConfirmations bool) {
 		handleError(s.T(), rw2.Close())
 	})

-	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 1}}), rw1, nil, s.stats)
+	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 1}}), rw1, nil, s.stats)

 	go func() {
 		// This will always fail eventually as we close the channels
@@ -186,8 +186,8 @@ func (s *WakuTestSuite) TestMessagesResponseWithError() {
 			s.T().Errorf("error closing MsgPipe 2, '%s'", err)
 		}
 	}()
-	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 0}}), rw2, nil, s.stats)
-	p2 := s.newPeer(w2, p2p.NewPeer(enode.ID{2}, "2", []p2p.Cap{{"waku", 0}}), rw1, nil, s.stats)
+	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 0}}), rw2, nil, s.stats)
+	p2 := s.newPeer(w2, p2p.NewPeer(enode.ID{2}, "2", []p2p.Cap{{Name: "waku", Version: 0}}), rw1, nil, s.stats)

 	errorc := make(chan error, 1)
 	go func() { errorc <- w1.HandlePeer(p1, rw2) }()
@@ -247,7 +247,7 @@ func (s *WakuTestSuite) TestEventsWithoutConfirmation() {
 	defer sub.Unsubscribe()

 	rw1, rw2 := p2p.MsgPipe()
-	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 0}}), rw2, nil, s.stats)
+	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 0}}), rw2, nil, s.stats)

 	go func() { handleError(s.T(), w1.HandlePeer(p1, rw2)) }()
@@ -309,8 +309,8 @@ func (s *WakuTestSuite) TestWakuTimeDesyncEnvelopeIgnored() {
 		}
 	}()
 	w1, w2 := New(c, nil), New(c, nil)
-	p1 := s.newPeer(w2, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 1}}), rw1, nil, s.stats)
-	p2 := s.newPeer(w1, p2p.NewPeer(enode.ID{2}, "2", []p2p.Cap{{"waku", 1}}), rw2, nil, s.stats)
+	p1 := s.newPeer(w2, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 1}}), rw1, nil, s.stats)
+	p2 := s.newPeer(w1, p2p.NewPeer(enode.ID{2}, "2", []p2p.Cap{{Name: "waku", Version: 1}}), rw2, nil, s.stats)

 	errc := make(chan error)
 	go func() { errc <- w1.HandlePeer(p2, rw2) }()
@@ -341,7 +341,7 @@ func (s *WakuTestSuite) TestWakuTimeDesyncEnvelopeIgnored() {
 func (s *WakuTestSuite) TestRequestSentEventWithExpiry() {
 	w := New(nil, nil)
-	p := p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 1}})
+	p := p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 1}})
 	rw := discardPipe()
 	defer func() { handleError(s.T(), rw.Close()) }()
 	w.peers[s.newPeer(w, p, rw, nil, s.stats)] = struct{}{}
@@ -395,7 +395,7 @@ func (s *WakuTestSuite) TestDeprecatedDeliverMail() {
 	})

 	rw1, rw2 := p2p.MsgPipe()
-	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 0}}), rw2, nil, s.stats)
+	p1 := s.newPeer(w1, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 0}}), rw2, nil, s.stats)

 	go func() { handleError(s.T(), w1.HandlePeer(p1, rw2)) }()
@@ -478,7 +478,7 @@ func (s *WakuTestSuite) TestRateLimiterIntegration() {
 			s.T().Errorf("error closing MsgPipe, '%s'", err)
 		}
 	}()
-	p := s.newPeer(w, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{"waku", 0}}), rw2, nil, s.stats)
+	p := s.newPeer(w, p2p.NewPeer(enode.ID{1}, "1", []p2p.Cap{{Name: "waku", Version: 0}}), rw2, nil, s.stats)

 	errorc := make(chan error, 1)
 	go func() { errorc <- w.HandlePeer(p, rw2) }()

View File

@@ -1319,6 +1319,8 @@ func (w *Waku) ConnectionChanged(state connection.State) {
 	if !state.Offline && w.offline {
 		select {
 		case w.connectionChanged <- struct{}{}:
+		default:
+			w.logger.Warn("could not write on connection changed channel")
 		}
 	}
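Adding the default branch makes the send non-blocking: if nothing is currently draining connectionChanged, the method logs a warning and returns instead of stalling the caller, and it also resolves the linter's complaint about a select with a single case (likely gosimple S1000). A small self-contained sketch of the pattern:

package main

import "fmt"

func main() {
	notify := make(chan struct{}, 1)

	// Non-blocking send: if the buffer is full because no reader has drained it,
	// fall through to default instead of blocking.
	for i := 0; i < 3; i++ {
		select {
		case notify <- struct{}{}:
			fmt.Println("notified")
		default:
			fmt.Println("channel busy, dropping notification")
		}
	}
}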