Upgrade linter and address issues

Andrea Maria Piana 2020-12-28 10:09:45 +01:00
parent f5482ec187
commit 7387049d4b
21 changed files with 47 additions and 242 deletions

View File

@@ -312,7 +312,7 @@ canary-test: node-canary
lint-install:
@# The following installs a specific version of golangci-lint, which is appropriate for a CI server to avoid different results from build to build
-curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | BINARY=$(GOLANGCI_BINARY) bash -s -- -d -b $(GOPATH)/bin v1.21.0
+curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | BINARY=$(GOLANGCI_BINARY) bash -s -- -d -b $(GOPATH)/bin v1.33.0
lint:
@echo "lint"

View File

@@ -87,7 +87,7 @@ func TestVerifyAccountPassword(t *testing.T) {
require.Fail(t, "no error reported, but account key is missing")
}
accountAddress := types.BytesToAddress(types.FromHex(testCase.address))
-if accountKey.Address != accountAddress {
+if accountKey.Address != accountAddress { // nolint: staticcheck
require.Fail(t, "account mismatch: have %s, want %s", accountKey.Address.Hex(), accountAddress.Hex())
}
}
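Note: the directive above most plausibly quiets staticcheck's nil analysis (SA5011): the earlier branch only calls require.Fail when the key is missing, and staticcheck does not model that call as terminating the test, so the later field access looks like a possible nil dereference. A minimal, self-contained sketch of the same shape (names are illustrative, not from this repo):

package sketch

import "testing"

type key struct{ address string }

// fetch stands in for any lookup that may return nil.
func fetch() *key { return &key{address: "0xabc"} }

func TestNilAnalysis(t *testing.T) {
    k := fetch()
    if k == nil {
        t.Error("no key") // unlike t.Fatal, this does not stop the test
    }
    // Without the directive, staticcheck (SA5011) reports a possible nil
    // dereference here: the branch above can fall through with k == nil.
    if k.address != "0xabc" { // nolint: staticcheck
        t.Fatalf("mismatch: %s", k.address)
    }
}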

View File

@@ -35,13 +35,13 @@ func TestSubscriptionEthWithParamsDict(t *testing.T) {
initNodeAndLogin(t, backend)
-createSubscription(t, backend, fmt.Sprintf(`"eth_newFilter", [
+createSubscription(t, backend, `"eth_newFilter", [
{
"fromBlock":"earliest",
"address":["0xc55cf4b03948d7ebc8b9e8bad92643703811d162","0xdee43a267e8726efd60c2e7d5b81552dcd4fa35c","0x703d7dc0bc8e314d65436adf985dda51e09ad43b","0xe639e24346d646e927f323558e6e0031bfc93581","0x2e7cd05f437eb256f363417fd8f920e2efa77540","0x57cc9b83730e6d22b224e9dc3e370967b44a2de0"],
"topics":["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef","0x0000000000000000000000005dc6108dc6296b052bbd33000553afe0ea576b5e",null]
}
-]`))
+]`)
}
func TestSubscriptionPendingTransaction(t *testing.T) {
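Note: dropping fmt.Sprintf here is likely the gosimple S1039 fix (unnecessary use of fmt.Sprint/Sprintf): the call had no formatting verbs and no arguments, so the raw string literal can be passed directly, which also avoids any % in the JSON payload being misread as a verb. A quick sketch of the rule:

package main

import "fmt"

func main() {
    // Flagged by gosimple (S1039): Sprintf with no verbs and no arguments.
    bad := fmt.Sprintf(`{"fromBlock":"earliest"}`)

    // Equivalent, allocation-free, and lint-clean.
    good := `{"fromBlock":"earliest"}`

    fmt.Println(bad == good) // prints true
}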

View File

@@ -240,7 +240,7 @@ func TestBackendConnectionChangesConcurrently(t *testing.T) {
for i := 0; i < count; i++ {
wg.Add(1)
go func() {
-connIdx := rand.Intn(len(connections))
+connIdx := rand.Intn(len(connections)) // nolint: gosec
backend.ConnectionChange(connections[connIdx], false)
wg.Done()
}()
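Note: most of the nolint: gosec directives in this commit suppress G404 ("use of weak random number generator"), which the upgraded golangci-lint raises on every math/rand call. A seeded or global math/rand is fine for tests and cosmetic picks; the check exists to push security-sensitive code toward crypto/rand. A small sketch contrasting the two (illustrative only):

package main

import (
    crand "crypto/rand"
    "fmt"
    "math/big"
    mrand "math/rand"
)

func main() {
    // Fast and reproducible when seeded — fine for tests; gosec flags it as G404.
    fmt.Println(mrand.Intn(10)) // nolint: gosec

    // Cryptographically secure alternative for anything security-sensitive.
    n, err := crand.Int(crand.Reader, big.NewInt(10))
    if err != nil {
        panic(err)
    }
    fmt.Println(n.Int64())
}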

View File

@@ -128,7 +128,7 @@ func (r *Rendezvous) MakeRecord() (record enr.Record, err error) {
}
func (r *Rendezvous) register(topic string, record enr.Record) error {
-srv := r.servers[rand.Intn(len(r.servers))]
+srv := r.servers[rand.Intn(len(r.servers))] // nolint: gosec
ctx, cancel := context.WithTimeout(r.rootCtx, requestTimeout)
defer cancel()
@@ -198,7 +198,7 @@ func (r *Rendezvous) Discover(
}
ticker = time.NewTicker(newPeriod)
case <-ticker.C:
-srv := r.servers[rand.Intn(len(r.servers))]
+srv := r.servers[rand.Intn(len(r.servers))] // nolint: gosec
records, err := r.discoverRequest(srv, topic)
if err == context.Canceled {
return err

View File

@@ -17,8 +17,8 @@ func NewWhisperEnvelopeEventWrapper(envelopeEvent *whisper.EnvelopeEvent) *types.EnvelopeEvent {
switch data := envelopeEvent.Data.(type) {
case []whisper.EnvelopeError:
wrappedData := make([]types.EnvelopeError, len(data))
-for index, envError := range data {
-wrappedData[index] = *NewWhisperEnvelopeErrorWrapper(&envError)
+for index := range data {
+wrappedData[index] = *NewWhisperEnvelopeErrorWrapper(&data[index])
}
case *whisper.MailServerResponse:
wrappedData = NewWhisperMailServerResponseWrapper(data)
@@ -44,8 +44,8 @@ func NewWakuEnvelopeEventWrapper(envelopeEvent *wakucommon.EnvelopeEvent) *types.EnvelopeEvent {
switch data := envelopeEvent.Data.(type) {
case []wakucommon.EnvelopeError:
wrappedData := make([]types.EnvelopeError, len(data))
-for index, envError := range data {
-wrappedData[index] = *NewWakuEnvelopeErrorWrapper(&envError)
+for index := range data {
+wrappedData[index] = *NewWakuEnvelopeErrorWrapper(&data[index])
}
case *waku.MailServerResponse:
wrappedData = NewWakuMailServerResponseWrapper(data)
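Note: switching from &envError to &data[index] looks like a fix for gosec's G601 ("implicit memory aliasing in for loop"). Before Go 1.22 a range statement reuses one loop variable, so its address is identical on every iteration; here the pointer was dereferenced immediately, so the old code happened to be correct, but the pattern breaks the moment the pointer outlives the iteration. A short demonstration (assumes pre-Go 1.22 loop semantics):

package main

import "fmt"

func main() {
    data := []int{1, 2, 3}

    bad := make([]*int, len(data))
    for i, v := range data {
        bad[i] = &v // every entry aliases the single reused loop variable
    }

    good := make([]*int, len(data))
    for i := range data {
        good[i] = &data[i] // each entry points at a distinct slice element
    }

    fmt.Println(*bad[0], *bad[1], *bad[2])    // 3 3 3 on Go < 1.22
    fmt.Println(*good[0], *good[1], *good[2]) // 1 2 3
}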

View File

@@ -826,7 +826,7 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPayload)
func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) error {
log.Info("Started syncing envelopes", "peer", peerID.String(), "req", req)
-requestID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Intn(1000))
+requestID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Intn(1000)) // nolint: gosec
syncAttemptsCounter.Inc()

View File

@@ -16,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/les"
gethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
-"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/status-im/status-go/whisper/v6"
@@ -186,86 +185,6 @@ func TestStatusNodeAddPeer(t *testing.T) {
require.Equal(t, 1, n.PeerCount())
}
-func TestStatusNodeReconnectStaticPeers(t *testing.T) {
-// Skipping as flaky
-t.Skip()
-var err error
-peer, err := gethnode.New(&gethnode.Config{
-P2P: p2p.Config{
-MaxPeers: math.MaxInt32,
-NoDiscovery: true,
-ListenAddr: ":0",
-},
-NoUSB: true,
-})
-require.NoError(t, err)
-require.NoError(t, peer.Start())
-defer func() { require.NoError(t, peer.Stop()) }()
-var errCh <-chan error
-peerURL := peer.Server().Self().URLv4()
-n := New()
-// checks before node is started
-require.EqualError(t, n.ReconnectStaticPeers(), ErrNoRunningNode.Error())
-// start status node
-config := params.NodeConfig{
-MaxPeers: math.MaxInt32,
-ClusterConfig: params.ClusterConfig{
-Enabled: true,
-StaticNodes: []string{peerURL},
-},
-}
-require.NoError(t, n.Start(&config, nil))
-defer func() { require.NoError(t, n.Stop()) }()
-// checks after node is started
-// it may happen that the peer is already connected
-// because it was already added to `StaticNodes`
-connected, err := isPeerConnected(n, peerURL)
-require.NoError(t, err)
-if !connected {
-errCh = helpers.WaitForPeerAsync(n.Server(), peerURL, p2p.PeerEventTypeAdd, time.Second*30)
-require.NoError(t, <-errCh)
-}
-require.Equal(t, 1, n.PeerCount())
-require.Equal(t, peer.Server().Self().ID().String(), n.GethNode().Server().PeersInfo()[0].ID)
-// reconnect static peers
-errDropCh := helpers.WaitForPeerAsync(n.Server(), peerURL, p2p.PeerEventTypeDrop, time.Second*30)
-// it takes at least 30 seconds to bring back previously connected peer
-errAddCh := helpers.WaitForPeerAsync(n.Server(), peerURL, p2p.PeerEventTypeAdd, time.Second*60)
-require.NoError(t, n.ReconnectStaticPeers())
-// first check if a peer gets disconnected
-require.NoError(t, <-errDropCh)
-require.NoError(t, <-errAddCh)
-}
-func isPeerConnected(node *StatusNode, peerURL string) (bool, error) {
-if !node.IsRunning() {
-return false, ErrNoRunningNode
-}
-parsedPeer, err := enode.ParseV4(peerURL)
-if err != nil {
-return false, err
-}
-server := node.GethNode().Server()
-for _, peer := range server.PeersInfo() {
-if peer.ID == parsedPeer.ID().String() {
-return true, nil
-}
-}
-return false, nil
-}
func TestStatusNodeRendezvousDiscovery(t *testing.T) {
config := params.NodeConfig{
Rendezvous: true,

View File

@@ -2,19 +2,14 @@ package peers
import (
"encoding/json"
"fmt"
"net"
"strconv"
"testing"
"time"
-lcrypto "github.com/libp2p/go-libp2p-core/crypto"
-ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
-"github.com/syndtr/goleveldb/leveldb"
-"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -93,24 +88,6 @@ func (s *PeerPoolSimulationSuite) setupEthV5() {
}
}
-func (s *PeerPoolSimulationSuite) setupRendezvous() {
-priv, _, err := lcrypto.GenerateKeyPair(lcrypto.Secp256k1, 0)
-s.Require().NoError(err)
-laddr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/7777"))
-s.Require().NoError(err)
-db, err := leveldb.Open(storage.NewMemStorage(), nil)
-s.Require().NoError(err)
-s.rendezvousServer = server.NewServer(laddr, priv, server.NewStorage(db))
-s.Require().NoError(s.rendezvousServer.Start())
-for i := range s.peers {
-peer := s.peers[i]
-d, err := discovery.NewRendezvous([]ma.Multiaddr{s.rendezvousServer.Addr()}, peer.PrivateKey, peer.Self())
-s.NoError(err)
-s.NoError(d.Start())
-s.discovery[i] = d
-}
-}
func (s *PeerPoolSimulationSuite) TearDown() {
s.bootnode.Stop()
for i := range s.peers {
@@ -170,98 +147,6 @@ func (s *PeerPoolSimulationSuite) TestPeerPoolCacheEthV5() {
}
}
-func (s *PeerPoolSimulationSuite) TestSingleTopicDiscoveryWithFailoverEthV5() {
-s.T().Skip("Skipping due to being flaky")
-s.setupEthV5()
-s.singleTopicDiscoveryWithFailover()
-}
-func (s *PeerPoolSimulationSuite) TestSingleTopicDiscoveryWithFailoverRendezvous() {
-s.T().Skip("Skipping due to being flaky")
-s.setupRendezvous()
-s.singleTopicDiscoveryWithFailover()
-}
-func (s *PeerPoolSimulationSuite) singleTopicDiscoveryWithFailover() {
-var err error
-// Buffered channels must be used because we expect the events
-// to be in the same order. Use a buffer length greater than
-// the expected number of events to avoid deadlock.
-poolEvents := make(chan string, 10)
-summaries := make(chan []*p2p.PeerInfo, 10)
-signal.SetDefaultNodeNotificationHandler(func(jsonEvent string) {
-var envelope struct {
-Type string
-Event json.RawMessage
-}
-s.NoError(json.Unmarshal([]byte(jsonEvent), &envelope))
-switch typ := envelope.Type; typ {
-case signal.EventDiscoveryStarted, signal.EventDiscoveryStopped:
-poolEvents <- envelope.Type
-case signal.EventDiscoverySummary:
-poolEvents <- envelope.Type
-var summary []*p2p.PeerInfo
-s.NoError(json.Unmarshal(envelope.Event, &summary))
-summaries <- summary
-}
-})
-defer signal.ResetDefaultNodeNotificationHandler()
-topic := discv5.Topic("cap=test")
-// simulation should only rely on fast sync
-config := map[discv5.Topic]params.Limits{
-topic: params.NewLimits(1, 1), // limits are chosen for simplicity of the simulation
-}
-peerPoolOpts := &Options{100 * time.Millisecond, 100 * time.Millisecond, 0, true, 0, nil, ""}
-cache, err := newInMemoryCache()
-s.Require().NoError(err)
-peerPool := NewPeerPool(s.discovery[1], config, cache, peerPoolOpts)
-// create and start topic registry
-register := NewRegister(s.discovery[0], topic)
-s.Require().NoError(register.Start())
-// subscribe for peer events before starting the peer pool
-events := make(chan *p2p.PeerEvent, 20)
-subscription := s.peers[1].SubscribeEvents(events)
-defer subscription.Unsubscribe()
-// start the peer pool
-s.Require().NoError(peerPool.Start(s.peers[1], nil))
-defer peerPool.Stop()
-s.Equal(signal.EventDiscoveryStarted, s.getPoolEvent(poolEvents))
-// wait for the peer to be found and connected
-connectedPeer := s.getPeerFromEvent(events, p2p.PeerEventTypeAdd)
-s.Equal(s.peers[0].Self().ID(), connectedPeer)
-// as the upper limit was reached, Discovery should be stoped
-s.Equal(signal.EventDiscoverySummary, s.getPoolEvent(poolEvents))
-s.Equal(signal.EventDiscoveryStopped, s.getPoolEvent(poolEvents))
-s.Len(<-summaries, 1)
-// stop topic register and the connected peer
-register.Stop()
-s.peers[0].Stop()
-disconnectedPeer := s.getPeerFromEvent(events, p2p.PeerEventTypeDrop)
-s.Equal(connectedPeer, disconnectedPeer)
-s.Equal(signal.EventDiscoverySummary, s.getPoolEvent(poolEvents))
-s.Len(<-summaries, 0)
-// Discovery should be restarted because the number of peers dropped
-// below the lower limit.
-s.Equal(signal.EventDiscoveryStarted, s.getPoolEvent(poolEvents))
-// register the second peer
-register = NewRegister(s.discovery[2], topic)
-s.Require().NoError(register.Start())
-defer register.Stop()
-s.Equal(s.peers[2].Self().ID(), s.getPeerFromEvent(events, p2p.PeerEventTypeAdd))
-// Discovery can be stopped again.
-s.Require().Equal(signal.EventDiscoverySummary, s.getPoolEvent(poolEvents))
-s.Equal(signal.EventDiscoveryStopped, s.getPoolEvent(poolEvents))
-s.Len(<-summaries, 1)
-}
// TestPeerPoolMaxPeersOverflow verifies that following scenario will not occur:
// - found peer A and B in the same kademlia cycle
// - process peer A

View File

@@ -20,7 +20,7 @@ func TestPeerPriorityQueueSorting(t *testing.T) {
// shuffle discTimes
for i := range discTimes {
-j := rand.Intn(i + 1)
+j := rand.Intn(i + 1) //nolint: gosec
discTimes[i], discTimes[j] = discTimes[j], discTimes[i]
}
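Note: the loop above is an inside-out Fisher–Yates shuffle; since Go 1.10 the standard library ships the same algorithm as rand.Shuffle, which would also confine the nolint to a single line. A sketch of the equivalent (assuming discTimes is a slice):

package main

import (
    "fmt"
    "math/rand"
)

func main() {
    discTimes := []int64{10, 20, 30, 40}

    // Equivalent to the manual j := rand.Intn(i + 1) swap loop above.
    rand.Shuffle(len(discTimes), func(i, j int) { // nolint: gosec
        discTimes[i], discTimes[j] = discTimes[j], discTimes[i]
    })

    fmt.Println(discTimes)
}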

View File

@@ -277,7 +277,7 @@ func CreateOneToOneChat(name string, publicKey *ecdsa.PublicKey, timesource common.TimeSource) Chat {
func CreateCommunityChat(orgID, chatID string, orgChat *protobuf.CommunityChat, timesource common.TimeSource) Chat {
color := orgChat.Identity.Color
if color == "" {
-color = chatColors[rand.Intn(len(chatColors))]
+color = chatColors[rand.Intn(len(chatColors))] // nolint: gosec
}
return Chat{
@@ -307,7 +307,7 @@ func CreatePublicChat(name string, timesource common.TimeSource) Chat {
Name: name,
Active: true,
Timestamp: int64(timesource.GetCurrentTime()),
-Color: chatColors[rand.Intn(len(chatColors))],
+Color: chatColors[rand.Intn(len(chatColors))], // nolint: gosec
ChatType: ChatTypePublic,
}
}
@@ -318,7 +318,7 @@ func CreateProfileChat(name string, profile string, timesource common.TimeSource) Chat {
Name: name,
Active: true,
Timestamp: int64(timesource.GetCurrentTime()),
-Color: chatColors[rand.Intn(len(chatColors))],
+Color: chatColors[rand.Intn(len(chatColors))], // nolint: gosec
ChatType: ChatTypeProfile,
Profile: profile,
}
@@ -327,7 +327,7 @@ func CreateProfileChat(name string, profile string, timesource common.TimeSource) Chat {
func CreateGroupChat(timesource common.TimeSource) Chat {
return Chat{
Active: true,
-Color: chatColors[rand.Intn(len(chatColors))],
+Color: chatColors[rand.Intn(len(chatColors))], // nolint: gosec
Timestamp: int64(timesource.GetCurrentTime()),
ChatType: ChatTypePrivateGroupChat,
}

View File

@@ -641,12 +641,12 @@ func publish(
for i := 0; i < 200; i++ {
// Simulate 5% of the messages dropped
-if rand.Intn(100) <= 95 {
+if rand.Intn(100) <= 95 { // nolint: gosec
wg.Add(1)
// Simulate out of order messages
go func() {
defer wg.Done()
-time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond)
+time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond) // nolint: gosec
response, err := e.BuildDirectMessage(privateKey, publicKey, cleartext)
if err != nil {
errChan <- err

View File

@@ -81,13 +81,13 @@ func (s *ClientSuite) TestBuildPushNotificationRegisterMessage() {
// Set random generator for uuid
var seed int64 = 1
-uuid.SetRand(rand.New(rand.NewSource(seed)))
+uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
// Get token
expectedUUID := uuid.New().String()
// Reset random generator
-uuid.SetRand(rand.New(rand.NewSource(seed)))
+uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
s.client.deviceToken = testDeviceToken
// Set reader
@@ -135,7 +135,7 @@ func (s *ClientSuite) TestBuildPushNotificationRegisterMessageAllowFromContactsOnly() {
// Set random generator for uuid
var seed int64 = 1
-uuid.SetRand(rand.New(rand.NewSource(seed)))
+uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
// Get token
expectedUUID := uuid.New().String()
@@ -154,7 +154,7 @@ func (s *ClientSuite) TestBuildPushNotificationRegisterMessageAllowFromContactsOnly() {
s.Require().NoError(err)
// Reset random generator
-uuid.SetRand(rand.New(rand.NewSource(seed)))
+uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
s.client.config.AllowFromContactsOnly = true
s.client.deviceToken = testDeviceToken
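Note: uuid.SetRand (presumably google/uuid here) accepts any io.Reader as the package's randomness source, and a seeded *math/rand.Rand implements io.Reader, so the test can learn the expected UUID and then rewind the source to make the code under test reproduce it exactly. A minimal sketch of the trick:

package main

import (
    "fmt"
    "math/rand"

    "github.com/google/uuid"
)

func main() {
    const seed int64 = 1

    // First pass: learn which UUID the seeded source yields.
    uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
    expected := uuid.New().String()

    // Rewind: an identically seeded source replays the same byte stream.
    uuid.SetRand(rand.New(rand.NewSource(seed))) // nolint: gosec
    fmt.Println(uuid.New().String() == expected) // prints true
}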

View File

@@ -320,7 +320,7 @@ func (c *newBlocksTransfersCommand) Run(parent context.Context) (err error) {
ctx, cancel = context.WithTimeout(parent, 10*time.Second)
latestHeader, removed, latestValidSavedBlock, reorgSpotted, err := c.onNewBlock(ctx, c.from, nextHeader)
cancel()
-if err != nil {
+if err != nil || latestHeader == nil {
log.Error("failed to process new header", "header", nextHeader, "error", err)
return err
}
@@ -358,7 +358,7 @@ func (c *newBlocksTransfersCommand) Run(parent context.Context) (err error) {
Accounts: uniqueAccountsFromHeaders(removed),
})
}
-log.Info("before sending new block event", "latest", latestHeader != nil, "removed", len(removed), "len", len(uniqueAccountsFromTransfers(all)))
+log.Info("before sending new block event", "removed", len(removed), "len", len(uniqueAccountsFromTransfers(all)))
c.feed.Send(Event{
Type: EventNewBlock,
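Note: folding latestHeader == nil into the error branch makes the early return prove the header non-nil, which is presumably why the log line below could drop its latestHeader != nil field. One caveat worth flagging: when the header is nil but err is nil, the function now returns a nil error. A sketch of the pattern with hypothetical helpers (not from this repo):

package main

import "fmt"

type header struct{ number uint64 }

// fetchLatest and process are illustrative stand-ins.
func run(fetchLatest func() (*header, error), process func(*header)) error {
    h, err := fetchLatest()
    if err != nil || h == nil {
        // Returns a nil error when h is nil but err is nil; callers that
        // must distinguish the two cases need a dedicated sentinel error.
        return err
    }
    process(h) // safe: h is proven non-nil from here on
    return nil
}

func main() {
    err := run(
        func() (*header, error) { return &header{number: 42}, nil },
        func(h *header) { fmt.Println("got block", h.number) },
    )
    fmt.Println(err) // <nil>
}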

View File

@@ -45,7 +45,7 @@ func (s *APITestSuite) TestCHTUpdate() {
func (s *APITestSuite) TestRaceConditions() {
cnt := 25
progress := make(chan struct{}, cnt)
-rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+rnd := rand.New(rand.NewSource(time.Now().UnixNano())) // nolint: gosec
nodeConfig1, err := utils.MakeTestNodeConfig(utils.GetNetworkID())
s.NoError(err)
@@ -86,10 +86,11 @@ func (s *APITestSuite) TestRaceConditions() {
}
for i := 0; i < cnt; i++ {
-randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))]
-randFunc := funcsToTest[rnd.Intn(len(funcsToTest))]
+randConfig := nodeConfigs[rnd.Intn(len(nodeConfigs))] // nolint: gosec
+randFunc := funcsToTest[rnd.Intn(len(funcsToTest))] // nolint: gosec
-if rnd.Intn(100) > 75 { // introduce random delays
+// introduce random delays
+if rnd.Intn(100) > 75 { // nolint: gosec
time.Sleep(500 * time.Millisecond)
}
s.NoError(s.backend.AccountManager().InitKeystore(randConfig.KeyStoreDir))

View File

@@ -65,7 +65,7 @@ func TestEnvelopeOpenAcceptsOnlyOneKeyTypeInFilter(t *testing.T) {
params := MessageParams{
PoW: 0.01,
WorkTime: 1,
-TTL: uint32(mrand.Intn(1024)),
+TTL: uint32(mrand.Intn(1024)), // nolint: gosec
Payload: make([]byte, 50),
KeySym: symKey,
Dst: nil,

View File

@@ -658,7 +658,7 @@ func TestWatchers(t *testing.T) {
var envelopes [NumMessages]*Envelope
for i = 0; i < NumMessages; i++ {
-j = mrand.Uint32() % NumFilters
+j = mrand.Uint32() % NumFilters // nolint: gosec
e = generateCompatibeEnvelope(t, tst[j].f)
envelopes[i] = e
tst[j].msgCnt++
@@ -714,7 +714,7 @@ func TestWatchers(t *testing.T) {
envelopes[0] = e
tst[0].msgCnt++
for i = 1; i < NumMessages; i++ {
-j = mrand.Uint32() % NumFilters
+j = mrand.Uint32() % NumFilters // nolint: gosec
e = generateCompatibeEnvelope(t, tst[j].f)
envelopes[i] = e
tst[j].msgCnt++
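Note (minor, pre-existing): mrand.Uint32() % NumFilters carries a modulo bias whenever NumFilters does not divide 2^32; rand.Intn avoids that via internal rejection sampling. Harmless in a test, but worth knowing when the bound is not a power of two. A sketch (NumFilters value assumed for illustration):

package main

import (
    "fmt"
    "math/rand"
)

const NumFilters = 16 // assumed here; any power of two divides 2^32 exactly

func main() {
    // Uniform only because NumFilters divides 2^32; biased otherwise.
    j := rand.Uint32() % NumFilters // nolint: gosec

    // Uniform for any bound: Intn rejects and retries internally.
    k := rand.Intn(NumFilters) // nolint: gosec
    fmt.Println(j, k)
}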

View File

@@ -38,12 +38,12 @@ func GenerateMessageParams() (*MessageParams, error) {
buf := make([]byte, 4)
mrand.Read(buf) // nolint: gosec
-sz := mrand.Intn(400)
+sz := mrand.Intn(400) // nolint: gosec
var p MessageParams
p.PoW = 0.01
p.WorkTime = 1
-p.TTL = uint32(mrand.Intn(1024))
+p.TTL = uint32(mrand.Intn(1024)) // nolint: gosec
p.Payload = make([]byte, sz)
p.KeySym = make([]byte, AESKeyLength)
mrand.Read(p.Payload) // nolint: gosec
@@ -417,12 +417,12 @@ func TestPadding(t *testing.T) {
}
for i := 0; i < 256; i++ {
-n := mrand.Intn(256*254) + 256
+n := mrand.Intn(256*254) + 256 // nolint: gosec
singlePaddingTest(t, n)
}
for i := 0; i < 256; i++ {
-n := mrand.Intn(256*1024) + 256*256
+n := mrand.Intn(256*1024) + 256*256 // nolint: gosec
singlePaddingTest(t, n)
}
}

View File

@@ -126,12 +126,12 @@ func generateMessageParams() (*common.MessageParams, error) {
buf := make([]byte, 4)
mrand.Read(buf) // nolint: gosec
-sz := mrand.Intn(400)
+sz := mrand.Intn(400) // nolint: gosec
var p common.MessageParams
p.PoW = 0.01
p.WorkTime = 1
-p.TTL = uint32(mrand.Intn(1024))
+p.TTL = uint32(mrand.Intn(1024)) // nolint: gosec
p.Payload = make([]byte, sz)
p.KeySym = make([]byte, common.AESKeyLength)
mrand.Read(p.Payload) // nolint: gosec

View File

@@ -126,12 +126,12 @@ func generateMessageParams() (*common.MessageParams, error) {
buf := make([]byte, 4)
mrand.Read(buf) // nolint: gosec
-sz := mrand.Intn(400)
+sz := mrand.Intn(400) // nolint: gosec
var p common.MessageParams
p.PoW = 0.01
p.WorkTime = 1
-p.TTL = uint32(mrand.Intn(1024))
+p.TTL = uint32(mrand.Intn(1024)) // nolint: gosec
p.Payload = make([]byte, sz)
p.KeySym = make([]byte, common.AESKeyLength)
mrand.Read(p.Payload) // nolint: gosec

View File

@@ -735,12 +735,12 @@ func generateMessageParams() (*common.MessageParams, error) {
buf := make([]byte, 4)
mrand.Read(buf) // nolint: gosec
-sz := mrand.Intn(400)
+sz := mrand.Intn(400) // nolint: gosec
var p common.MessageParams
p.PoW = 0.01
p.WorkTime = 1
-p.TTL = uint32(mrand.Intn(1024))
+p.TTL = uint32(mrand.Intn(1024)) // nolint: gosec
p.Payload = make([]byte, sz)
p.KeySym = make([]byte, common.AESKeyLength)
mrand.Read(p.Payload) // nolint: gosec