synchronize display name (#2989)
commit 9137257638
parent dd78445c22
@@ -22,7 +22,9 @@ func RunAsync(f func() error) <-chan error {
 
 // HashMessage calculates the hash of a message to be safely signed by the keycard
 // The hash is calulcated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
 // This gives context to the signed message and prevents signing of transactions.
 func HashMessage(message string) ([]byte, error) {
 	buf := bytes.NewBufferString("\x19Ethereum Signed Message:\n")
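For reference, the prefixed hash described in this comment can be reproduced with a minimal sketch along these lines (assuming golang.org/x/crypto/sha3 is available; this illustrates the scheme, not necessarily the exact status-go implementation):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// hashMessage returns keccak256("\x19Ethereum Signed Message:\n" + len(message) + message).
func hashMessage(message string) []byte {
	data := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(message), message)
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(data)) // hash.Hash.Write never returns an error
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", hashMessage("hello"))
}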
@@ -202,7 +202,8 @@ func generateSecureRandomData(length int) ([]byte, error) {
 // safely used to calculate a signature from.
 //
 // The hash is calulcated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
 func TextHash(data []byte) []byte {
@@ -214,7 +215,8 @@ func TextHash(data []byte) []byte {
 // safely used to calculate a signature from.
 //
 // The hash is calulcated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
 func TextAndHash(data []byte) ([]byte, string) {
@@ -37,7 +37,7 @@ import (
 	"github.com/status-im/status-go/eth-node/types"
 )
 
-//SignatureLength indicates the byte length required to carry a signature with recovery id.
+// SignatureLength indicates the byte length required to carry a signature with recovery id.
 const SignatureLength = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id
 
 // RecoveryIDOffset points to the byte offset within the signature that contains the recovery id.
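To illustrate the layout these constants describe, here is a minimal sketch with a hypothetical helper, assuming the conventional r||s||v encoding where the recovery id sits in the last byte (i.e. RecoveryIDOffset = 64):

package main

import (
	"errors"
	"fmt"
)

const (
	SignatureLength  = 64 + 1 // 64 bytes ECDSA signature + 1 byte recovery id
	RecoveryIDOffset = 64     // offset of the recovery id byte
)

// splitSignature separates a 65-byte signature into its r||s part and recovery id.
func splitSignature(sig []byte) (rs []byte, v byte, err error) {
	if len(sig) != SignatureLength {
		return nil, 0, errors.New("signature must be 65 bytes")
	}
	return sig[:RecoveryIDOffset], sig[RecoveryIDOffset], nil
}

func main() {
	sig := make([]byte, SignatureLength)
	sig[RecoveryIDOffset] = 1
	rs, v, _ := splitSignature(sig)
	fmt.Println(len(rs), v) // 64 1
}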
go.mod
@@ -68,7 +68,7 @@ require (
 	go.uber.org/zap v1.23.0
 	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
 	golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb
-	google.golang.org/protobuf v1.28.1 // indirect
+	google.golang.org/protobuf v1.28.1
 	gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
 	gopkg.in/go-playground/validator.v9 v9.31.0
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
@@ -442,6 +442,7 @@ func SignMessage(rpcParams string) string {
 
 // SignTypedData unmarshall data into TypedData, validate it and signs with selected account,
 // if password matches selected account.
+//
 //export SignTypedData
 func SignTypedData(data, address, password string) string {
 	var typed typeddata.TypedData
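SignTypedData (and SignTypedDataV4 below) take the typed data as a JSON string. A minimal sketch of the kind of EIP-712 payload such a call expects; the domain, types and values here are made up for illustration, and the address/password arguments are placeholders:

package main

import "fmt"

// typedDataJSON is an illustrative EIP-712 payload, not taken from status-go tests.
const typedDataJSON = `{
  "types": {
    "EIP712Domain": [
      {"name": "name", "type": "string"},
      {"name": "version", "type": "string"},
      {"name": "chainId", "type": "uint256"},
      {"name": "verifyingContract", "type": "address"}
    ],
    "Mail": [{"name": "contents", "type": "string"}]
  },
  "primaryType": "Mail",
  "domain": {
    "name": "Example Dapp",
    "version": "1",
    "chainId": 1,
    "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
  },
  "message": {"contents": "Hello"}
}`

func main() {
	// response := SignTypedData(typedDataJSON, "0x<account address>", "<password>")
	fmt.Println(len(typedDataJSON) > 0)
}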
@@ -457,6 +458,7 @@ func SignTypedData(data, address, password string) string {
 }
 
 // HashTypedData unmarshalls data into TypedData, validates it and hashes it.
+//
 //export HashTypedData
 func HashTypedData(data string) string {
 	var typed typeddata.TypedData
@@ -473,6 +475,7 @@ func HashTypedData(data string) string {
 
 // SignTypedDataV4 unmarshall data into TypedData, validate it and signs with selected account,
 // if password matches selected account.
+//
 //export SignTypedDataV4
 func SignTypedDataV4(data, address, password string) string {
 	var typed apitypes.TypedData
@@ -485,6 +488,7 @@ func SignTypedDataV4(data, address, password string) string {
 }
 
 // HashTypedDataV4 unmarshalls data into TypedData, validates it and hashes it.
+//
 //export HashTypedDataV4
 func HashTypedDataV4(data string) string {
 	var typed apitypes.TypedData
@@ -586,7 +590,9 @@ func HashTransaction(txArgsJSON string) string {
 
 // HashMessage calculates the hash of a message to be safely signed by the keycard
 // The hash is calulcated as
-// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
 // This gives context to the signed message and prevents signing of transactions.
 func HashMessage(message string) string {
 	hash, err := api.HashMessage(message)
@@ -609,7 +615,7 @@ func StopCPUProfiling() string { //nolint: deadcode
 	return makeJSONResponse(err)
 }
 
-//WriteHeapProfile starts pprof for heap
+// WriteHeapProfile starts pprof for heap
 func WriteHeapProfile(dataDir string) string { //nolint: deadcode
 	err := profiling.WriteHeapFile(dataDir)
 	return makeJSONResponse(err)
@@ -675,6 +681,7 @@ func SetSignalEventCallback(cb unsafe.Pointer) {
 }
 
 // ExportNodeLogs reads current node log and returns content to a caller.
+//
 //export ExportNodeLogs
 func ExportNodeLogs() string {
 	node := statusBackend.StatusNode()
@@ -300,12 +300,12 @@ func (t *TopicPool) limitFastMode(timeout time.Duration) chan struct{} {
 }
 
 // ConfirmAdded called when peer was added by p2p Server.
 // 1. Skip a peer if it not in our peer table
 // 2. Add a peer to a cache.
 // 3. Disconnect a peer if it was connected after we reached max limit of peers.
 // (we can't know in advance if peer will be connected, thats why we allow
 // to overflow for short duration)
 // 4. Switch search to slow mode if it is running.
 func (t *TopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
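The numbered steps in this comment describe a cap-with-temporary-overflow policy: the p2p server may briefly connect more peers than allowed, and confirmation then drops the extras. A stripped-down sketch of that idea with a hypothetical pool type, not the actual TopicPool code:

package main

import "fmt"

type pool struct {
	limit     int
	connected map[string]bool
}

// confirmAdded decides whether to keep a peer the p2p server has just connected;
// peers that would push the pool over its limit are dropped again.
func (p *pool) confirmAdded(id string) (kept bool) {
	if len(p.connected) >= p.limit {
		return false // over the limit: disconnect the extra peer
	}
	p.connected[id] = true
	return true
}

func main() {
	p := &pool{limit: 2, connected: map[string]bool{}}
	fmt.Println(p.confirmAdded("a"), p.confirmAdded("b"), p.confirmAdded("c")) // true true false
}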
@@ -475,9 +475,9 @@ func (t *TopicPool) handleFoundPeers(server *p2p.Server, found <-chan *discv5.No
 
 // processFoundNode called when node is discovered by kademlia search query
 // 2 important conditions
 // 1. every time when node is processed we need to update discoveredTime.
 // peer will be considered as valid later only if it was discovered < 60m ago
 // 2. if peer is connected or if max limit is reached we are not a adding peer to p2p server
 func (t *TopicPool) processFoundNode(server *p2p.Server, node *discv5.Node) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
@@ -3708,13 +3708,12 @@ func (m *Messenger) handleRetrievedMessages(chatWithMessages map[transport.Filte
 		m.outputToCSV(msg.TransportMessage.Timestamp, msg.ID, senderID, filter.Topic, filter.ChatID, msg.Type, ss)
 		logger.Debug("Handling SyncSetting", zap.Any("message", ss))
 
-		settingField, err := m.extractSyncSetting(&ss)
+		err := m.handleSyncSetting(messageState, &ss)
 		if err != nil {
 			logger.Warn("failed to handle SyncSetting", zap.Error(err))
 			allMessagesProcessed = false
 			continue
 		}
-		messageState.Response.Settings = append(messageState.Response.Settings, settingField)
 
 	case protobuf.RequestAddressForTransaction:
 		command := msg.ParsedMessage.Interface().(protobuf.RequestAddressForTransaction)
@@ -1718,6 +1718,22 @@ func (m *Messenger) HandleRequestAddressForTransaction(messageState *ReceivedMes
 	return m.handleCommandMessage(messageState, message)
 }
 
+func (m *Messenger) handleSyncSetting(messageState *ReceivedMessageState, message *protobuf.SyncSetting) error {
+	settingField, err := m.extractSyncSetting(message)
+	if err != nil {
+		return err
+	}
+	if message.GetType() == protobuf.SyncSetting_DISPLAY_NAME {
+		m.account.Name = message.GetValueString()
+		err = m.multiAccounts.SaveAccount(*m.account)
+		if err != nil {
+			return err
+		}
+	}
+	messageState.Response.AddSetting(settingField)
+	return nil
+}
+
 func (m *Messenger) HandleRequestTransaction(messageState *ReceivedMessageState, command protobuf.RequestTransaction) error {
 	err := ValidateReceivedRequestTransaction(&command, messageState.CurrentMessageState.WhisperTimestamp)
 	if err != nil {
@@ -308,6 +308,10 @@ func (r *MessengerResponse) AddCommunitySettings(c *communities.CommunitySetting
 	r.communitiesSettings[c.CommunityID] = c
 }
 
+func (r *MessengerResponse) AddSetting(s *settings.SyncSettingField) {
+	r.Settings = append(r.Settings, s)
+}
+
 func (r *MessengerResponse) AddBookmark(bookmark *browsers.Bookmark) {
 	r.Bookmarks = append(r.Bookmarks, bookmark)
 }
@@ -15,7 +15,6 @@ Routing rules are following:
 List of methods to be routed is currently available here: https://docs.google.com/spreadsheets/d/1N1nuzVN5tXoDmzkBLeC9_mwIlVH8DGF7YD2XwxA8BAE/edit#gid=0
 
 Note, upon creation of a new client, it ok to be offline - client will keep trying to reconnect in background.
-
 */
 package rpc
 
@@ -59,9 +59,11 @@ func BlockedMethods() []string {
 // the upstream node; the rest is considered to be routed to
 // the local node.
 // A list of supported methods:
-// curl --include \
-// --header "Content-Type: application/json" \
-// --header "Accept: application/json" 'https://api.infura.io/v1/jsonrpc/ropsten/methods'
+//
+// curl --include \
+// --header "Content-Type: application/json" \
+// --header "Accept: application/json" 'https://api.infura.io/v1/jsonrpc/ropsten/methods'
+//
 // Although it's tempting to only list methods coming to the local node as there're fewer of them
 // but it's deceptive: we want to ensure that only known requests leave our zone of responsibility.
 // Also, we want new requests in newer Geth versions not to be accidentally routed to the upstream.
@@ -85,7 +85,9 @@ func checkLogsAreInOrder(records []cacheRecord) error {
 
 // merge merges received records into old slice starting at provided position, example:
 // [1, 2, 3]
-// [2, 3, 4]
+//
+// [2, 3, 4]
+//
 // [1, 2, 3, 4]
 // if hash doesn't match previously received hash - such block was removed due to reorg
 // logs that were a part of that block will be returned with Removed set to true
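The comment's example can be illustrated with a minimal sketch of merging a received slice into an old one at a given position; this uses plain ints for illustration, not the cacheRecord-based implementation:

package main

import "fmt"

// mergeAt keeps old up to pos and overwrites the rest with received, growing the slice if needed.
func mergeAt(old, received []int, pos int) []int {
	merged := append([]int{}, old[:pos]...)
	return append(merged, received...)
}

func main() {
	fmt.Println(mergeAt([]int{1, 2, 3}, []int{2, 3, 4}, 1)) // [1 2 3 4]
}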
@@ -463,7 +463,7 @@ func updateOrInsertTransfers(chainID uint64, creator statementCreator, transfers
 	return nil
 }
 
-//markBlocksAsLoaded(tx, address, chainID, blocks)
+// markBlocksAsLoaded(tx, address, chainID, blocks)
 func markBlocksAsLoaded(chainID uint64, creator statementCreator, address common.Address, blocks []*big.Int) error {
 	update, err := creator.Prepare("UPDATE blocks SET loaded=? WHERE address=? AND blk_number=? AND network_id=?")
 	if err != nil {
@@ -87,16 +87,18 @@ func TriggerDefaultNodeNotificationHandler(jsonEvent string) {
 	logger.Trace("Notification received", "event", jsonEvent)
 }
 
+// nolint: golint
+//
 //export NotifyNode
-//nolint: golint
 func NotifyNode(jsonEvent *C.char) {
 	notificationHandlerMutex.RLock()
 	defer notificationHandlerMutex.RUnlock()
 	notificationHandler(C.GoString(jsonEvent))
 }
 
+// nolint: golint
+//
 //export TriggerTestSignal
-//nolint: golint
 func TriggerTestSignal() {
 	str := C.CString(`{"answer": 42}`)
 	C.StatusServiceSignalEvent(str)
@@ -44,7 +44,7 @@ func initSingleTest() {
 var sharedTopic = common.TopicType{0xF, 0x1, 0x2, 0}
 var wrongTopic = common.TopicType{0, 0, 0, 0}
 
-//two generic waku node handshake. one don't send light flag
+// two generic waku node handshake. one don't send light flag
 func TestTopicOrBloomMatch(t *testing.T) {
 	p := Peer{}
 	p.setTopicInterest([]common.TopicType{sharedTopic})
@@ -44,7 +44,7 @@ func initSingleTest() {
 var sharedTopic = common.TopicType{0xF, 0x1, 0x2, 0}
 var wrongTopic = common.TopicType{0, 0, 0, 0}
 
-//two generic waku node handshake. one don't send light flag
+// two generic waku node handshake. one don't send light flag
 func TestTopicOrBloomMatch(t *testing.T) {
 	p := Peer{}
 	p.setTopicInterest([]common.TopicType{sharedTopic})
@@ -547,7 +547,7 @@ func (s *WakuTestSuite) TestMailserverCompletionEvent() {
 	}
 }
 
-//two generic waku node handshake
+// two generic waku node handshake
 func (s *WakuTestSuite) TestPeerHandshakeWithTwoFullNode() {
 	rw1, rw2 := p2p.MsgPipe()
 	defer func() { handleError(s.T(), rw1.Close()) }()
@@ -571,7 +571,7 @@ func (s *WakuTestSuite) TestPeerHandshakeWithTwoFullNode() {
 	s.Require().Equal(pow, p2.PoWRequirement())
 }
 
-//two generic waku node handshake. one don't send light flag
+// two generic waku node handshake. one don't send light flag
 func (s *WakuTestSuite) TestHandshakeWithOldVersionWithoutLightModeFlag() {
 	rw1, rw2 := p2p.MsgPipe()
 	defer func() { handleError(s.T(), rw1.Close()) }()
@@ -591,7 +591,7 @@ func (s *WakuTestSuite) TestHandshakeWithOldVersionWithoutLightModeFlag() {
 	s.Require().NoError(err)
 }
 
-//two light nodes handshake. restriction enable
+// two light nodes handshake. restriction enable
 func (s *WakuTestSuite) TestTwoLightPeerHandshakeRestrictionOff() {
 	rw1, rw2 := p2p.MsgPipe()
 	defer func() { handleError(s.T(), rw1.Close()) }()
@@ -613,7 +613,7 @@ func (s *WakuTestSuite) TestTwoLightPeerHandshakeRestrictionOff() {
 	s.Require().NoError(p2.Start())
 }
 
-//two light nodes handshake. restriction enabled
+// two light nodes handshake. restriction enabled
 func (s *WakuTestSuite) TestTwoLightPeerHandshakeError() {
 	rw1, rw2 := p2p.MsgPipe()
 	defer func() { handleError(s.T(), rw1.Close()) }()