diff --git a/go.mod b/go.mod index 6f76fed8e..5bb517c6f 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/status-im/migrate/v4 v4.6.2-status.2 github.com/status-im/rendezvous v1.3.0 github.com/status-im/status-go/extkeys v1.1.0 - github.com/status-im/status-go/whisper/v6 v6.2.1 + github.com/status-im/status-go/whisper/v6 v6.2.4 github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501 github.com/stretchr/testify v1.4.0 github.com/syndtr/goleveldb v1.0.0 @@ -52,6 +52,7 @@ require ( github.com/wealdtech/go-ens/v3 v3.3.0 go.uber.org/zap v1.13.0 golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect golang.org/x/tools v0.0.0-20200211045251-2de505fc5306 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/go-playground/validator.v9 v9.31.0 diff --git a/go.sum b/go.sum index a6072fb55..262a1a468 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/status-im/rendezvous v1.3.0/go.mod h1:+hzjuP+j/XzLPeF6E50b88pWOTLdTcw github.com/status-im/status-go/extkeys v1.0.0/go.mod h1:GdqJbrcpkNm5ZsSCpp+PdMxnXx+OcRBdm3PI0rs1FpU= github.com/status-im/status-go/extkeys v1.1.0 h1:QgnXlMvhlFyRu+GdpPn1Ve22IidnDdslFB/Py6HWj78= github.com/status-im/status-go/extkeys v1.1.0/go.mod h1:nT/T2+G4L/6qPVIIfI3oT8dQSVyn7fQYY8G3yL3PIGY= -github.com/status-im/status-go/whisper/v6 v6.2.1 h1:rqqEX0seJPMj1JOAp1A5wmdR/oSzQJBrhme6p40UUbE= -github.com/status-im/status-go/whisper/v6 v6.2.1/go.mod h1:csqMoPMkCPW1NJO56HJzNTWAl9UMdetnQzkPbPjsAC4= +github.com/status-im/status-go/whisper/v6 v6.2.4 h1:/GAW/jtbhS/ZjVDkaTSc8wbjepMljleXwBoc30UuWok= +github.com/status-im/status-go/whisper/v6 v6.2.4/go.mod h1:csqMoPMkCPW1NJO56HJzNTWAl9UMdetnQzkPbPjsAC4= github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501 h1:oa0KU5jJRNtXaM/P465MhvSFo/HM2O8qi2DDuPcd7ro= github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501/go.mod h1:RYo/itke1oU5k/6sj9DNM3QAwtE5rZSgg5JnkOv83hk= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= @@ -746,6 +746,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/services/shhext/service.go b/services/shhext/service.go index 6c20a4138..263f70bd7 100644 --- a/services/shhext/service.go +++ b/services/shhext/service.go @@ -50,7 +50,7 @@ func (s *Service) APIs() []rpc.API { Namespace: "shhext", Version: "1.0", Service: NewPublicAPI(s), - Public: true, + Public: false, }, } return apis diff --git a/services/wakuext/service.go b/services/wakuext/service.go index 7aae6e329..ca4d18e48 100644 --- a/services/wakuext/service.go +++ 
b/services/wakuext/service.go @@ -43,7 +43,7 @@ func (s *Service) APIs() []rpc.API { Namespace: "wakuext", Version: "1.0", Service: NewPublicAPI(s), - Public: true, + Public: false, }, } return apis diff --git a/vendor/github.com/status-im/status-go/whisper/v6/events.go b/vendor/github.com/status-im/status-go/whisper/v6/events.go index 3e2c41795..18a6a6a78 100644 --- a/vendor/github.com/status-im/status-go/whisper/v6/events.go +++ b/vendor/github.com/status-im/status-go/whisper/v6/events.go @@ -17,8 +17,8 @@ const ( // EventEnvelopeReceived must be sent to the feed even if envelope was previously in the cache. // And event, ideally, should contain information about peer that sent envelope to us. EventEnvelopeReceived EventType = "envelope.received" - // EventBatchAcknowledged is sent when batch of envelopes was acknowleged by a peer. - EventBatchAcknowledged EventType = "batch.acknowleged" + // EventBatchAcknowledged is sent when batch of envelopes was acknowledged by a peer. + EventBatchAcknowledged EventType = "batch.acknowledged" // EventEnvelopeAvailable fires when envelop is available for filters EventEnvelopeAvailable EventType = "envelope.available" // EventMailServerRequestSent fires when such request is sent. diff --git a/vendor/github.com/status-im/status-go/whisper/v6/filter.go b/vendor/github.com/status-im/status-go/whisper/v6/filter.go index 9bdc7742c..21681c0d9 100644 --- a/vendor/github.com/status-im/status-go/whisper/v6/filter.go +++ b/vendor/github.com/status-im/status-go/whisper/v6/filter.go @@ -55,7 +55,7 @@ func (store *MemoryMessageStore) Add(msg *ReceivedMessage) error { return nil } -// Pop returns all avaiable messages and cleans the store. +// Pop returns all available messages and cleans the store. func (store *MemoryMessageStore) Pop() ([]*ReceivedMessage, error) { store.mu.Lock() defer store.mu.Unlock() diff --git a/vendor/github.com/status-im/status-go/whisper/v6/rate_limiter.go b/vendor/github.com/status-im/status-go/whisper/v6/rate_limiter.go index 02a1f302e..a591e0dbc 100644 --- a/vendor/github.com/status-im/status-go/whisper/v6/rate_limiter.go +++ b/vendor/github.com/status-im/status-go/whisper/v6/rate_limiter.go @@ -125,7 +125,7 @@ func (r *PeerRateLimiter) decorate(p *Peer, rw p2p.MsgReadWriter, runLoop runLoo if halted := r.throttleIP(ip); halted { for _, h := range r.handlers { if err := h.ExceedIPLimit(); err != nil { - errC <- fmt.Errorf("exceed rate limit by IP: %w", err) + errC <- fmt.Errorf("exceed rate limit by IP: %v", err) return } } @@ -138,7 +138,7 @@ func (r *PeerRateLimiter) decorate(p *Peer, rw p2p.MsgReadWriter, runLoop runLoo if halted := r.throttlePeer(peerID); halted { for _, h := range r.handlers { if err := h.ExceedPeerLimit(); err != nil { - errC <- fmt.Errorf("exceeded rate limit by peer: %w", err) + errC <- fmt.Errorf("exceeded rate limit by peer: %v", err) return } } diff --git a/vendor/github.com/status-im/status-go/whisper/v6/whisper.go b/vendor/github.com/status-im/status-go/whisper/v6/whisper.go index 22944e09d..7585b49ed 100644 --- a/vendor/github.com/status-im/status-go/whisper/v6/whisper.go +++ b/vendor/github.com/status-im/status-go/whisper/v6/whisper.go @@ -260,7 +260,7 @@ func (whisper *Whisper) APIs() []rpc.API { Namespace: ProtocolName, Version: ProtocolVersionStr, Service: NewPublicWhisperAPI(whisper), - Public: true, + Public: false, }, } } diff --git a/vendor/golang.org/x/sync/syncmap/go19.go b/vendor/golang.org/x/sync/syncmap/go19.go new file mode 100644 index 000000000..41a59091e --- /dev/null +++ 
b/vendor/golang.org/x/sync/syncmap/go19.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package syncmap + +import "sync" // home to the standard library's sync.Map implementation as of Go 1.9 + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. +// +// The zero Map is valid and empty. +// +// A Map must not be copied after first use. +type Map = sync.Map diff --git a/vendor/golang.org/x/sync/syncmap/map.go b/vendor/golang.org/x/sync/syncmap/map.go index 80e15847e..4b638cb7a 100644 --- a/vendor/golang.org/x/sync/syncmap/map.go +++ b/vendor/golang.org/x/sync/syncmap/map.go @@ -1,372 +1,8 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package syncmap provides a concurrent map implementation. -// It is a prototype for a proposed addition to the sync package -// in the standard library. -// (https://golang.org/issue/18177) +// This was the prototype for sync.Map which was added to the standard library's +// sync package in Go 1.9. https://golang.org/pkg/sync/#Map. package syncmap - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. -// It is safe for multiple goroutines to call a Map's methods concurrently. -// -// The zero Map is valid and empty. -// -// A Map must not be copied after first use. -type Map struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[interface{}]*entry - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnly struct { - m map[interface{}]*entry - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. 
-var expunged = unsafe.Pointer(new(interface{})) - -// An entry is a slot in the map corresponding to a particular key. -type entry struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted and m.dirty == nil. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntry(i interface{}) *entry { - return &entry{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *Map) Load(key interface{}) (value interface{}, ok bool) { - read, _ := m.read.Load().(readOnly) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnly) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return nil, false - } - return e.load() -} - -func (e *entry) load() (value interface{}, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expunged { - return nil, false - } - return *(*interface{})(p), true -} - -// Store sets the value for a key. -func (m *Map) Store(key, value interface{}) { - read, _ := m.read.Load().(readOnly) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnly) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnly{m: read.m, amended: true}) - } - m.dirty[key] = newEntry(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entry) tryStore(i *interface{}) bool { - p := atomic.LoadPointer(&e.p) - if p == expunged { - return false - } - for { - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - p = atomic.LoadPointer(&e.p) - if p == expunged { - return false - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. 
-// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entry) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expunged, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entry) storeLocked(i *interface{}) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnly) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnly) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnly{m: read.m, amended: true}) - } - m.dirty[key] = newEntry(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expunged { - return nil, false, false - } - if p != nil { - return *(*interface{})(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expunged { - return nil, false, false - } - if p != nil { - return *(*interface{})(p), true, true - } - } -} - -// Delete deletes the value for a key. -func (m *Map) Delete(key interface{}) { - read, _ := m.read.Load().(readOnly) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnly) - e, ok = read.m[key] - if !ok && read.amended { - delete(m.dirty, key) - } - m.mu.Unlock() - } - if ok { - e.delete() - } -} - -func (e *entry) delete() (hadValue bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expunged { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. 
-func (m *Map) Range(f func(key, value interface{}) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnly) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! - m.mu.Lock() - read, _ = m.read.Load().(readOnly) - if read.amended { - read = readOnly{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *Map) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnly{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *Map) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnly) - m.dirty = make(map[interface{}]*entry, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entry) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expunged -} diff --git a/vendor/golang.org/x/sync/syncmap/pre_go19.go b/vendor/golang.org/x/sync/syncmap/pre_go19.go new file mode 100644 index 000000000..01a7be7f9 --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/pre_go19.go @@ -0,0 +1,370 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package syncmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. +// +// The zero Map is valid and empty. +// +// A Map must not be copied after first use. +type Map struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[interface{}]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. 
+ // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[interface{}]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(interface{})) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntry(i interface{}) *entry { + return &entry{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (m *Map) Load(key interface{}) (value interface{}, ok bool) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value interface{}, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return nil, false + } + return *(*interface{})(p), true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value interface{}) { + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. 
+ m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entry) tryStore(i *interface{}) bool { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entry) storeLocked(i *interface{}) { + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + } +} + +// Delete deletes the value for a key. 
+func (m *Map) Delete(key interface{}) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entry) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value interface{}) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnly) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnly) + m.dirty = make(map[interface{}]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4830617a8..77617d8f7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -374,7 +374,7 @@ github.com/status-im/rendezvous/protocol github.com/status-im/rendezvous/server # github.com/status-im/status-go/extkeys v1.1.0 github.com/status-im/status-go/extkeys -# github.com/status-im/status-go/whisper/v6 v6.2.1 +# github.com/status-im/status-go/whisper/v6 v6.2.4 github.com/status-im/status-go/whisper/v6 # github.com/status-im/tcp-shaker v0.0.0-20191114194237-215893130501 github.com/status-im/tcp-shaker @@ -486,7 +486,7 @@ golang.org/x/net/idna golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/ipv4 -# golang.org/x/sync v0.0.0-20190423024810-112230192c58 +# golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/syncmap # golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 golang.org/x/sys/cpu diff --git a/waku/waku.go 
b/waku/waku.go index fa712d46d..b0dee5358 100644 --- a/waku/waku.go +++ b/waku/waku.go @@ -419,7 +419,7 @@ func (w *Waku) APIs() []rpc.API { Namespace: ProtocolName, Version: ProtocolVersionStr, Service: NewPublicWakuAPI(w), - Public: true, + Public: false, }, } } diff --git a/whisper/whisper.go b/whisper/whisper.go index 22944e09d..7585b49ed 100644 --- a/whisper/whisper.go +++ b/whisper/whisper.go @@ -260,7 +260,7 @@ func (whisper *Whisper) APIs() []rpc.API { Namespace: ProtocolName, Version: ProtocolVersionStr, Service: NewPublicWhisperAPI(whisper), - Public: true, + Public: false, }, } }
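Two patterns in this diff are worth a closer look outside the patch context.

First, every affected service (`shhext`, `wakuext`, the Waku node, and both Whisper copies) now registers its RPC namespace with `Public: false`, so the namespace is no longer advertised as safe for untrusted clients. A minimal sketch of the registration pattern, assuming go-ethereum's `rpc` package as used in the hunks above; the `example` namespace and `ExampleAPI` type are hypothetical, for illustration only:

```go
package example

import "github.com/ethereum/go-ethereum/rpc"

// ExampleAPI stands in for a service such as shhext's PublicAPI.
type ExampleAPI struct{}

// APIs mirrors the Service.APIs methods touched in this diff.
// With Public set to false, the node treats the namespace as
// restricted instead of offering it to untrusted clients by default.
func APIs(svc *ExampleAPI) []rpc.API {
	return []rpc.API{
		{
			Namespace: "example",
			Version:   "1.0",
			Service:   svc,
			Public:    false, // previously true
		},
	}
}
```

Second, the `golang.org/x/sync` bump pulls in the version of `syncmap` that selects its implementation by toolchain: `pre_go19.go` (guarded by `// +build !go1.9`) keeps the original prototype, while `go19.go` reduces `syncmap.Map` to a type alias for the standard library's `sync.Map`. A self-contained sketch of the same pattern; the package name `store` is hypothetical:

```go
// +build go1.9

// Package store illustrates the build-tag alias pattern used by
// syncmap: this file compiles only on Go 1.9 and later, while a
// sibling file guarded by "+build !go1.9" carries the fallback.
package store

import "sync"

// Map is an alias, not a new type definition: callers keep compiling
// against store.Map but get sync.Map's methods and semantics directly.
type Map = sync.Map
```

Because the alias preserves `sync.Map`'s identity and method set, code importing `syncmap` keeps compiling unchanged while dropping the duplicated implementation on modern toolchains.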