update to geth v1.8.20 (#1327)

Adam Babik 2018-12-19 11:02:07 +01:00 committed by GitHub
parent 79bf92bea5
commit b1f9030177
247 changed files with 9193 additions and 8855 deletions

Gopkg.lock generated

@@ -21,6 +21,17 @@
pruneopts = "NUT"
revision = "5312a61534124124185d41f09206b9fef1d88403"
[[projects]]
digest = "1:d1de7e0984d40e19819546c8deb7dc451d3c4c59fd12977773aaa6b528523f7f"
name = "github.com/allegro/bigcache"
packages = [
".",
"queue",
]
pruneopts = "NUT"
revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068"
version = "v1.1.0"
[[projects]]
digest = "1:f5322546f652db78b7a8efd35047a61d1e492abca2263e1c647eca49e1c8a354"
name = "github.com/aristanetworks/goarista"
@@ -88,7 +99,7 @@
revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8"
[[projects]]
digest = "1:9919b13a30417311490b5e631bfecc14b20c1aeec85fb0ba5286efaa300622d6"
digest = "1:ca77c7225db49544c7bf8ebc93d18e4063f32e60ed4894ff590c1cb8fe315d66"
name = "github.com/ethereum/go-ethereum"
packages = [
".",
@@ -158,9 +169,9 @@
"trie",
]
pruneopts = "T"
revision = "97bb147b17ed32d82d0ce1dd110caa31d02923db"
revision = "ffe89538dae6e643cb5dc599fe58c59a29eb75e5"
source = "github.com/status-im/go-ethereum"
version = "v1.8.17"
version = "v1.8.20"
[[projects]]
digest = "1:5ac7ecd476a2355a5201229081df2e5f57333ecf703e1f69dde699ae34169c1b"
@@ -826,8 +837,8 @@
name = "github.com/status-im/whisper"
packages = ["whisperv6"]
pruneopts = "NUT"
revision = "82a7734c369137d50fcbcae86230d83db6bfc885"
version = "v1.4.4"
revision = "109fa96320654a4f15f158a03245e7cd7457574a"
version = "v1.4.5"
[[projects]]
digest = "1:572c783a763db6383aca3179976eb80e4c900f52eba56cba8bb2e3cea7ce720e"

Gopkg.toml

@@ -24,12 +24,12 @@
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "=v1.8.17"
version = "=v1.8.20"
source = "github.com/status-im/go-ethereum"
[[constraint]]
name = "github.com/status-im/whisper"
version = "=v1.4.4"
version = "=v1.4.5"
[[override]]
name = "github.com/golang/protobuf"


@@ -79,8 +79,7 @@ func main() {
log.Crit("Unable to listen on udp", "address", addr, "error", err)
}
realaddr := conn.LocalAddr().(*net.UDPAddr)
tab, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", nil)
tab, err := discv5.ListenUDP(nodeKey, conn, "", nil)
if err != nil {
log.Crit("Failed to create discovery v5 table:", "error", err)
}


@@ -49,8 +49,7 @@ func (d *DiscV5) Start() error {
if err != nil {
return err
}
realaddr := conn.LocalAddr().(*net.UDPAddr)
ntab, err := discv5.ListenUDP(d.prv, conn, realaddr, "", nil)
ntab, err := discv5.ListenUDP(d.prv, conn, "", nil)
if err != nil {
return err
}
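Both hunks above track the same upstream API change: as of geth v1.8.20, discv5.ListenUDP derives the local address from the connection itself, so the realaddr argument is gone. A minimal sketch of the updated call, assuming a nodeKey (*ecdsa.PrivateKey) is already in scope:

addr, err := net.ResolveUDPAddr("udp", ":30304")
if err != nil {
	log.Crit("Unable to resolve UDP address", "error", err)
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
	log.Crit("Unable to listen on udp", "address", addr, "error", err)
}
// pre-v1.8.20: realaddr := conn.LocalAddr().(*net.UDPAddr)
//              tab, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", nil)
tab, err := discv5.ListenUDP(nodeKey, conn, "", nil) // address now taken from conn
if err != nil {
	log.Crit("Failed to create discovery v5 table:", "error", err)
}
// tab is then used to bootstrap the DHT as before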

vendor/github.com/allegro/bigcache/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/allegro/bigcache/bigcache.go generated vendored Normal file

@@ -0,0 +1,155 @@
package bigcache
import (
"fmt"
"time"
)
const (
minimumEntriesInShard = 10 // Minimum number of entries in single shard
)
// BigCache is a fast, concurrent, evicting cache created to keep a big number of entries without impacting performance.
// It keeps entries on the heap but omits GC for them. To achieve that, operations are performed on byte arrays,
// therefore entries will need to be (de)serialized in front of the cache in most use cases.
type BigCache struct {
shards []*cacheShard
lifeWindow uint64
clock clock
hash Hasher
config Config
shardMask uint64
maxShardSize uint32
}
// NewBigCache initializes a new instance of BigCache
func NewBigCache(config Config) (*BigCache, error) {
return newBigCache(config, &systemClock{})
}
func newBigCache(config Config, clock clock) (*BigCache, error) {
if !isPowerOfTwo(config.Shards) {
return nil, fmt.Errorf("Shards number must be power of two")
}
if config.Hasher == nil {
config.Hasher = newDefaultHasher()
}
cache := &BigCache{
shards: make([]*cacheShard, config.Shards),
lifeWindow: uint64(config.LifeWindow.Seconds()),
clock: clock,
hash: config.Hasher,
config: config,
shardMask: uint64(config.Shards - 1),
maxShardSize: uint32(config.maximumShardSize()),
}
var onRemove func(wrappedEntry []byte)
if config.OnRemove == nil {
onRemove = cache.notProvidedOnRemove
} else {
onRemove = cache.providedOnRemove
}
for i := 0; i < config.Shards; i++ {
cache.shards[i] = initNewShard(config, onRemove, clock)
}
if config.CleanWindow > 0 {
go func() {
for t := range time.Tick(config.CleanWindow) {
cache.cleanUp(uint64(t.Unix()))
}
}()
}
return cache, nil
}
// Get reads entry for the key.
// It returns an EntryNotFoundError when
// no entry exists for the given key.
func (c *BigCache) Get(key string) ([]byte, error) {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.get(key, hashedKey)
}
// Set saves entry under the key
func (c *BigCache) Set(key string, entry []byte) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.set(key, hashedKey, entry)
}
// Delete removes the key
func (c *BigCache) Delete(key string) error {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.del(key, hashedKey)
}
// Reset empties all cache shards
func (c *BigCache) Reset() error {
for _, shard := range c.shards {
shard.reset(c.config)
}
return nil
}
// Len computes number of entries in cache
func (c *BigCache) Len() int {
var len int
for _, shard := range c.shards {
len += shard.len()
}
return len
}
// Stats returns cache's statistics
func (c *BigCache) Stats() Stats {
var s Stats
for _, shard := range c.shards {
tmp := shard.getStats()
s.Hits += tmp.Hits
s.Misses += tmp.Misses
s.DelHits += tmp.DelHits
s.DelMisses += tmp.DelMisses
s.Collisions += tmp.Collisions
}
return s
}
// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
func (c *BigCache) Iterator() *EntryInfoIterator {
return newIterator(c)
}
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > c.lifeWindow {
evict()
return true
}
return false
}
func (c *BigCache) cleanUp(currentTimestamp uint64) {
for _, shard := range c.shards {
shard.cleanUp(currentTimestamp)
}
}
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
return c.shards[hashedKey&c.shardMask]
}
func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
}
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
}
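For orientation, a minimal end-to-end use of the cache vendored above; a sketch assuming the standard import path and default tuning:

package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	// DefaultConfig picks 1024 shards and the given life window.
	cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
	if err != nil {
		panic(err)
	}
	// Values are raw byte slices; callers (de)serialize in front of the cache.
	if err := cache.Set("my-unique-key", []byte("value")); err != nil {
		panic(err)
	}
	entry, err := cache.Get("my-unique-key")
	if err != nil {
		panic(err) // a miss comes back as an EntryNotFoundError
	}
	fmt.Println(string(entry))
}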

vendor/github.com/allegro/bigcache/clock.go generated vendored Normal file

@@ -0,0 +1,14 @@
package bigcache
import "time"
type clock interface {
epoch() int64
}
type systemClock struct {
}
func (c systemClock) epoch() int64 {
return time.Now().Unix()
}

vendor/github.com/allegro/bigcache/config.go generated vendored Normal file

@@ -0,0 +1,67 @@
package bigcache
import "time"
// Config for BigCache
type Config struct {
// Number of cache shards, value must be a power of two
Shards int
// Time after which entry can be evicted
LifeWindow time.Duration
// Interval between removing expired entries (clean up).
// If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution.
CleanWindow time.Duration
// Max number of entries in life window. Used only to calculate initial size for cache shards.
// When proper value is set then additional memory allocation does not occur.
MaxEntriesInWindow int
// Max size of entry in bytes. Used only to calculate initial size for cache shards.
MaxEntrySize int
// Verbose mode prints information about new memory allocation
Verbose bool
// Hasher used to map between string keys and unsigned 64-bit integers; by default fnv64 hashing is used.
Hasher Hasher
// HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit.
// It can protect application from consuming all available memory on machine, therefore from running OOM Killer.
// Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then
// the oldest entries are overridden for the new ones.
HardMaxCacheSize int
// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left
// for the new entry. Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
OnRemove func(key string, entry []byte)
// Logger is a logging interface and used in combination with `Verbose`
// Defaults to `DefaultLogger()`
Logger Logger
}
// DefaultConfig initializes config with default values.
// When load for BigCache can be predicted in advance then it is better to use custom config.
func DefaultConfig(eviction time.Duration) Config {
return Config{
Shards: 1024,
LifeWindow: eviction,
CleanWindow: 0,
MaxEntriesInWindow: 1000 * 10 * 60,
MaxEntrySize: 500,
Verbose: true,
Hasher: newDefaultHasher(),
HardMaxCacheSize: 0,
Logger: DefaultLogger(),
}
}
// initialShardSize computes initial shard size
func (c Config) initialShardSize() int {
return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
}
// maximumShardSize computes maximum shard size
func (c Config) maximumShardSize() int {
maxShardSize := 0
if c.HardMaxCacheSize > 0 {
maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
}
return maxShardSize
}
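A hand-rolled Config, illustrative values only, grounded in the fields above. Note the shard-size arithmetic: maximumShardSize spreads HardMaxCacheSize evenly, so 512 MB across 1024 shards caps each shard's byte queue at 512 KB:

config := bigcache.Config{
	Shards:             1024,             // must be a power of two
	LifeWindow:         10 * time.Minute, // entries older than this become evictable
	CleanWindow:        time.Minute,      // sub-second values are counterproductive
	MaxEntriesInWindow: 1000 * 10 * 60,   // sizing hint only
	MaxEntrySize:       500,              // sizing hint only, in bytes
	HardMaxCacheSize:   512,              // MB; 512*1024*1024 / 1024 shards = 512 KB per shard
	OnRemove: func(key string, entry []byte) {
		// fired on eviction by age or by space pressure
	},
}
cache, err := bigcache.NewBigCache(config)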

vendor/github.com/allegro/bigcache/encoding.go generated vendored Normal file

@@ -0,0 +1,70 @@
package bigcache
import (
"encoding/binary"
"reflect"
"unsafe"
)
const (
timestampSizeInBytes = 8 // Number of bytes used for timestamp
hashSizeInBytes = 8 // Number of bytes used for hash
keySizeInBytes = 2 // Number of bytes used for size of entry key
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
)
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
keyLength := len(key)
blobLength := len(entry) + headersSizeInBytes + keyLength
if blobLength > len(*buffer) {
*buffer = make([]byte, blobLength)
}
blob := *buffer
binary.LittleEndian.PutUint64(blob, timestamp)
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
copy(blob[headersSizeInBytes:], key)
copy(blob[headersSizeInBytes+keyLength:], entry)
return blob[:blobLength]
}
func readEntry(data []byte) []byte {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
copy(dst, data[headersSizeInBytes+length:])
return dst
}
func readTimestampFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data)
}
func readKeyFromEntry(data []byte) string {
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
// copy on read
dst := make([]byte, length)
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
return bytesToString(dst)
}
func bytesToString(b []byte) string {
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
return *(*string)(unsafe.Pointer(&strHeader))
}
func readHashFromEntry(data []byte) uint64 {
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
}
func resetKeyFromEntry(data []byte) {
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
}
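The helpers above fix a wire format of fixed-width headers followed by the variable-length payloads. A same-package sketch of a round trip (values illustrative):

// Layout: [8B timestamp][8B hash][2B key length][key bytes][entry bytes], little-endian.
var buffer []byte
blob := wrapEntry(1545216000, 0xdeadbeef, "k1", []byte("v1"), &buffer)
ts := readTimestampFromEntry(blob) // 1545216000
h := readHashFromEntry(blob)       // 0xdeadbeef
key := readKeyFromEntry(blob)      // "k1" (copied out of the shared buffer)
val := readEntry(blob)             // []byte("v1") (also a copy)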


@@ -0,0 +1,17 @@
package bigcache
import "fmt"
// EntryNotFoundError is an error type returned when no entry was found for the provided key
type EntryNotFoundError struct {
message string
}
func notFound(key string) error {
return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
}
// Error returned when entry does not exist.
func (e EntryNotFoundError) Error() string {
return e.message
}

vendor/github.com/allegro/bigcache/fnv.go generated vendored Normal file

@@ -0,0 +1,28 @@
package bigcache
// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
// Its Sum64 method will lay the value out in big-endian byte order.
// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
func newDefaultHasher() Hasher {
return fnv64a{}
}
type fnv64a struct{}
const (
// offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
offset64 = 14695981039346656037
// prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
prime64 = 1099511628211
)
// Sum64 gets the string and returns its uint64 hash value.
func (f fnv64a) Sum64(key string) uint64 {
var hash uint64 = offset64
for i := 0; i < len(key); i++ {
hash ^= uint64(key[i])
hash *= prime64
}
return hash
}
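A quick same-package sanity check of the constants: Sum64("") performs zero rounds and therefore returns the offset basis unchanged, and each byte adds one XOR-then-multiply round:

h := newDefaultHasher()
fmt.Println(h.Sum64("") == uint64(offset64)) // true: no rounds for the empty string
one := uint64(offset64) ^ uint64('a')        // the single round for "a"
fmt.Println(h.Sum64("a") == one*prime64)     // true: multiplication wraps modulo 2^64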

vendor/github.com/allegro/bigcache/hash.go generated vendored Normal file

@@ -0,0 +1,8 @@
package bigcache
// Hasher is responsible for generating an unsigned 64-bit hash of the provided string. A Hasher should minimize collisions
// (generating the same hash for different strings), and while performance is also important, fast functions are preferable (e.g.
// you can use the FarmHash family).
type Hasher interface {
Sum64(string) uint64
}
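Anything with this one method can be plugged in through Config.Hasher; a hypothetical adapter over the stdlib FNV implementation (slower than the allocation-free hasher above, but illustrative):

import "hash/fnv"

type stdFNV struct{}

func (stdFNV) Sum64(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key)) // Write on a hash.Hash never returns an error
	return h.Sum64()
}

// usage: config.Hasher = stdFNV{}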

vendor/github.com/allegro/bigcache/iterator.go generated vendored Normal file

@@ -0,0 +1,122 @@
package bigcache
import "sync"
type iteratorError string
func (e iteratorError) Error() string {
return string(e)
}
// ErrInvalidIteratorState is reported when iterator is in invalid state
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
// ErrCannotRetrieveEntry is reported when an entry cannot be retrieved from the underlying queue
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
var emptyEntryInfo = EntryInfo{}
// EntryInfo holds information about an entry in the cache
type EntryInfo struct {
timestamp uint64
hash uint64
key string
value []byte
}
// Key returns entry's underlying key
func (e EntryInfo) Key() string {
return e.key
}
// Hash returns entry's hash value
func (e EntryInfo) Hash() uint64 {
return e.hash
}
// Timestamp returns entry's timestamp (time of insertion)
func (e EntryInfo) Timestamp() uint64 {
return e.timestamp
}
// Value returns entry's underlying value
func (e EntryInfo) Value() []byte {
return e.value
}
// EntryInfoIterator allows iterating over entries in the cache
type EntryInfoIterator struct {
mutex sync.Mutex
cache *BigCache
currentShard int
currentIndex int
elements []uint32
elementsCount int
valid bool
}
// SetNext moves to next element and returns true if it exists.
func (it *EntryInfoIterator) SetNext() bool {
it.mutex.Lock()
it.valid = false
it.currentIndex++
if it.elementsCount > it.currentIndex {
it.valid = true
it.mutex.Unlock()
return true
}
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
// Non empty shard - stick with it
if it.elementsCount > 0 {
it.currentIndex = 0
it.currentShard = i
it.valid = true
it.mutex.Unlock()
return true
}
}
it.mutex.Unlock()
return false
}
func newIterator(cache *BigCache) *EntryInfoIterator {
elements, count := cache.shards[0].copyKeys()
return &EntryInfoIterator{
cache: cache,
currentShard: 0,
currentIndex: -1,
elements: elements,
elementsCount: count,
}
}
// Value returns current value from the iterator
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
it.mutex.Lock()
if !it.valid {
it.mutex.Unlock()
return emptyEntryInfo, ErrInvalidIteratorState
}
entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
if err != nil {
it.mutex.Unlock()
return emptyEntryInfo, ErrCannotRetrieveEntry
}
it.mutex.Unlock()
return EntryInfo{
timestamp: readTimestampFromEntry(entry),
hash: readHashFromEntry(entry),
key: readKeyFromEntry(entry),
value: readEntry(entry),
}, nil
}
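Putting SetNext and Value together, a full scan of the cache looks roughly like this (cache being a *BigCache as above):

it := cache.Iterator()
for it.SetNext() {
	info, err := it.Value()
	if err != nil {
		continue // ErrCannotRetrieveEntry: the slot changed between SetNext and Value
	}
	fmt.Printf("%s -> %d bytes, inserted at %d\n", info.Key(), len(info.Value()), info.Timestamp())
}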

vendor/github.com/allegro/bigcache/logger.go generated vendored Normal file

@@ -0,0 +1,30 @@
package bigcache
import (
"log"
"os"
)
// Logger is invoked when `Config.Verbose=true`
type Logger interface {
Printf(format string, v ...interface{})
}
// this is a safeguard, breaking on compile time in case
// `log.Logger` does not adhere to our `Logger` interface.
// see https://golang.org/doc/faq#guarantee_satisfies_interface
var _ Logger = &log.Logger{}
// DefaultLogger returns a `Logger` implementation
// backed by stdlib's log
func DefaultLogger() *log.Logger {
return log.New(os.Stdout, "", log.LstdFlags)
}
func newLogger(custom Logger) Logger {
if custom != nil {
return custom
}
return DefaultLogger()
}

vendor/github.com/allegro/bigcache/queue/bytes_queue.go generated vendored Normal file

@@ -0,0 +1,210 @@
package queue
import (
"encoding/binary"
"log"
"time"
)
const (
// Number of bytes used to keep information about entry size
headerEntrySize = 4
// Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index
leftMarginIndex = 1
// Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation.
// It keeps entries indexes unchanged
minimumEmptyBlobSize = 32 + headerEntrySize
)
// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
// Every push operation returns the index of the entry; it can be used to read the entry later
type BytesQueue struct {
array []byte
capacity int
maxCapacity int
head int
tail int
count int
rightMargin int
headerBuffer []byte
verbose bool
initialCapacity int
}
type queueError struct {
message string
}
// NewBytesQueue initializes a new bytes queue.
// Initial capacity is used in the bytes-array allocation.
// When the verbose flag is set, information about memory allocations is printed
func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
return &BytesQueue{
array: make([]byte, initialCapacity),
capacity: initialCapacity,
maxCapacity: maxCapacity,
headerBuffer: make([]byte, headerEntrySize),
tail: leftMarginIndex,
head: leftMarginIndex,
rightMargin: leftMarginIndex,
verbose: verbose,
initialCapacity: initialCapacity,
}
}
// Reset removes all entries from queue
func (q *BytesQueue) Reset() {
// Just reset indexes
q.tail = leftMarginIndex
q.head = leftMarginIndex
q.rightMargin = leftMarginIndex
q.count = 0
}
// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
// Returns index for pushed data or error if maximum size queue limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
dataLen := len(data)
if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
q.tail = leftMarginIndex
} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
return -1, &queueError{"Full queue. Maximum size limit reached."}
} else {
q.allocateAdditionalMemory(dataLen + headerEntrySize)
}
}
index := q.tail
q.push(data, dataLen)
return index, nil
}
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
start := time.Now()
if q.capacity < minimum {
q.capacity += minimum
}
q.capacity = q.capacity * 2
if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
q.capacity = q.maxCapacity
}
oldArray := q.array
q.array = make([]byte, q.capacity)
if leftMarginIndex != q.rightMargin {
copy(q.array, oldArray[:q.rightMargin])
if q.tail < q.head {
emptyBlobLen := q.head - q.tail - headerEntrySize
q.push(make([]byte, emptyBlobLen), emptyBlobLen)
q.head = leftMarginIndex
q.tail = q.rightMargin
}
}
if q.verbose {
log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
}
}
func (q *BytesQueue) push(data []byte, len int) {
binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
q.copy(q.headerBuffer, headerEntrySize)
q.copy(data, len)
if q.tail > q.head {
q.rightMargin = q.tail
}
q.count++
}
func (q *BytesQueue) copy(data []byte, len int) {
q.tail += copy(q.array[q.tail:], data[:len])
}
// Pop reads the oldest entry from queue and moves head pointer to the next one
func (q *BytesQueue) Pop() ([]byte, error) {
data, size, err := q.peek(q.head)
if err != nil {
return nil, err
}
q.head += headerEntrySize + size
q.count--
if q.head == q.rightMargin {
q.head = leftMarginIndex
if q.tail == q.rightMargin {
q.tail = leftMarginIndex
}
q.rightMargin = q.tail
}
return data, nil
}
// Peek reads the oldest entry from list without moving head pointer
func (q *BytesQueue) Peek() ([]byte, error) {
data, _, err := q.peek(q.head)
return data, err
}
// Get reads entry from index
func (q *BytesQueue) Get(index int) ([]byte, error) {
data, _, err := q.peek(index)
return data, err
}
// Capacity returns number of allocated bytes for queue
func (q *BytesQueue) Capacity() int {
return q.capacity
}
// Len returns number of entries kept in queue
func (q *BytesQueue) Len() int {
return q.count
}
// Error returns error message
func (e *queueError) Error() string {
return e.message
}
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
if q.count == 0 {
return nil, 0, &queueError{"Empty queue"}
}
if index <= 0 {
return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
}
if index+headerEntrySize >= len(q.array) {
return nil, 0, &queueError{"Index out of range"}
}
blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
}
func (q *BytesQueue) availableSpaceAfterTail() int {
if q.tail >= q.head {
return q.capacity - q.tail
}
return q.head - q.tail - minimumEmptyBlobSize
}
func (q *BytesQueue) availableSpaceBeforeHead() int {
if q.tail >= q.head {
return q.head - leftMarginIndex - minimumEmptyBlobSize
}
return q.head - q.tail - minimumEmptyBlobSize
}
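A sketch of the queue in isolation, using only the exported API above; the index returned by Push is exactly what the shard's hashmap stores:

q := queue.NewBytesQueue(1024, 0, false) // 1 KB initial capacity, no hard cap, quiet
index, err := q.Push([]byte("payload"))
if err != nil {
	panic(err)
}
entry, _ := q.Get(index) // random access via the index Push returned
oldest, _ := q.Pop()     // FIFO removal from the head
fmt.Println(string(entry), string(oldest), q.Len(), q.Capacity())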

vendor/github.com/allegro/bigcache/shard.go generated vendored Normal file

@@ -0,0 +1,229 @@
package bigcache
import (
"fmt"
"sync"
"sync/atomic"
"github.com/allegro/bigcache/queue"
)
type cacheShard struct {
hashmap map[uint64]uint32
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
onRemove func(wrappedEntry []byte)
isVerbose bool
logger Logger
clock clock
lifeWindow uint64
stats Stats
}
type onRemoveCallback func(wrappedEntry []byte)
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.miss()
return nil, notFound(key)
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.miss()
return nil, err
}
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}
s.lock.RUnlock()
s.collision()
return nil, notFound(key)
}
s.lock.RUnlock()
s.hit()
return readEntry(wrappedEntry), nil
}
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
currentTimestamp := uint64(s.clock.epoch())
s.lock.Lock()
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
resetKeyFromEntry(previousEntry)
}
}
if oldestEntry, err := s.entries.Peek(); err == nil {
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
}
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.lock.Unlock()
return nil
}
if s.removeOldestEntry() != nil {
s.lock.Unlock()
return fmt.Errorf("entry is bigger than max shard size")
}
}
}
func (s *cacheShard) del(key string, hashedKey uint64) error {
s.lock.RLock()
itemIndex := s.hashmap[hashedKey]
if itemIndex == 0 {
s.lock.RUnlock()
s.delmiss()
return notFound(key)
}
wrappedEntry, err := s.entries.Get(int(itemIndex))
if err != nil {
s.lock.RUnlock()
s.delmiss()
return err
}
s.lock.RUnlock()
s.lock.Lock()
{
delete(s.hashmap, hashedKey)
s.onRemove(wrappedEntry)
resetKeyFromEntry(wrappedEntry)
}
s.lock.Unlock()
s.delhit()
return nil
}
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp-oldestTimestamp > s.lifeWindow {
evict()
return true
}
return false
}
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
s.lock.Lock()
for {
if oldestEntry, err := s.entries.Peek(); err != nil {
break
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
break
}
}
s.lock.Unlock()
}
func (s *cacheShard) getOldestEntry() ([]byte, error) {
return s.entries.Peek()
}
func (s *cacheShard) getEntry(index int) ([]byte, error) {
return s.entries.Get(index)
}
func (s *cacheShard) copyKeys() (keys []uint32, next int) {
keys = make([]uint32, len(s.hashmap))
s.lock.RLock()
for _, index := range s.hashmap {
keys[next] = index
next++
}
s.lock.RUnlock()
return keys, next
}
func (s *cacheShard) removeOldestEntry() error {
oldest, err := s.entries.Pop()
if err == nil {
hash := readHashFromEntry(oldest)
delete(s.hashmap, hash)
s.onRemove(oldest)
return nil
}
return err
}
func (s *cacheShard) reset(config Config) {
s.lock.Lock()
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
s.entries.Reset()
s.lock.Unlock()
}
func (s *cacheShard) len() int {
s.lock.RLock()
res := len(s.hashmap)
s.lock.RUnlock()
return res
}
func (s *cacheShard) getStats() Stats {
var stats = Stats{
Hits: atomic.LoadInt64(&s.stats.Hits),
Misses: atomic.LoadInt64(&s.stats.Misses),
DelHits: atomic.LoadInt64(&s.stats.DelHits),
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
Collisions: atomic.LoadInt64(&s.stats.Collisions),
}
return stats
}
func (s *cacheShard) hit() {
atomic.AddInt64(&s.stats.Hits, 1)
}
func (s *cacheShard) miss() {
atomic.AddInt64(&s.stats.Misses, 1)
}
func (s *cacheShard) delhit() {
atomic.AddInt64(&s.stats.DelHits, 1)
}
func (s *cacheShard) delmiss() {
atomic.AddInt64(&s.stats.DelMisses, 1)
}
func (s *cacheShard) collision() {
atomic.AddInt64(&s.stats.Collisions, 1)
}
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
return &cacheShard{
hashmap: make(map[uint64]uint32, config.initialShardSize()),
entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
onRemove: callback,
isVerbose: config.Verbose,
logger: newLogger(config.Logger),
clock: clock,
lifeWindow: uint64(config.LifeWindow.Seconds()),
}
}

vendor/github.com/allegro/bigcache/stats.go generated vendored Normal file

@@ -0,0 +1,15 @@
package bigcache
// Stats stores cache statistics
type Stats struct {
// Hits is a number of successfully found keys
Hits int64 `json:"hits"`
// Misses is a number of not found keys
Misses int64 `json:"misses"`
// DelHits is a number of successfully deleted keys
DelHits int64 `json:"delete_hits"`
// DelMisses is a number of not deleted keys
DelMisses int64 `json:"delete_misses"`
// Collisions is a number of happened key-collisions
Collisions int64 `json:"collisions"`
}
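These counters make derived metrics cheap; for example, a hit ratio (cache being a *BigCache as above):

s := cache.Stats()
if total := s.Hits + s.Misses; total > 0 {
	fmt.Printf("hit ratio: %.1f%%\n", 100*float64(s.Hits)/float64(total))
}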

vendor/github.com/allegro/bigcache/utils.go generated vendored Normal file

@@ -0,0 +1,16 @@
package bigcache
func max(a, b int) int {
if a > b {
return a
}
return b
}
func convertMBToBytes(value int) int {
return value * 1024 * 1024
}
func isPowerOfTwo(number int) bool {
return (number & (number - 1)) == 0
}


@@ -9,23 +9,26 @@ les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
p2p/simulations @lmars
p2p/protocols @zelig
swarm/api/http @justelad
swarm/bmt @zelig
swarm/dev @lmars
swarm/fuse @jmozah @holisticode
swarm/grafana_dashboards @nonsense
swarm/metrics @nonsense @holisticode
swarm/multihash @nolash
swarm/network/bitvector @zelig @janos @gbalint
swarm/network/priorityqueue @zelig @janos @gbalint
swarm/network/simulations @zelig
swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
swarm/network/bitvector @zelig @janos
swarm/network/priorityqueue @zelig @janos
swarm/network/simulations @zelig @janos
swarm/network/stream @janos @zelig @holisticode @justelad
swarm/network/stream/intervals @janos
swarm/network/stream/testing @zelig
swarm/pot @zelig
swarm/pss @nolash @zelig @nonsense
swarm/services @zelig
swarm/state @justelad
swarm/storage/encryption @gbalint @zelig @nagydani
swarm/storage/encryption @zelig @nagydani
swarm/storage/mock @janos
swarm/storage/feed @nolash @jpeletier
swarm/testutil @lmars


@@ -29,6 +29,14 @@ matrix:
- os: osx
go: 1.11.x
script:
- echo "Increase the maximum number of open file descriptors on macOS"
- NOFILE=20480
- sudo sysctl -w kern.maxfiles=$NOFILE
- sudo sysctl -w kern.maxfilesperproc=$NOFILE
- sudo launchctl limit maxfiles $NOFILE $NOFILE
- sudo launchctl limit maxfiles
- ulimit -S -n $NOFILE
- ulimit -n
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
@@ -148,7 +156,7 @@ matrix:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://storage.googleapis.com/golang/go1.11.1.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go


@@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
Building geth requires both a Go (version 1.7 or later) and a C compiler.
Building geth requires both a Go (version 1.9 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run
@@ -168,7 +168,7 @@ HTTP based JSON-RPC API options:
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification)
via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification)
on all transports. You can reuse the same connection for multiple requests!
**Note: Please understand the security implications of opening up an HTTP/WS based transport before


@@ -1,5 +1,5 @@
diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go
index 211fa863..65c83f3b 100644
index 84d8df0c5..551b386a5 100644
--- a/accounts/keystore/key.go
+++ b/accounts/keystore/key.go
@@ -33,6 +33,7 @@ import (
@@ -26,14 +26,14 @@ index 211fa863..65c83f3b 100644
type encryptedKeyJSONV3 struct {
- Address string `json:"address"`
- Crypto cryptoJSON `json:"crypto"`
- Crypto CryptoJSON `json:"crypto"`
- Id string `json:"id"`
- Version int `json:"version"`
+ Address string `json:"address"`
+ Crypto cryptoJSON `json:"crypto"`
+ Crypto CryptoJSON `json:"crypto"`
+ Id string `json:"id"`
+ Version int `json:"version"`
+ ExtendedKey cryptoJSON `json:"extendedkey"`
+ ExtendedKey CryptoJSON `json:"extendedkey"`
+ SubAccountIndex uint32 `json:"subaccountindex"`
}
@@ -80,7 +80,7 @@ index 211fa863..65c83f3b 100644
// into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we
// retry until the first byte is 0.
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 6b04acd0..ac2ab008 100644
index 2918047cc..333fbef6f 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -38,6 +38,7 @@ import (
@@ -154,21 +154,22 @@ index 6b04acd0..ac2ab008 100644
// Update changes the passphrase of an existing account.
func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error {
a, key, err := ks.getDecryptedKey(a, passphrase)
@@ -486,6 +529,9 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
@@ -486,6 +529,10 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account
// zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) {
+ if k == nil {
+ return
+ }
+
b := k.D.Bits()
for i := range b {
b[i] = 0
diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go
index 59738abe..2b6ef252 100644
--- a/accounts/keystore/keystore_passphrase.go
+++ b/accounts/keystore/keystore_passphrase.go
@@ -41,6 +41,7 @@ import (
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go
index a0b6cf538..e2512c6e8 100644
--- a/accounts/keystore/passphrase.go
+++ b/accounts/keystore/passphrase.go
@@ -42,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/pborman/uuid"
@@ -176,9 +177,9 @@ index 59738abe..2b6ef252 100644
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
@@ -157,15 +158,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
@@ -187,15 +188,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
if err != nil {
return nil, err
}
+ encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP)
+ if err != nil {
@@ -195,9 +196,9 @@ index 59738abe..2b6ef252 100644
return json.Marshal(encryptedKeyJSONV3)
}
+func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) {
+func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (CryptoJSON, error) {
+ if extKey == nil {
+ return cryptoJSON{}, nil
+ return CryptoJSON{}, nil
+ }
+ authArray := []byte(auth)
+ salt := make([]byte, 32)
@@ -206,7 +207,7 @@ index 59738abe..2b6ef252 100644
+ }
+ derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
+ if err != nil {
+ return cryptoJSON{}, err
+ return CryptoJSON{}, err
+ }
+ encryptKey := derivedKey[:16]
+ keyBytes := []byte(extKey.String())
@@ -217,7 +218,7 @@ index 59738abe..2b6ef252 100644
+ }
+ cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
+ if err != nil {
+ return cryptoJSON{}, err
+ return CryptoJSON{}, err
+ }
+ mac := crypto.Keccak256(derivedKey[16:32], cipherText)
+
@@ -232,7 +233,7 @@ index 59738abe..2b6ef252 100644
+ IV: hex.EncodeToString(iv),
+ }
+
+ return cryptoJSON{
+ return CryptoJSON{
+ Cipher: "aes-128-ctr",
+ CipherText: hex.EncodeToString(cipherText),
+ CipherParams: cipherParamsJSON,
@@ -245,7 +246,7 @@ index 59738abe..2b6ef252 100644
// DecryptKey decrypts a key from a json blob, returning the private key itself.
func DecryptKey(keyjson []byte, auth string) (*Key, error) {
// Parse the json into a simple map to fetch the key version
@@ -177,20 +231,43 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
@@ -207,19 +261,41 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
var (
keyBytes, keyId []byte
err error
@@ -285,11 +286,9 @@ index 59738abe..2b6ef252 100644
+ }
+ extKey, err = extkeys.NewKeyFromString(string(extKeyBytes))
}
+
// Handle any decryption errors and return the key
if err != nil {
return nil, err
@@ -198,9 +275,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
@@ -228,9 +304,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
key := crypto.ToECDSAUnsafe(keyBytes)
return &Key{
@ -304,7 +303,7 @@ index 59738abe..2b6ef252 100644
}, nil
}
@@ -280,6 +359,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
@@ -316,6 +394,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
return plainText, keyId, err
}
@ -353,6 +352,6 @@ index 59738abe..2b6ef252 100644
+ return plainText, err
+}
+
func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
authArray := []byte(auth)
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
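The flow this patch extends to extended keys mirrors the stock V3 keystore: scrypt stretches the passphrase, the first 16 bytes of the derived key feed AES-128-CTR, and the next 16 bytes are MACed together with the ciphertext. Compressed into one sketch (names as in the hunks above; aesCTRXOR is keystore-internal):

derivedKey, err := scrypt.Key([]byte(auth), salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
	return CryptoJSON{}, err
}
encryptKey := derivedKey[:16]                          // AES-128 key
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) // keyBytes = serialized extended key
if err != nil {
	return CryptoJSON{}, err
}
mac := crypto.Keccak256(derivedKey[16:32], cipherText) // checked again on decryption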


@@ -4,7 +4,7 @@ index b0749d2..d724562 100644
+++ b/cmd/geth/config.go
@@ -124,6 +124,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
}
// Apply flags.
+ utils.SetULC(ctx, &cfg.Eth)
utils.SetNodeConfig(ctx, &cfg.Node)
@@ -142,11 +142,11 @@ index d209311..8904dd8 100644
abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
defer close(abort)
diff --git a/eth/config.go b/eth/config.go
index efbaafb..7d1db9f 100644
index 7c041d1af..f71b8dfee 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -87,8 +87,12 @@ type Config struct {
NoPruning bool
@@ -91,8 +91,12 @@ type Config struct {
Whitelist map[uint64]common.Hash `toml:"-"`
// Light client options
- LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
@@ -161,7 +161,7 @@ index efbaafb..7d1db9f 100644
// Database options
SkipBcVersionCheck bool `toml:"-"`
diff --git a/eth/gen_config.go b/eth/gen_config.go
index d401a91..382d4d9 100644
index 2777aa9e8..6662f820f 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -23,10 +23,12 @@ func (c Config) MarshalTOML() (interface{}, error) {
@ -179,9 +179,9 @@ index d401a91..382d4d9 100644
+ SkipBcVersionCheck bool `toml:"-"`
+ DatabaseHandles int `toml:"-"`
DatabaseCache int
TrieCache int
TrieTimeout time.Duration
@@ -51,6 +53,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
TrieCleanCache int
TrieDirtyCache int
@@ -54,6 +56,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.NoPruning = c.NoPruning
enc.LightServ = c.LightServ
enc.LightPeers = c.LightPeers
@ -190,7 +190,7 @@ index d401a91..382d4d9 100644
enc.SkipBcVersionCheck = c.SkipBcVersionCheck
enc.DatabaseHandles = c.DatabaseHandles
enc.DatabaseCache = c.DatabaseCache
@@ -79,10 +83,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
@@ -85,10 +89,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
NetworkId *uint64
SyncMode *downloader.SyncMode
NoPruning *bool
@ -205,9 +205,9 @@ index d401a91..382d4d9 100644
+ SkipBcVersionCheck *bool `toml:"-"`
+ DatabaseHandles *int `toml:"-"`
DatabaseCache *int
TrieCache *int
TrieTimeout *time.Duration
@@ -122,6 +128,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
TrieCleanCache *int
TrieDirtyCache *int
@@ -131,6 +137,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.LightPeers != nil {
c.LightPeers = *dec.LightPeers
}
@@ -342,19 +342,7 @@ index f0d3b18..16cfc00 100644
}
// fetcherPeerInfo holds fetcher-specific information about each active peer
@@ -144,9 +152,11 @@ func (f *lightFetcher) syncLoop() {
rq *distReq
reqID uint64
)
+
if !f.syncing && !(newAnnounce && s) {
rq, reqID = f.nextRequest()
}
+
syncing := f.syncing
f.lock.Unlock()
@@ -206,8 +216,11 @@ func (f *lightFetcher) syncLoop() {
@@ -209,8 +209,11 @@ func (f *lightFetcher) syncLoop() {
case p := <-f.syncDone:
f.lock.Lock()
p.Log().Debug("Done synchronising with peer")
@ -365,8 +353,8 @@ index f0d3b18..16cfc00 100644
+ f.newHeaders([]*types.Header{h}, []*big.Int{td})
+ }
f.lock.Unlock()
f.requestChn <- false
}
}
@@ -223,7 +236,6 @@ func (f *lightFetcher) registerPeer(p *peer) {
f.lock.Lock()
@ -397,7 +385,7 @@ index f0d3b18..16cfc00 100644
if fp.root != nil {
@@ -407,25 +422,13 @@ func (f *lightFetcher) requestedID(reqID uint64) bool {
// to be downloaded starting from the head backwards is also returned
func (f *lightFetcher) nextRequest() (*distReq, uint64) {
func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
var (
- bestHash common.Hash
- bestAmount uint64
@ -424,12 +412,12 @@ index f0d3b18..16cfc00 100644
- }
- }
if bestTd == f.maxConfirmedTd {
return nil, 0
return nil, 0, false
}
@@ -435,72 +438,131 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
@@ -437,72 +437,131 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) {
var rq *distReq
reqID := genReqID()
if f.syncing {
if bestSyncing {
- rq = &distReq{
- getCost: func(dp distPeer) uint64 {
- return 0
@ -462,7 +450,7 @@ index f0d3b18..16cfc00 100644
- canSend: func(dp distPeer) bool {
+ rq = f.newFetcherDistReq(bestHash, reqID, bestAmount)
+ }
+ return rq, reqID
+ return rq, reqID, bestSyncing
+}
+
+// findBestValues retrieves the best values for LES or ULC mode.
@ -614,7 +602,7 @@ index f0d3b18..16cfc00 100644
+ return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) }
+ },
}
- return rq, reqID
- return rq, reqID, bestSyncing
}
// deliverHeaders delivers header download request responses for processing
@@ -2248,53 +2236,6 @@ index 8e2734c..0b5571b 100644
+ defer self.mu.Unlock()
+ self.disableCheckFreq = false
+}
diff --git a/metrics/counter.go b/metrics/counter.go
index c7f2b4b..fbb14b8 100644
--- a/metrics/counter.go
+++ b/metrics/counter.go
@@ -1,6 +1,8 @@
package metrics
-import "sync/atomic"
+import (
+ "sync/atomic"
+)
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
@@ -28,6 +30,12 @@ func NewCounter() Counter {
return &StandardCounter{0}
}
+// NewCounterForced constructs a new StandardCounter and returns it no matter if
+// the global switch is enabled or not.
+func NewCounterForced() Counter {
+ return &StandardCounter{0}
+}
+
// NewRegisteredCounter constructs and registers a new StandardCounter.
func NewRegisteredCounter(name string, r Registry) Counter {
c := NewCounter()
@@ -38,6 +46,19 @@ func NewRegisteredCounter(name string, r Registry) Counter {
return c
}
+// NewRegisteredCounterForced constructs and registers a new StandardCounter
+// and launches a goroutine no matter the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredCounterForced(name string, r Registry) Counter {
+ c := NewCounterForced()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
// CounterSnapshot is a read-only copy of another Counter.
type CounterSnapshot int64
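The forced variants bypass the package-level metrics switch, so a counter records even when metrics are globally disabled. A hypothetical use (name and registry illustrative):

c := metrics.NewRegisteredCounterForced("les/ulc/served", nil) // nil falls back to DefaultRegistry
c.Inc(1)
// per the comment above, unregister when done so the counter can be collected
defer metrics.DefaultRegistry.Unregister("les/ulc/served")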
diff --git a/mobile/geth.go b/mobile/geth.go
index e3e2e90..4d674b2 100644
--- a/mobile/geth.go
@@ -2309,21 +2250,3 @@ index e3e2e90..4d674b2 100644
}
// defaultNodeConfig contains the default node configuration values to use if all
diff --git a/p2p/enode/node.go b/p2p/enode/node.go
index 84088fc..b454ab2 100644
--- a/p2p/enode/node.go
+++ b/p2p/enode/node.go
@@ -98,6 +98,13 @@ func (n *Node) Pubkey() *ecdsa.PublicKey {
return &key
}
+// Record returns the node's record. The return value is a copy and may
+// be modified by the caller.
+func (n *Node) Record() *enr.Record {
+ cpy := n.r
+ return &cpy
+}
+
// checks whether n is a valid complete node.
func (n *Node) ValidateComplete() error {
if n.Incomplete() {


@@ -243,11 +243,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
if abiArg.Type.T == ArrayTy {
inputOffset += 32 * abiArg.Type.Size
} else {
inputOffset += 32
}
inputOffset += getDynamicTypeOffset(abiArg.Type)
}
var ret []byte
for i, a := range args {
@@ -257,14 +253,13 @@
if err != nil {
return nil, err
}
// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
offset := inputOffset + len(variableInput)
// check for dynamic types
if isDynamicType(input.Type) {
// set the offset
ret = append(ret, packNum(reflect.ValueOf(offset))...)
// Append the packed output to the variable input. The variable input
// will be appended at the end of the input.
ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
// calculate next offset
inputOffset += len(packed)
// append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input


@@ -183,23 +183,39 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
return nil, err
}
if t.T == SliceTy || t.T == ArrayTy {
var packed []byte
switch t.T {
case SliceTy, ArrayTy:
var ret []byte
if t.requiresLengthPrefix() {
// append length
ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
}
// calculate offset if any
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getDynamicTypeOffset(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
packed = append(packed, val...)
}
if t.T == SliceTy {
return packBytesSlice(packed, v.Len()), nil
} else if t.T == ArrayTy {
return packed, nil
if !offsetReq {
ret = append(ret, val...)
continue
}
ret = append(ret, packNum(reflect.ValueOf(offset))...)
offset += len(val)
tail = append(tail, val...)
}
return append(ret, tail...), nil
default:
return packElement(t, v), nil
}
return packElement(t, v), nil
}
// requireLengthPrefix returns whether the type requires any sort of length
@@ -207,3 +223,27 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
// isDynamicType returns true if the type is dynamic.
// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types
// ArrayTy is considered dynamic if and only if the Array element is a dynamic type.
// This function recursively checks the type for slice and array elements.
func isDynamicType(t Type) bool {
// dynamic types
// array is also a dynamic type if the array type is dynamic
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
// getDynamicTypeOffset returns the offset for the type.
// See `isDynamicType` to know which types are considered dynamic.
// If the type t is an array and element type is not a dynamic type, then we consider it a static type and
// return 32 * size of array since length prefix is not required.
// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset.
func getDynamicTypeOffset(t Type) int {
// if it is an array and there are no dynamic types
// then the array is static type
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
return 32 * t.Size
}
return 32
}
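To make the offset rules concrete, here is the head/tail layout hand-assembled with the standard library for a hypothetical packing of (uint256, string) = (1, "ab"): the static word sits in the head in place, while the dynamic string contributes an offset word to the head and its length-prefixed bytes to the tail.

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"
)

// word pads b into one 32-byte ABI word.
func word(b []byte, leftPad bool) []byte {
	w := make([]byte, 32)
	if leftPad {
		copy(w[32-len(b):], b)
	} else {
		copy(w, b)
	}
	return w
}

func main() {
	var enc []byte
	enc = append(enc, word(big.NewInt(1).Bytes(), true)...)  // head: uint256, packed in place
	enc = append(enc, word(big.NewInt(64).Bytes(), true)...) // head: offset of string = 2 words * 32
	enc = append(enc, word(big.NewInt(2).Bytes(), true)...)  // tail: string length
	enc = append(enc, word([]byte("ab"), false)...)          // tail: contents, right-padded
	fmt.Println(hex.EncodeToString(enc))
}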


@@ -30,8 +30,8 @@ import (
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
// at m/44'/60'/0'/0/1, etc.
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
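For reference, a quick standalone sketch of how those raw uint32 components map to the path notation in the comment (the slice literal mirrors DefaultBaseDerivationPath; components with the 0x80000000 bit set are hardened):

package main

import "fmt"

func main() {
	path := []uint32{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
	out := "m"
	for _, c := range path {
		if c >= 0x80000000 {
			out += fmt.Sprintf("/%d'", c-0x80000000)
		} else {
			out += fmt.Sprintf("/%d", c)
		}
	}
	fmt.Println(out) // m/44'/60'/0'/0/0
}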
// DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints

View File

@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error {
case (addr == common.Address{}):
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}
return &accounts.Account{
Address: addr,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
}
}
return nil
}

View File

@ -71,21 +71,21 @@ type plainKeyJSON struct {
type encryptedKeyJSONV3 struct {
Address string `json:"address"`
Crypto cryptoJSON `json:"crypto"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version int `json:"version"`
ExtendedKey cryptoJSON `json:"extendedkey"`
ExtendedKey CryptoJSON `json:"extendedkey"`
SubAccountIndex uint32 `json:"subaccountindex"`
}
type encryptedKeyJSONV1 struct {
Address string `json:"address"`
Crypto cryptoJSON `json:"crypto"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version string `json:"version"`
}
type cryptoJSON struct {
type CryptoJSON struct {
Cipher string `json:"cipher"`
CipherText string `json:"ciphertext"`
CipherParams cipherparamsJSON `json:"cipherparams"`
@ -212,7 +212,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou
if err != nil {
return nil, accounts.Account{}, err
}
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
}
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
@ -265,5 +268,6 @@ func toISO8601(t time.Time) string {
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
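For reference, a small sketch of the filename-safe timestamp this produces: colons are replaced with dashes so the result can be embedded in key file names (UTC case hardcoded here; the real function also formats non-UTC offsets via tz):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2018, 1, 2, 15, 4, 5, 0, time.UTC)
	fmt.Printf("%04d-%02d-%02dT%02d-%02d-%02d.%09dZ\n",
		t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond())
}
// Output: 2018-01-02T15-04-05.000000000Z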

View File

@ -538,6 +538,7 @@ func zeroKey(k *ecdsa.PrivateKey) {
if k == nil {
return
}
b := k.D.Bits()
for i := range b {
b[i] = 0
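zeroKey relies on big.Int.Bits exposing the integer's backing word slice, so the loop overwrites the secret scalar in place rather than leaving a copy behind for the garbage collector. A standalone sketch of the same technique (illustrative only):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	// Zero the backing words of the private scalar in place.
	b := key.D.Bits()
	for i := range b {
		b[i] = 0
	}
	wiped := true
	for _, w := range key.D.Bits() {
		if w != 0 {
			wiped = false
		}
	}
	fmt.Println("scalar wiped:", wiped) // true
}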

View File

@ -136,29 +136,26 @@ func (ks keyStorePassphrase) JoinPath(filename string) string {
return filepath.Join(ks.keysDirPath, filename)
}
// EncryptKey encrypts a key using the specified scrypt parameters into a json
// blob that can be decrypted later on.
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
authArray := []byte(auth)
// EncryptDataV3 encrypts the data given as 'data' with the password 'auth'.
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
return nil, err
return CryptoJSON{}, err
}
encryptKey := derivedKey[:16]
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
iv := make([]byte, aes.BlockSize) // 16
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
cipherText, err := aesCTRXOR(encryptKey, data, iv)
if err != nil {
return nil, err
return CryptoJSON{}, err
}
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
@ -168,12 +165,11 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
scryptParamsJSON["p"] = scryptP
scryptParamsJSON["dklen"] = scryptDKLen
scryptParamsJSON["salt"] = hex.EncodeToString(salt)
cipherParamsJSON := cipherparamsJSON{
IV: hex.EncodeToString(iv),
}
cryptoStruct := cryptoJSON{
cryptoStruct := CryptoJSON{
Cipher: "aes-128-ctr",
CipherText: hex.EncodeToString(cipherText),
CipherParams: cipherParamsJSON,
@ -181,6 +177,17 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
}
return cryptoStruct, nil
}
// EncryptKey encrypts a key using the specified scrypt parameters into a json
// blob that can be decrypted later on.
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
if err != nil {
return nil, err
}
encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP)
if err != nil {
return nil, err
@ -196,9 +203,9 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
return json.Marshal(encryptedKeyJSONV3)
}
func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) {
func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (CryptoJSON, error) {
if extKey == nil {
return cryptoJSON{}, nil
return CryptoJSON{}, nil
}
authArray := []byte(auth)
salt := make([]byte, 32)
@ -207,7 +214,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp
}
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
return cryptoJSON{}, err
return CryptoJSON{}, err
}
encryptKey := derivedKey[:16]
keyBytes := []byte(extKey.String())
@ -218,7 +225,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp
}
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
if err != nil {
return cryptoJSON{}, err
return CryptoJSON{}, err
}
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
@ -233,7 +240,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp
IV: hex.EncodeToString(iv),
}
return cryptoJSON{
return CryptoJSON{
Cipher: "aes-128-ctr",
CipherText: hex.EncodeToString(cipherText),
CipherParams: cipherParamsJSON,
@ -290,7 +297,6 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
}
extKey, err = extkeys.NewKeyFromString(string(extKeyBytes))
}
// Handle any decryption errors and return the key
if err != nil {
return nil, err
@ -306,42 +312,48 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
}, nil
}
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
if keyProtected.Version != version {
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher)
}
if keyProtected.Crypto.Cipher != "aes-128-ctr" {
return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher)
}
keyId = uuid.Parse(keyProtected.Id)
mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
mac, err := hex.DecodeString(cryptoJson.MAC)
if err != nil {
return nil, nil, err
return nil, err
}
iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
if err != nil {
return nil, nil, err
return nil, err
}
cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
cipherText, err := hex.DecodeString(cryptoJson.CipherText)
if err != nil {
return nil, nil, err
return nil, err
}
derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
derivedKey, err := getKDFKey(cryptoJson, auth)
if err != nil {
return nil, nil, err
return nil, err
}
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, nil, ErrDecrypt
return nil, ErrDecrypt
}
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
if err != nil {
return nil, err
}
return plainText, err
}
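The payoff of this refactor is that the scrypt/AES-CTR/Keccak-MAC scheme is now reusable for arbitrary blobs, not just private keys (clef's master seed handling below builds on it). A minimal round-trip sketch, assuming the vendored accounts/keystore import path:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	secret := []byte("any payload, not just private keys")
	// Light scrypt parameters keep the demo fast; use the standard ones
	// for real secrets.
	blob, err := keystore.EncryptDataV3(secret, []byte("hunter2"), keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		panic(err)
	}
	plain, err := keystore.DecryptDataV3(blob, "hunter2")
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", string(plain) == string(secret))
}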
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
if keyProtected.Version != version {
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
}
keyId = uuid.Parse(keyProtected.Id)
plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
if err != nil {
return nil, nil, err
}
@ -427,7 +439,7 @@ func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainTex
return plainText, err
}
func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
authArray := []byte(auth)
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
if err != nil {

View File

@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou
return accounts.Account{}, nil, err
}
key.Id = uuid.NewRandom()
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{
Scheme: KeyStoreScheme,
Path: keyStore.JoinPath(keyFileName(key.Address)),
},
}
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}

View File

@ -350,7 +350,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
signed, err := tx.WithSignature(signer, signature)
if err != nil {

View File

@ -221,7 +221,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction
signer = new(types.HomesteadSigner)
} else {
signer = types.NewEIP155Signer(chainID)
signature[64] = signature[64] - byte(chainID.Uint64()*2+35)
signature[64] -= byte(chainID.Uint64()*2 + 35)
}
// Inject the final signature into the transaction and sanity check the sender
signed, err := tx.WithSignature(signer, signature)
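Both hardware wallet drivers apply the same adjustment; the arithmetic is the EIP-155 encoding v = chainID*2 + 35 + recID, so subtracting chainID*2 + 35 from the byte the device returns recovers the raw 0/1 recovery id the signer expects. A standalone sketch:

package main

import "fmt"

func main() {
	chainID := uint64(1) // mainnet
	// What an EIP-155-aware device would return for recovery id 1.
	deviceV := byte(chainID*2 + 35 + 1)
	recID := deviceV - byte(chainID*2+35)
	fmt.Println("recovery id:", recID) // 1
}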

View File

@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.1.windows-%GETH_ARCH%.zip
- 7z x go1.11.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip
- 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version

View File

@ -38,7 +38,7 @@ func main() {
var (
listenAddr = flag.String("addr", ":30301", "listen address")
genKey = flag.String("genkey", "", "generate a node key")
writeAddr = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit")
writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit")
nodeKeyFile = flag.String("nodekey", "", "private key filename")
nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)")
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
@ -86,7 +86,7 @@ func main() {
}
if *writeAddr {
fmt.Printf("%v\n", enode.PubkeyToIDV4(&nodeKey.PublicKey))
fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:])
os.Exit(0)
}
@ -119,16 +119,17 @@ func main() {
}
if *runv5 {
if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
utils.Fatalf("%v", err)
}
} else {
db, _ := enode.OpenDB("")
ln := enode.NewLocalNode(db, nodeKey)
cfg := discover.Config{
PrivateKey: nodeKey,
AnnounceAddr: realaddr,
NetRestrict: restrictList,
PrivateKey: nodeKey,
NetRestrict: restrictList,
}
if _, err := discover.ListenUDP(conn, cfg); err != nil {
if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
}

View File

@ -91,7 +91,7 @@ invoking methods with the following info:
* [x] Version info about the signer
* [x] Address of API (http/ipc)
* [ ] List of known accounts
* [ ] Have a default timeout on signing operations, so that if the user has not answered withing e.g. 60 seconds, the request is rejected.
* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected.
* [ ] `account_signRawTransaction`
* [ ] `account_bulkSignTransactions([] transactions)` should
* only exist if enabled via config/flag
@ -129,7 +129,7 @@ The signer listens to HTTP requests on `rpcaddr`:`rpcport`, with the same JSONRP
expected to be JSON [jsonrpc 2.0 standard](http://www.jsonrpc.org/specification).
Some of these calls can require user interaction. Clients must be aware that responses
may be delayed significanlty or may never be received if a users decides to ignore the confirmation request.
may be delayed significantly or may never be received if a user decides to ignore the confirmation request.
The External API is **untrusted** : it does not accept credentials over this api, nor does it expect
that requests have any authority.
@ -862,7 +862,7 @@ A UI should conform to the following rules.
* A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed
* A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts
* The signer provides accounts
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that requried libraries are bundled
* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled
along with the UI.

View File

@ -1,5 +1,9 @@
### Changelog for internal API (ui-api)
### 3.0.0
* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup
### 2.1.0
* Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords.
@ -14,7 +18,6 @@ The following structures are used:
UserInputResponse struct {
Text string `json:"text"`
}
```
### 2.0.0

View File

@ -35,8 +35,10 @@ import (
"runtime"
"strings"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@ -48,10 +50,10 @@ import (
)
// ExternalAPIVersion -- see extapi_changelog.md
const ExternalAPIVersion = "3.0.0"
const ExternalAPIVersion = "4.0.0"
// InternalAPIVersion -- see intapi_changelog.md
const InternalAPIVersion = "2.0.0"
const InternalAPIVersion = "3.0.0"
const legalWarning = `
WARNING!
@ -91,7 +93,7 @@ var (
}
signerSecretFlag = cli.StringFlag{
Name: "signersecret",
Usage: "A file containing the password used to encrypt Clef credentials, e.g. keystore credentials and ruleset hash",
Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash",
}
dBFlag = cli.StringFlag{
Name: "4bytedb",
@ -155,18 +157,18 @@ Whenever you make an edit to the rule file, you need to use attestation to tell
Clef that the file is 'safe' to execute.`,
}
addCredentialCommand = cli.Command{
Action: utils.MigrateFlags(addCredential),
Name: "addpw",
setCredentialCommand = cli.Command{
Action: utils.MigrateFlags(setCredential),
Name: "setpw",
Usage: "Store a credential for a keystore file",
ArgsUsage: "<address> <password>",
ArgsUsage: "<address>",
Flags: []cli.Flag{
logLevelFlag,
configdirFlag,
signerSecretFlag,
},
Description: `
The addpw command stores a password for a given address (keyfile). If you invoke it with only one parameter, it will
The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will
remove any stored credential for that address (keyfile).
`,
}
@ -198,7 +200,7 @@ func init() {
advancedMode,
}
app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand}
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand}
}
func main() {
@ -212,25 +214,45 @@ func initializeSecrets(c *cli.Context) error {
if err := initialize(c); err != nil {
return err
}
configDir := c.String(configdirFlag.Name)
configDir := c.GlobalString(configdirFlag.Name)
masterSeed := make([]byte, 256)
n, err := io.ReadFull(rand.Reader, masterSeed)
num, err := io.ReadFull(rand.Reader, masterSeed)
if err != nil {
return err
}
if n != len(masterSeed) {
if num != len(masterSeed) {
return fmt.Errorf("failed to read enough random")
}
n, p := keystore.StandardScryptN, keystore.StandardScryptP
if c.GlobalBool(utils.LightKDFFlag.Name) {
n, p = keystore.LightScryptN, keystore.LightScryptP
}
text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password."
var password string
for {
password = getPassPhrase(text, true)
if err := core.ValidatePasswordFormat(password); err != nil {
fmt.Printf("invalid password: %v\n", err)
} else {
break
}
}
cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p)
if err != nil {
return fmt.Errorf("failed to encrypt master seed: %v", err)
}
err = os.Mkdir(configDir, 0700)
if err != nil && !os.IsExist(err) {
return err
}
location := filepath.Join(configDir, "secrets.dat")
location := filepath.Join(configDir, "masterseed.json")
if _, err := os.Stat(location); err == nil {
return fmt.Errorf("file %v already exists, will not overwrite", location)
}
err = ioutil.WriteFile(location, masterSeed, 0400)
err = ioutil.WriteFile(location, cipherSeed, 0400)
if err != nil {
return err
}
@ -255,11 +277,11 @@ func attestFile(ctx *cli.Context) error {
return err
}
stretchedKey, err := readMasterKey(ctx)
stretchedKey, err := readMasterKey(ctx, nil)
if err != nil {
utils.Fatalf(err.Error())
}
configDir := ctx.String(configdirFlag.Name)
configDir := ctx.GlobalString(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
confKey := crypto.Keccak256([]byte("config"), stretchedKey)
@ -271,38 +293,36 @@ func attestFile(ctx *cli.Context) error {
return nil
}
func addCredential(ctx *cli.Context) error {
func setCredential(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires at leaste one argument.")
utils.Fatalf("This command requires an address to be passed as an argument.")
}
if err := initialize(ctx); err != nil {
return err
}
stretchedKey, err := readMasterKey(ctx)
address := ctx.Args().First()
password := getPassPhrase("Enter a passphrase to store with this address.", true)
stretchedKey, err := readMasterKey(ctx, nil)
if err != nil {
utils.Fatalf(err.Error())
}
configDir := ctx.String(configdirFlag.Name)
configDir := ctx.GlobalString(configdirFlag.Name)
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10]))
pwkey := crypto.Keccak256([]byte("credentials"), stretchedKey)
// Initialize the encrypted storages
pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey)
key := ctx.Args().First()
value := ""
if len(ctx.Args()) > 1 {
value = ctx.Args().Get(1)
}
pwStorage.Put(key, value)
log.Info("Credential store updated", "key", key)
pwStorage.Put(address, password)
log.Info("Credential store updated", "key", address)
return nil
}
func initialize(c *cli.Context) error {
// Set up the logger to print everything
logOutput := os.Stdout
if c.Bool(stdiouiFlag.Name) {
if c.GlobalBool(stdiouiFlag.Name) {
logOutput = os.Stderr
// If using the stdioui, we can't do the 'confirm'-flow
fmt.Fprintf(logOutput, legalWarning)
@ -323,26 +343,28 @@ func signer(c *cli.Context) error {
var (
ui core.SignerUI
)
if c.Bool(stdiouiFlag.Name) {
if c.GlobalBool(stdiouiFlag.Name) {
log.Info("Using stdin/stdout as UI-channel")
ui = core.NewStdIOUI()
} else {
log.Info("Using CLI as UI-channel")
ui = core.NewCommandlineUI()
}
db, err := core.NewAbiDBFromFiles(c.String(dBFlag.Name), c.String(customDBFlag.Name))
fourByteDb := c.GlobalString(dBFlag.Name)
fourByteLocal := c.GlobalString(customDBFlag.Name)
db, err := core.NewAbiDBFromFiles(fourByteDb, fourByteLocal)
if err != nil {
utils.Fatalf(err.Error())
}
log.Info("Loaded 4byte db", "signatures", db.Size(), "file", c.String("4bytedb"))
log.Info("Loaded 4byte db", "signatures", db.Size(), "file", fourByteDb, "local", fourByteLocal)
var (
api core.ExternalAPI
)
configDir := c.String(configdirFlag.Name)
if stretchedKey, err := readMasterKey(c); err != nil {
log.Info("No master seed provided, rules disabled")
configDir := c.GlobalString(configdirFlag.Name)
if stretchedKey, err := readMasterKey(c, ui); err != nil {
log.Info("No master seed provided, rules disabled", "error", err)
} else {
if err != nil {
@ -361,7 +383,7 @@ func signer(c *cli.Context) error {
configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey)
// Do we have a rule-file?
ruleJS, err := ioutil.ReadFile(c.String(ruleFlag.Name))
ruleJS, err := ioutil.ReadFile(c.GlobalString(ruleFlag.Name))
if err != nil {
log.Info("Could not load rulefile, rules not enabled", "file", "rulefile")
} else {
@ -385,17 +407,15 @@ func signer(c *cli.Context) error {
}
apiImpl := core.NewSignerAPI(
c.Int64(utils.NetworkIdFlag.Name),
c.String(keystoreFlag.Name),
c.Bool(utils.NoUSBFlag.Name),
c.GlobalInt64(utils.NetworkIdFlag.Name),
c.GlobalString(keystoreFlag.Name),
c.GlobalBool(utils.NoUSBFlag.Name),
ui, db,
c.Bool(utils.LightKDFFlag.Name),
c.Bool(advancedMode.Name))
c.GlobalBool(utils.LightKDFFlag.Name),
c.GlobalBool(advancedMode.Name))
api = apiImpl
// Audit logging
if logfile := c.String(auditLogFlag.Name); logfile != "" {
if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" {
api, err = core.NewAuditLogger(logfile, api)
if err != nil {
utils.Fatalf(err.Error())
@ -414,13 +434,13 @@ func signer(c *cli.Context) error {
Service: api,
Version: "1.0"},
}
if c.Bool(utils.RPCEnabledFlag.Name) {
if c.GlobalBool(utils.RPCEnabledFlag.Name) {
vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name))
cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name))
// start http server
httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name))
listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
@ -434,9 +454,9 @@ func signer(c *cli.Context) error {
}()
}
if !c.Bool(utils.IPCDisabledFlag.Name) {
if !c.GlobalBool(utils.IPCDisabledFlag.Name) {
if c.IsSet(utils.IPCPathFlag.Name) {
ipcapiURL = c.String(utils.IPCPathFlag.Name)
ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name)
} else {
ipcapiURL = filepath.Join(configDir, "clef.ipc")
}
@ -453,7 +473,7 @@ func signer(c *cli.Context) error {
}
if c.Bool(testFlag.Name) {
if c.GlobalBool(testFlag.Name) {
log.Info("Performing UI test")
go testExternalUI(apiImpl)
}
@ -512,36 +532,52 @@ func homeDir() string {
}
return ""
}
func readMasterKey(ctx *cli.Context) ([]byte, error) {
func readMasterKey(ctx *cli.Context, ui core.SignerUI) ([]byte, error) {
var (
file string
configDir = ctx.String(configdirFlag.Name)
configDir = ctx.GlobalString(configdirFlag.Name)
)
if ctx.IsSet(signerSecretFlag.Name) {
file = ctx.String(signerSecretFlag.Name)
if ctx.GlobalIsSet(signerSecretFlag.Name) {
file = ctx.GlobalString(signerSecretFlag.Name)
} else {
file = filepath.Join(configDir, "secrets.dat")
file = filepath.Join(configDir, "masterseed.json")
}
if err := checkFile(file); err != nil {
return nil, err
}
masterKey, err := ioutil.ReadFile(file)
cipherKey, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
if len(masterKey) < 256 {
return nil, fmt.Errorf("master key of insufficient length, expected >255 bytes, got %d", len(masterKey))
var password string
// If ui is not nil, get the password from ui.
if ui != nil {
resp, err := ui.OnInputRequired(core.UserInputRequest{
Title: "Master Password",
Prompt: "Please enter the password to decrypt the master seed",
IsPassword: true})
if err != nil {
return nil, err
}
password = resp.Text
} else {
password = getPassPhrase("Decrypt master seed of clef", false)
}
masterSeed, err := decryptSeed(cipherKey, password)
if err != nil {
return nil, fmt.Errorf("failed to decrypt the master seed of clef")
}
if len(masterSeed) < 256 {
return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed))
}
// Create vault location
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterKey)[:10]))
vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10]))
err = os.Mkdir(vaultLocation, 0700)
if err != nil && !os.IsExist(err) {
return nil, err
}
//!TODO, use KDF to stretch the master key
// stretched_key := stretch_key(master_key)
return masterKey, nil
return masterSeed, nil
}
// checkFile is a convenience function to check if a file
@ -619,6 +655,59 @@ func testExternalUI(api *core.SignerAPI) {
}
// getPassPhrase retrieves the password associated with clef, either fetched
// from a list of preloaded passphrases, or requested interactively from the user.
// TODO: there are many `getPassPhrase` functions; it would be better to abstract them into one.
func getPassPhrase(prompt string, confirmation bool) string {
fmt.Println(prompt)
password, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if password != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return password
}
type encryptedSeedStorage struct {
Description string `json:"description"`
Version int `json:"version"`
Params keystore.CryptoJSON `json:"params"`
}
// encryptSeed uses a scheme similar to the keystore's, but with a different wrapping,
// to encrypt the master seed.
func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) {
cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP)
if err != nil {
return nil, err
}
return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct})
}
// decryptSeed decrypts the master seed
func decryptSeed(keyjson []byte, auth string) ([]byte, error) {
var encSeed encryptedSeedStorage
if err := json.Unmarshal(keyjson, &encSeed); err != nil {
return nil, err
}
if encSeed.Version != 1 {
log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version))
}
seed, err := keystore.DecryptDataV3(encSeed.Params, auth)
if err != nil {
return nil, err
}
return seed, err
}
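encryptSeed and decryptSeed are unexported in package main, so here is a standalone sketch of the same wrapping (the struct shape is copied from encryptedSeedStorage above; sealedSeed and the sample values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

type sealedSeed struct {
	Description string              `json:"description"`
	Version     int                 `json:"version"`
	Params      keystore.CryptoJSON `json:"params"`
}

func main() {
	seed := []byte("256 random bytes in real use")
	params, err := keystore.EncryptDataV3(seed, []byte("pw"), keystore.LightScryptN, keystore.LightScryptP)
	if err != nil {
		panic(err)
	}
	blob, _ := json.Marshal(&sealedSeed{"Clef seed", 1, params})
	// ... in clef this blob is written to <configdir>/masterseed.json with 0400 permissions ...
	var dec sealedSeed
	if err := json.Unmarshal(blob, &dec); err != nil {
		panic(err)
	}
	plain, err := keystore.DecryptDataV3(dec.Params, "pw")
	if err != nil {
		panic(err)
	}
	fmt.Println("seed recovered:", string(plain) == string(seed))
}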
/**
//Create Account

View File

@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error {
genesisConfig *core.Genesis
)
if ctx.GlobalBool(MachineFlag.Name) {
tracer = NewJSONLogger(logconfig, os.Stdout)
tracer = vm.NewJSONLogger(logconfig, os.Stdout)
} else if ctx.GlobalBool(DebugFlag.Name) {
debugLogger = vm.NewStructLogger(logconfig)
tracer = debugLogger
@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error {
execTime := time.Since(tstart)
if ctx.GlobalBool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump()))
}

View File

@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error {
)
switch {
case ctx.GlobalBool(MachineFlag.Name):
tracer = NewJSONLogger(config, os.Stderr)
tracer = vm.NewJSONLogger(config, os.Stderr)
case ctx.GlobalBool(DebugFlag.Name):
debugger = vm.NewStructLogger(config)

View File

@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
}
for _, boot := range enodes {
old, err := enode.ParseV4(boot.String())
if err != nil {
if err == nil {
stack.Server().AddPeer(old)
}
}

View File

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"math/big"
"os"
"reflect"
"unicode"
@ -153,7 +154,9 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) {
cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name))
}
utils.RegisterEthService(stack, &cfg.Eth)
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {

View File

@ -21,7 +21,6 @@ import (
"fmt"
"math"
"os"
"runtime"
godebug "runtime/debug"
"sort"
"strconv"
@ -92,8 +91,10 @@ var (
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
utils.ListenPortFlag,
@ -126,6 +127,7 @@ var (
utils.RinkebyFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.ConstantinopleOverrideFlag,
utils.RPCCORSDomainFlag,
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
@ -213,8 +215,6 @@ func init() {
app.Flags = append(app.Flags, metricsFlags...)
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
logdir := ""
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")

View File

@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.LightServFlag,
utils.LightPeersFlag,
utils.LightKDFFlag,
utils.WhitelistFlag,
},
},
{
@ -132,6 +133,7 @@ var AppHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheGCFlag,
utils.TrieCacheGenFlag,
},

View File

@ -20,35 +20,41 @@ import (
"encoding/binary"
"errors"
"math"
"math/big"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
math2 "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/params"
)
// cppEthereumGenesisSpec represents the genesis specification format used by the
// alethGenesisSpec represents the genesis specification format used by the
// C++ Ethereum implementation.
type cppEthereumGenesisSpec struct {
type alethGenesisSpec struct {
SealEngine string `json:"sealEngine"`
Params struct {
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"`
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
TieBreakingGas bool `json:"tieBreakingGas"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"`
DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
AllowFutureBlocks bool `json:"allowFutureBlocks"`
} `json:"params"`
Genesis struct {
@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"`
}
// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type cppEthereumGenesisSpecAccount struct {
Balance *hexutil.Big `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
type alethGenesisSpecAccount struct {
Balance *math2.HexOrDecimal256 `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"`
}
// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
type cppEthereumGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
// alethGenesisSpecBuiltin is the precompiled contract definition.
type alethGenesisSpecBuiltin struct {
Name string `json:"name,omitempty"`
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"`
}
type cppEthereumGenesisSpecLinearPricing struct {
type alethGenesisSpecLinearPricing struct {
Base uint64 `json:"base"`
Word uint64 `json:"word"`
}
// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific
// chain specification format.
func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and cpp-ethereum
func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) {
// Only ethash is currently supported between go-ethereum and aleth
if genesis.Config.Ethash == nil {
return nil, errors.New("unsupported consensus engine")
}
// Reconstruct the chain spec in Parity's format
spec := &cppEthereumGenesisSpec{
// Reconstruct the chain spec in Aleth format
spec := &alethGenesisSpec{
SealEngine: "Ethash",
}
// Some defaults
spec.Params.AccountStartNonce = 0
spec.Params.TieBreakingGas = false
spec.Params.AllowFutureBlocks = false
spec.Params.DaoHardforkBlock = 0
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
// Byzantium
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
}
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64)
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit)
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
@ -126,77 +143,104 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
for address, account := range genesis.Alloc {
spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
Balance: (*hexutil.Big)(account.Balance),
Nonce: account.Nonce,
}
}
spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
}
spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
}
spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
}
spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
spec.setAccount(address, account)
}
spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover",
Linear: &alethGenesisSpecLinearPricing{Base: 3000}})
spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256",
Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}})
spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160",
Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}})
spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity",
Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}})
if genesis.Config.ByzantiumBlock != nil {
spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
}
spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
}
spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
}
spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
}
spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
Linear: &alethGenesisSpecLinearPricing{Base: 500}})
spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
Linear: &alethGenesisSpecLinearPricing{Base: 40000}})
spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product",
StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())})
}
return spec, nil
}
func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data
}
func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount)
}
a, exist := spec.Accounts[common.UnprefixedAddress(address)]
if !exist {
a = &alethGenesisSpecAccount{}
spec.Accounts[common.UnprefixedAddress(address)] = a
}
a.Balance = (*math2.HexOrDecimal256)(account.Balance)
a.Nonce = account.Nonce
}
func (spec *alethGenesisSpec) setByzantium(num *big.Int) {
spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64())
}
func (spec *alethGenesisSpec) setConstantinople(num *big.Int) {
spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64())
}
// parityChainSpec is the chain specification format used by Parity.
type parityChainSpec struct {
Name string `json:"name"`
Engine struct {
Name string `json:"name"`
Datadir string `json:"dataDir"`
Engine struct {
Ethash struct {
Params struct {
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
HomesteadTransition uint64 `json:"homesteadTransition"`
EIP150Transition uint64 `json:"eip150Transition"`
EIP160Transition uint64 `json:"eip160Transition"`
EIP161abcTransition uint64 `json:"eip161abcTransition"`
EIP161dTransition uint64 `json:"eip161dTransition"`
EIP649Reward *hexutil.Big `json:"eip649Reward"`
EIP100bTransition uint64 `json:"eip100bTransition"`
EIP649Transition uint64 `json:"eip649Transition"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward map[string]string `json:"blockReward"`
DifficultyBombDelays map[string]string `json:"difficultyBombDelays"`
HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"`
EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
Params struct {
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
MaxCodeSize uint64 `json:"maxCodeSize"`
EIP155Transition uint64 `json:"eip155Transition"`
EIP98Transition uint64 `json:"eip98Transition"`
EIP86Transition uint64 `json:"eip86Transition"`
EIP140Transition uint64 `json:"eip140Transition"`
EIP211Transition uint64 `json:"eip211Transition"`
EIP214Transition uint64 `json:"eip214Transition"`
EIP658Transition uint64 `json:"eip658Transition"`
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
ChainID hexutil.Uint64 `json:"chainID"`
MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
} `json:"params"`
Genesis struct {
@ -215,22 +259,22 @@ type parityChainSpec struct {
GasLimit hexutil.Uint64 `json:"gasLimit"`
} `json:"genesis"`
Nodes []string `json:"nodes"`
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
Nodes []string `json:"nodes"`
Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"`
}
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
// contract definition.
type parityChainSpecAccount struct {
Balance *hexutil.Big `json:"balance"`
Nonce uint64 `json:"nonce,omitempty"`
Balance math2.HexOrDecimal256 `json:"balance"`
Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"`
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
}
// parityChainSpecBuiltin is the precompiled contract definition.
type parityChainSpecBuiltin struct {
Name string `json:"name,omitempty"`
ActivateAt uint64 `json:"activate_at,omitempty"`
ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"`
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
}
@ -265,34 +309,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
// Reconstruct the chain spec in Parity's format
spec := &parityChainSpec{
Name: network,
Nodes: bootnodes,
Name: network,
Nodes: bootnodes,
Datadir: strings.ToLower(network),
}
spec.Engine.Ethash.Params.BlockReward = make(map[string]string)
spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string)
// Frontier
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward)
// Homestead
spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64())
// Tangerine Whistle : 150
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md
spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64())
// Spurious Dragon: 155, 160, 161, 170
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md
spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64())
spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64())
// Byzantium
if num := genesis.Config.ByzantiumBlock; num != nil {
spec.setByzantium(num)
}
// Constantinople
if num := genesis.Config.ConstantinopleBlock; num != nil {
spec.setConstantinople(num)
}
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
spec.Params.EIP98Transition = math.MaxUint64
spec.Params.EIP86Transition = math.MaxUint64
spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
// geth enables the max code size limit from block zero
spec.Params.MaxCodeSizeTransition = 0
// Disable this one
spec.Params.EIP98Transition = math.MaxInt64
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
@ -305,42 +366,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
for address, account := range genesis.Alloc {
spec.Accounts[address] = &parityChainSpecAccount{
Balance: (*hexutil.Big)(account.Balance),
Nonce: account.Nonce,
bal := math2.HexOrDecimal256(*account.Balance)
spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{
Balance: bal,
Nonce: math2.HexOrDecimal64(account.Nonce),
}
}
spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
}
spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover",
Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}})
spec.setPrecompile(2, &parityChainSpecBuiltin{
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
}
spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
})
spec.setPrecompile(3, &parityChainSpecBuiltin{
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
}
spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
})
spec.setPrecompile(4, &parityChainSpecBuiltin{
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
}
})
if genesis.Config.ByzantiumBlock != nil {
spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
}
spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
}
spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
}
spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
}
blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64())
spec.setPrecompile(5, &parityChainSpecBuiltin{
Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
})
spec.setPrecompile(6, &parityChainSpecBuiltin{
Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
})
spec.setPrecompile(7, &parityChainSpecBuiltin{
Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
})
spec.setPrecompile(8, &parityChainSpecBuiltin{
Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
})
}
return spec, nil
}
func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) {
if spec.Accounts == nil {
spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount)
}
a := common.UnprefixedAddress(common.BytesToAddress([]byte{address}))
if _, exist := spec.Accounts[a]; !exist {
spec.Accounts[a] = &parityChainSpecAccount{}
}
spec.Accounts[a].Builtin = data
}
func (spec *parityChainSpec) setByzantium(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000)
n := hexutil.Uint64(num.Uint64())
spec.Engine.Ethash.Params.EIP100bTransition = n
spec.Params.EIP140Transition = n
spec.Params.EIP211Transition = n
spec.Params.EIP214Transition = n
spec.Params.EIP658Transition = n
}
func (spec *parityChainSpec) setConstantinople(num *big.Int) {
spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward)
spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000)
n := hexutil.Uint64(num.Uint64())
spec.Params.EIP145Transition = n
spec.Params.EIP1014Transition = n
spec.Params.EIP1052Transition = n
spec.Params.EIP1283Transition = n
}
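The per-fork maps replace the old single EIP-649 reward/transition fields, keying each reward and bomb delay by the fork block. A sketch of the entries produced for the testdata genesis further below (fork blocks 30000 and 40000 come from that file; the reward values are the ethash constants):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	rewards := map[string]string{
		hexutil.EncodeBig(big.NewInt(0)):     hexutil.EncodeBig(big.NewInt(5e18)), // Frontier: 5 ETH
		hexutil.EncodeBig(big.NewInt(30000)): hexutil.EncodeBig(big.NewInt(3e18)), // Byzantium: 3 ETH
		hexutil.EncodeBig(big.NewInt(40000)): hexutil.EncodeBig(big.NewInt(2e18)), // Constantinople: 2 ETH
	}
	delays := map[string]string{
		hexutil.EncodeBig(big.NewInt(30000)): hexutil.EncodeUint64(3000000),
		hexutil.EncodeBig(big.NewInt(40000)): hexutil.EncodeUint64(2000000),
	}
	fmt.Println(rewards["0x7530"], delays["0x9c40"]) // 0x29a2241af62c0000 0x1e8480
}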
// pyEthereumGenesisSpec represents the genesis specification format used by the
// Python Ethereum implementation.
type pyEthereumGenesisSpec struct {

View File

@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
files[filepath.Join(workdir, network+".json")] = genesis
if conf.Genesis.Config.Ethash != nil {
cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
cppSpec, err := newAlethGenesisSpec(network, conf.Genesis)
if err != nil {
return nil, err
}

View File

@ -43,7 +43,8 @@ version: '2'
services:
ethstats:
build: .
image: {{.Network}}/ethstats{{if not .VHost}}
image: {{.Network}}/ethstats
container_name: {{.Network}}_ethstats_1{{if not .VHost}}
ports:
- "{{.Port}}:3000"{{end}}
environment:

View File

@ -77,6 +77,7 @@ services:
explorer:
build: .
image: {{.Network}}/explorer
container_name: {{.Network}}_explorer_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}

View File

@ -56,8 +56,10 @@ services:
faucet:
build: .
image: {{.Network}}/faucet
container_name: {{.Network}}_faucet_1
ports:
- "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}}
- "{{.EthPort}}:{{.EthPort}}"
- "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}}
- "{{.ApiPort}}:8080"{{end}}
volumes:
- {{.Datadir}}:/root/.faucet

View File

@ -40,6 +40,7 @@ services:
nginx:
build: .
image: {{.Network}}/nginx
container_name: {{.Network}}_nginx_1
ports:
- "{{.Port}}:80"
volumes:

View File

@ -55,6 +55,7 @@ services:
{{.Type}}:
build: .
image: {{.Network}}/{{.Type}}
container_name: {{.Network}}_{{.Type}}_1
ports:
- "{{.Port}}:{{.Port}}"
- "{{.Port}}:{{.Port}}/udp"

View File

@ -57,6 +57,7 @@ services:
wallet:
build: .
image: {{.Network}}/wallet
container_name: {{.Network}}_wallet_1
ports:
- "{{.NodePort}}:{{.NodePort}}"
- "{{.NodePort}}:{{.NodePort}}/udp"

View File

@ -43,18 +43,23 @@ func main() {
Usage: "log level to emit to the screen",
},
}
app.Action = func(c *cli.Context) error {
app.Before = func(c *cli.Context) error {
// Set up the logger to print everything and the random generator
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
rand.Seed(time.Now().UnixNano())
network := c.String("network")
if strings.Contains(network, " ") || strings.Contains(network, "-") {
log.Crit("No spaces or hyphens allowed in network name")
}
// Start the wizard and relinquish control
makeWizard(c.String("network")).run()
return nil
}
app.Action = runWizard
app.Run(os.Args)
}
// runWizard starts the wizard and relinquishes control to it.
func runWizard(c *cli.Context) error {
network := c.String("network")
if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network {
log.Crit("No spaces, hyphens or capital letters allowed in network name")
}
makeWizard(c.String("network")).run()
return nil
}
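
Note that the same naming rule is now enforced twice: here in runWizard for the --network flag, and again in the interactive prompt of the wizard's run loop further below. A small sketch of the shared predicate (the helper name is ours; the patch inlines this check):

package main

import (
    "fmt"
    "strings"
)

// validNetworkName reports whether a puppeth network name is usable:
// Docker tolerates hyphens in image names but not in container names,
// and lowercase keeps image and container names consistent.
// (The helper name is illustrative; the patch inlines this check.)
func validNetworkName(network string) bool {
    return !strings.Contains(network, " ") &&
        !strings.Contains(network, "-") &&
        strings.ToLower(network) == network
}

func main() {
    fmt.Println(validNetworkName("stureby"))  // true
    fmt.Println(validNetworkName("Stureby"))  // false: capital letter
    fmt.Println(validNetworkName("my-chain")) // false: hyphen
}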

View File

@ -0,0 +1,112 @@
{
"sealEngine":"Ethash",
"params":{
"accountStartNonce":"0x00",
"maximumExtraDataSize":"0x20",
"homesteadForkBlock":"0x2710",
"daoHardforkBlock":"0x00",
"EIP150ForkBlock":"0x3a98",
"EIP158ForkBlock":"0x59d8",
"byzantiumForkBlock":"0x7530",
"constantinopleForkBlock":"0x9c40",
"minGasLimit":"0x1388",
"maxGasLimit":"0x7fffffffffffffff",
"tieBreakingGas":false,
"gasLimitBoundDivisor":"0x0400",
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x0800",
"durationLimit":"0x0d",
"blockReward":"0x4563918244F40000",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"allowFutureBlocks":false
},
"genesis":{
"nonce":"0x0000000000000000",
"difficulty":"0x20000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"precompiled":{
"name":"ecrecover",
"linear":{
"base":3000,
"word":0
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"precompiled":{
"name":"sha256",
"linear":{
"base":60,
"word":12
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"precompiled":{
"name":"ripemd160",
"linear":{
"base":600,
"word":120
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"precompiled":{
"name":"identity",
"linear":{
"base":15,
"word":3
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"precompiled":{
"name":"modexp",
"startingBlock":"0x7530"
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_add",
"startingBlock":"0x7530",
"linear":{
"base":500,
"word":0
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_G1_mul",
"startingBlock":"0x7530",
"linear":{
"base":40000,
"word":0
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"precompiled":{
"name":"alt_bn128_pairing_product",
"startingBlock":"0x7530"
}
}
}
}

View File

@ -0,0 +1,47 @@
{
"config": {
"ethash":{},
"chainId": 314158,
"homesteadBlock": 10000,
"eip150Block": 15000,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 23000,
"eip158Block": 23000,
"byzantiumBlock": 30000,
"constantinopleBlock": 40000
},
"nonce": "0x0",
"timestamp": "0x59a4e76d",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit": "0x47b760",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0000000000000000000000000000000000000001": {
"balance": "0x01"
},
"0000000000000000000000000000000000000002": {
"balance": "0x01"
},
"0000000000000000000000000000000000000003": {
"balance": "0x01"
},
"0000000000000000000000000000000000000004": {
"balance": "0x01"
},
"0000000000000000000000000000000000000005": {
"balance": "0x01"
},
"0000000000000000000000000000000000000006": {
"balance": "0x01"
},
"0000000000000000000000000000000000000007": {
"balance": "0x01"
},
"0000000000000000000000000000000000000008": {
"balance": "0x01"
}
}
}
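
The fork blocks in this Geth spec line up with the hex values in the aleth spec above: 0x2710 = 10000 (Homestead), 0x3a98 = 15000 (EIP150), 0x59d8 = 23000 (EIP155/158), 0x7530 = 30000 (Byzantium), 0x9c40 = 40000 (Constantinople). A minimal sketch of loading such a spec the same way importGenesis does later in this commit (the file path is hypothetical):

package main

import (
    "encoding/json"
    "fmt"
    "os"

    "github.com/ethereum/go-ethereum/core"
)

func main() {
    f, err := os.Open("stureby.json") // hypothetical path to a spec like the one above
    if err != nil {
        panic(err)
    }
    defer f.Close()

    var genesis core.Genesis
    if err := json.NewDecoder(f).Decode(&genesis); err != nil {
        panic(err)
    }
    fmt.Println(genesis.Config.ByzantiumBlock) // 30000, i.e. 0x7530 in the aleth spec
}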

View File

@ -0,0 +1,181 @@
{
"name":"Stureby",
"dataDir":"stureby",
"engine":{
"Ethash":{
"params":{
"minimumDifficulty":"0x20000",
"difficultyBoundDivisor":"0x800",
"durationLimit":"0xd",
"blockReward":{
"0x0":"0x4563918244f40000",
"0x7530":"0x29a2241af62c0000",
"0x9c40":"0x1bc16d674ec80000"
},
"homesteadTransition":"0x2710",
"eip100bTransition":"0x7530",
"difficultyBombDelays":{
"0x7530":"0x2dc6c0",
"0x9c40":"0x1e8480"
}
}
}
},
"params":{
"accountStartNonce":"0x0",
"maximumExtraDataSize":"0x20",
"gasLimitBoundDivisor":"0x400",
"minGasLimit":"0x1388",
"networkID":"0x4cb2e",
"chainID":"0x4cb2e",
"maxCodeSize":"0x6000",
"maxCodeSizeTransition":"0x0",
"eip98Transition": "0x7fffffffffffffff",
"eip150Transition":"0x3a98",
"eip160Transition":"0x59d8",
"eip161abcTransition":"0x59d8",
"eip161dTransition":"0x59d8",
"eip155Transition":"0x59d8",
"eip140Transition":"0x7530",
"eip211Transition":"0x7530",
"eip214Transition":"0x7530",
"eip658Transition":"0x7530",
"eip145Transition":"0x9c40",
"eip1014Transition":"0x9c40",
"eip1052Transition":"0x9c40",
"eip1283Transition":"0x9c40"
},
"genesis":{
"seal":{
"ethereum":{
"nonce":"0x0000000000000000",
"mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty":"0x20000",
"author":"0x0000000000000000000000000000000000000000",
"timestamp":"0x59a4e76d",
"parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee",
"gasLimit":"0x47b760"
},
"nodes":[
"enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303",
"enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303",
"enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303",
"enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303",
"enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303",
"enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303",
"enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404",
"enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414"
],
"accounts":{
"0000000000000000000000000000000000000001":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ecrecover",
"pricing":{
"linear":{
"base":3000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000002":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"sha256",
"pricing":{
"linear":{
"base":60,
"word":12
}
}
}
},
"0000000000000000000000000000000000000003":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"ripemd160",
"pricing":{
"linear":{
"base":600,
"word":120
}
}
}
},
"0000000000000000000000000000000000000004":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"identity",
"pricing":{
"linear":{
"base":15,
"word":3
}
}
}
},
"0000000000000000000000000000000000000005":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"modexp",
"activate_at":"0x7530",
"pricing":{
"modexp":{
"divisor":20
}
}
}
},
"0000000000000000000000000000000000000006":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_add",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":500,
"word":0
}
}
}
},
"0000000000000000000000000000000000000007":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_mul",
"activate_at":"0x7530",
"pricing":{
"linear":{
"base":40000,
"word":0
}
}
}
},
"0000000000000000000000000000000000000008":{
"balance":"1",
"nonce":"0",
"builtin":{
"name":"alt_bn128_pairing",
"activate_at":"0x7530",
"pricing":{
"alt_bn128_pairing":{
"base":100000,
"pair":80000
}
}
}
}
}
}

View File

@ -23,6 +23,7 @@ import (
"io/ioutil"
"math/big"
"net"
"net/url"
"os"
"path/filepath"
"sort"
@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string {
return def
}
// readDefaultYesNo reads a single line from stdin, trimming it from spaces and
// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default
// value is returned.
func (w *wizard) readDefaultYesNo(def bool) bool {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
return def
}
if text == "y" || text == "yes" {
return true
}
if text == "n" || text == "no" {
return false
}
log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty")
}
}
// readURL reads a single line from stdin, trimming it from spaces and trying to
// interpret it as a URL (http, https or file).
func (w *wizard) readURL() *url.URL {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
uri, err := url.Parse(strings.TrimSpace(text))
if err != nil {
log.Error("Invalid input, expected URL", "err", err)
continue
}
return uri
}
}
// readInt reads a single line from stdin, trimming it from spaces, enforcing it
// to parse into an integer.
func (w *wizard) readInt() int {

View File

@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() {
if w.conf.ethstats != "" {
fmt.Println()
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
infos.trusted = w.readDefaultString("y") == "y"
infos.trusted = w.readDefaultYesNo(true)
}
// Try to deploy the dashboard container on the host
nocache := false
if existed {
fmt.Println()
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
log.Error("Failed to deploy dashboard container", "err", err)

View File

@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
// The user might want to clear the entire list, although generally probably not
fmt.Println()
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
if w.readDefaultString("n") != "n" {
if w.readDefaultYesNo(false) {
infos.banned = nil
}
// Offer the user to explicitly add/remove certain IP addresses
@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() {
if existed {
fmt.Println()
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
trusted := make([]string, 0, len(w.servers))
for _, client := range w.servers {

View File

@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() {
if existed {
fmt.Println()
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
log.Error("Failed to deploy explorer container", "err", err)

View File

@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() {
if infos.captchaToken != "" {
fmt.Println()
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.captchaToken, infos.captchaSecret = "", ""
}
}
@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() {
// No previous authorization (or old one discarded)
fmt.Println()
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
if w.readDefaultString("n") == "n" {
if !w.readDefaultYesNo(false) {
log.Warn("Users will be able to requests funds via automated scripts")
} else {
// Captcha protection explicitly requested, read the site and secret keys
@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) funding account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.node.keyJSON, infos.node.keyPass = "", ""
}
}
@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() {
if existed {
fmt.Println()
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy faucet container", "err", err)

View File

@ -20,9 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net/http"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() {
Difficulty: big.NewInt(524288),
Alloc: make(core.GenesisAlloc),
Config: &params.ChainConfig{
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
HomesteadBlock: big.NewInt(1),
EIP150Block: big.NewInt(2),
EIP155Block: big.NewInt(3),
EIP158Block: big.NewInt(3),
ByzantiumBlock: big.NewInt(4),
ConstantinopleBlock: big.NewInt(5),
},
}
// Figure out which consensus engine to choose
@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() {
}
break
}
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
fmt.Println()
fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)")
if w.readDefaultYesNo(true) {
// Add a batch of precompile balances to avoid them getting deleted
for i := int64(0); i < 256; i++ {
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
}
}
// Query the user for some custom extras
fmt.Println()
@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() {
w.conf.flush()
}
// importGenesis imports a Geth genesis spec into puppeth.
func (w *wizard) importGenesis() {
// Request the genesis JSON spec URL from the user
fmt.Println()
fmt.Println("Where's the genesis file? (local file or http/https url)")
url := w.readURL()
// Convert the various allowed URLs to a reader stream
var reader io.Reader
switch url.Scheme {
case "http", "https":
// Remote web URL, retrieve it via an HTTP client
res, err := http.Get(url.String())
if err != nil {
log.Error("Failed to retrieve remote genesis", "err", err)
return
}
defer res.Body.Close()
reader = res.Body
case "":
// Schemaless URL, interpret as a local file
file, err := os.Open(url.String())
if err != nil {
log.Error("Failed to open local genesis", "err", err)
return
}
defer file.Close()
reader = file
default:
log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme)
return
}
// Parse the genesis file and inject it if successful
var genesis core.Genesis
if err := json.NewDecoder(reader).Decode(&genesis); err != nil {
log.Error("Invalid genesis spec: %v", err)
return
}
log.Info("Imported genesis block")
w.conf.Genesis = &genesis
w.conf.flush()
}
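
The scheme switch above relies on net/url: a bare path like ./stureby.json parses with an empty Scheme and is therefore treated as a local file. A tiny sketch illustrating that dispatch (the URLs are examples only):

package main

import (
    "fmt"
    "net/url"
)

func main() {
    for _, s := range []string{
        "https://example.com/stureby.json", // fetched via http.Get
        "./stureby.json",                   // Scheme == "" -> opened as a local file
    } {
        u, err := url.Parse(s)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-34s scheme=%q\n", s, u.Scheme)
    }
}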
// manageGenesis permits the modification of chain configuration parameters in
// a genesis config and the export of the entire genesis spec.
func (w *wizard) manageGenesis() {
// Figure out whether to modify or export the genesis
fmt.Println()
fmt.Println(" 1. Modify existing fork rules")
fmt.Println(" 2. Export genesis configuration")
fmt.Println(" 2. Export genesis configurations")
fmt.Println(" 3. Remove genesis configuration")
choice := w.read()
switch {
case choice == "1":
switch choice {
case "1":
// Fork rule updating requested, iterate over each fork
fmt.Println()
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
fmt.Println()
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
fmt.Println()
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
fmt.Println()
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
fmt.Println()
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
fmt.Println()
fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
case choice == "2":
case "2":
// Save whatever genesis configuration we currently have
fmt.Println()
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
}
log.Info("Exported existing genesis block")
fmt.Printf("Which folder to save the genesis specs into? (default = current)\n")
fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network)
case choice == "3":
folder := w.readDefaultString(".")
if err := os.MkdirAll(folder, 0755); err != nil {
log.Error("Failed to create spec folder", "folder", folder, "err", err)
return
}
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
// Export the native genesis spec used by puppeth and Geth
gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network))
if err := ioutil.WriteFile(gethJson, out, 0644); err != nil {
log.Error("Failed to save genesis file", "err", err)
return
}
log.Info("Saved native genesis chain spec", "path", gethJson)
// Export the genesis spec used by Aleth (formerly C++ Ethereum)
if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil {
log.Error("Failed to create Aleth chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "aleth", spec)
}
// Export the genesis spec used by Parity
if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil {
log.Error("Failed to create Parity chain spec", "err", err)
} else {
saveGenesis(folder, w.network, "parity", spec)
}
// Export the genesis spec used by Harmony (formerly EthereumJ)
saveGenesis(folder, w.network, "harmony", w.conf.Genesis)
case "3":
// Make sure we don't have any services running
if len(w.conf.servers()) > 0 {
log.Error("Genesis reset requires all services and servers torn down")
@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() {
w.conf.Genesis = nil
w.conf.flush()
default:
log.Error("That's not something I can do")
return
}
}
// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
func saveGenesis(folder, network, client string, spec interface{}) {
path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
out, _ := json.Marshal(spec)
if err := ioutil.WriteFile(path, out, 0644); err != nil {
log.Error("Failed to save genesis file", "client", client, "err", err)
return
}
log.Info("Saved genesis chain spec", "client", client, "path", path)
}
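
For a network named stureby, choosing option 2 therefore writes stureby.json (native), stureby-aleth.json, stureby-parity.json and stureby-harmony.json into the chosen folder. A quick sketch of the path construction saveGenesis uses:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    folder, network := ".", "stureby" // example inputs
    for _, client := range []string{"aleth", "parity", "harmony"} {
        // Mirrors saveGenesis: <folder>/<network>-<client>.json
        fmt.Println(filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client)))
    }
}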

View File

@ -61,14 +61,14 @@ func (w *wizard) run() {
// Make sure we have a good network name to work with
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces or hyphens")
log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters")
}
}
log.Info("Administering Ethereum network", "name", w.network)
@ -131,7 +131,20 @@ func (w *wizard) run() {
case choice == "2":
if w.conf.Genesis == nil {
w.makeGenesis()
fmt.Println()
fmt.Println("What would you like to do? (default = create)")
fmt.Println(" 1. Create new genesis from scratch")
fmt.Println(" 2. Import already existing genesis")
choice := w.read()
switch {
case choice == "" || choice == "1":
w.makeGenesis()
case choice == "2":
w.importGenesis()
default:
log.Error("That's not something I can do")
}
} else {
w.manageGenesis()
}
@ -149,7 +162,6 @@ func (w *wizard) run() {
} else {
w.manageComponents()
}
default:
log.Error("That's not something I can do")
}

View File

@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
// Reverse proxy is not running, offer to deploy a new one
fmt.Println()
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
if w.readDefaultString("y") == "y" {
if w.readDefaultYesNo(true) {
nocache := false
if proxy != nil {
fmt.Println()
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
log.Error("Failed to deploy reverse-proxy", "err", err)

View File

@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) {
} else {
fmt.Println()
fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex())
if w.readDefaultString("y") != "y" {
if !w.readDefaultYesNo(true) {
infos.keyJSON, infos.keyPass = "", ""
}
}
@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) {
if existed {
fmt.Println()
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)

View File

@ -96,7 +96,7 @@ func (w *wizard) deployWallet() {
if existed {
fmt.Println()
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
nocache = w.readDefaultString("n") != "n"
nocache = w.readDefaultYesNo(false)
}
if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
log.Error("Failed to deploy wallet container", "err", err)

View File

@ -29,7 +29,65 @@ import (
"gopkg.in/urfave/cli.v1"
)
var salt = make([]byte, 32)
var (
salt = make([]byte, 32)
accessCommand = cli.Command{
CustomHelpTemplate: helpTemplate,
Name: "access",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root manifest",
Subcommands: []cli.Command{
{
CustomHelpTemplate: helpTemplate,
Name: "new",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
Subcommands: []cli.Command{
{
Action: accessNewPass,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
},
Name: "pass",
Usage: "encrypts a reference with a password and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewPK,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
SwarmAccessGrantKeyFlag,
},
Name: "pk",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewACT,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
SwarmAccessGrantKeysFlag,
SwarmDryRunFlag,
utils.PasswordFileFlag,
},
Name: "act",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
},
},
},
}
)
func init() {
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
@ -56,6 +114,9 @@ func accessNewPass(ctx *cli.Context) {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae)
if err != nil {
utils.Fatalf("had an error generating the manifest: %v", err)
}
if dryRun {
err = printManifests(m, nil)
if err != nil {
@ -89,6 +150,9 @@ func accessNewPK(ctx *cli.Context) {
utils.Fatalf("error getting session key: %v", err)
}
m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae)
if err != nil {
utils.Fatalf("had an error generating the manifest: %v", err)
}
if dryRun {
err = printManifests(m, nil)
if err != nil {

View File

@ -80,6 +80,7 @@ const (
SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
SWARM_AUTO_DEFAULTPATH = "SWARM_AUTO_DEFAULTPATH"
GETH_ENV_DATADIR = "GETH_DATADIR"
)

View File

@ -29,6 +29,48 @@ import (
"gopkg.in/urfave/cli.v1"
)
var dbCommand = cli.Command{
Name: "db",
CustomHelpTemplate: helpTemplate,
Usage: "manage the local chunk database",
ArgsUsage: "db COMMAND",
Description: "Manage the local chunk database",
Subcommands: []cli.Command{
{
Action: dbExport,
CustomHelpTemplate: helpTemplate,
Name: "export",
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The export may be quite large; consider piping the output through the Unix
pv(1) tool to get a progress bar:
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
},
{
Action: dbImport,
CustomHelpTemplate: helpTemplate,
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The import may be quite large; consider piping the input through the Unix
pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
},
},
}
func dbExport(ctx *cli.Context) {
args := ctx.Args()
if len(args) != 3 {

View File

@ -28,6 +28,15 @@ import (
"gopkg.in/urfave/cli.v1"
)
var downloadCommand = cli.Command{
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, the working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.`,
}
func download(ctx *cli.Context) {
log.Debug("downloading content using swarm down")
args := ctx.Args()

View File

@ -31,6 +31,68 @@ import (
"gopkg.in/urfave/cli.v1"
)
var feedCommand = cli.Command{
CustomHelpTemplate: helpTemplate,
Name: "feed",
Usage: "(Advanced) Create and update Swarm Feeds",
ArgsUsage: "<create|update|info>",
Description: "Works with Swarm Feeds",
Subcommands: []cli.Command{
{
Action: feedCreateManifest,
CustomHelpTemplate: helpTemplate,
Name: "create",
Usage: "creates and publishes a new feed manifest",
Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic.
The feed topic can be built in the following ways:
* use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
The --user flag allows this manifest to refer to a user other than yourself. If not specified,
it defaults to your local account (--bzzaccount)`,
Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
},
{
Action: feedUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "updates the content of an existing Swarm Feed",
ArgsUsage: "<0x Hex data>",
Description: `publishes a new update on the specified topic
The feed topic can be built in the following ways:
* use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
If you have a manifest, you can specify it with --manifest to refer to the feed,
instead of using --topic / --name
`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag},
},
{
Action: feedInfo,
CustomHelpTemplate: helpTemplate,
Name: "info",
Usage: "obtains information about an existing Swarm feed",
Description: `obtains information about an existing Swarm feed
The topic can be specified directly with the --topic flag as an hex string
If no topic is specified, the default topic (zero) will be used
The --name flag can be used to specify subtopics with a specific name.
The --user flag allows referring to a user other than yourself. If not specified,
it defaults to your local account (--bzzaccount)
If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user
to refer to the feed`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
},
},
}
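
The --topic/--name composition described in these help texts is implemented by the feed package, which derives the final topic from a name plus related-content bytes. A hedged sketch using feed.NewTopic as it exists in this go-ethereum version (the contract address is made up):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
    // Hypothetical contract address used as the related content.
    addr := common.HexToAddress("0x1111111111111111111111111111111111111111")

    // "comments" as a named subtopic of the contract address, as the
    // create/update help texts above suggest for tracking a discussion.
    topic, err := feed.NewTopic("comments", addr.Bytes())
    if err != nil {
        panic(err) // a name longer than the topic length would fail
    }
    fmt.Printf("topic: %x\n", topic)
}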
func NewGenericSigner(ctx *cli.Context) feed.Signer {
return feed.NewGenericSigner(getPrivKey(ctx))
}
@ -107,7 +169,6 @@ func feedUpdate(ctx *cli.Context) {
query = new(feed.Query)
query.User = signer.Address()
query.Topic = getTopic(ctx)
}
// Retrieve a feed update request
@ -116,6 +177,11 @@ func feedUpdate(ctx *cli.Context) {
utils.Fatalf("Error retrieving feed status: %s", err.Error())
}
// Check that the provided signer matches the request to sign
if updateRequest.User != signer.Address() {
utils.Fatalf("Signer address does not match the update request")
}
// set the new data
updateRequest.SetData(data)

View File

@ -0,0 +1,179 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// Command feed allows the user to create and update signed Swarm feeds
package main
import cli "gopkg.in/urfave/cli.v1"
var (
ChequebookAddrFlag = cli.StringFlag{
Name: "chequebook",
Usage: "chequebook contract address",
EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR,
}
SwarmAccountFlag = cli.StringFlag{
Name: "bzzaccount",
Usage: "Swarm account key file",
EnvVar: SWARM_ENV_ACCOUNT,
}
SwarmListenAddrFlag = cli.StringFlag{
Name: "httpaddr",
Usage: "Swarm HTTP API listening interface",
EnvVar: SWARM_ENV_LISTEN_ADDR,
}
SwarmPortFlag = cli.StringFlag{
Name: "bzzport",
Usage: "Swarm local http api port",
EnvVar: SWARM_ENV_PORT,
}
SwarmNetworkIdFlag = cli.IntFlag{
Name: "bzznetworkid",
Usage: "Network identifier (integer, default 3=swarm testnet)",
EnvVar: SWARM_ENV_NETWORK_ID,
}
SwarmSwapEnabledFlag = cli.BoolFlag{
Name: "swap",
Usage: "Swarm SWAP enabled (default false)",
EnvVar: SWARM_ENV_SWAP_ENABLE,
}
SwarmSwapAPIFlag = cli.StringFlag{
Name: "swap-api",
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
EnvVar: SWARM_ENV_SWAP_API,
}
SwarmSyncDisabledFlag = cli.BoolTFlag{
Name: "nosync",
Usage: "Disable swarm syncing",
EnvVar: SWARM_ENV_SYNC_DISABLE,
}
SwarmSyncUpdateDelay = cli.DurationFlag{
Name: "sync-update-delay",
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
}
SwarmMaxStreamPeerServersFlag = cli.IntFlag{
Name: "max-stream-peer-servers",
Usage: "Limit of Stream peer servers, 0 denotes unlimited",
EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS,
Value: 10000, // A very large default value is possible as stream servers have a very small memory footprint
}
SwarmLightNodeEnabled = cli.BoolFlag{
Name: "lightnode",
Usage: "Enable Swarm LightNode (default false)",
EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
}
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
Name: "delivery-skip-check",
Usage: "Skip chunk delivery check (default false)",
EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
}
EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api",
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
EnvVar: SWARM_ENV_ENS_API,
}
SwarmApiFlag = cli.StringFlag{
Name: "bzzapi",
Usage: "Specifies the Swarm HTTP endpoint to connect to",
Value: "http://127.0.0.1:8500",
}
SwarmRecursiveFlag = cli.BoolFlag{
Name: "recursive",
Usage: "Upload directories recursively",
}
SwarmWantManifestFlag = cli.BoolTFlag{
Name: "manifest",
Usage: "Automatic manifest upload (default true)",
}
SwarmUploadDefaultPath = cli.StringFlag{
Name: "defaultpath",
Usage: "path to file served for empty url path (none)",
}
SwarmAccessGrantKeyFlag = cli.StringFlag{
Name: "grant-key",
Usage: "grants a given public key access to an ACT",
}
SwarmAccessGrantKeysFlag = cli.StringFlag{
Name: "grant-keys",
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
}
SwarmUpFromStdinFlag = cli.BoolFlag{
Name: "stdin",
Usage: "reads data to be uploaded from stdin",
}
SwarmUploadMimeType = cli.StringFlag{
Name: "mime",
Usage: "Manually specify MIME type",
}
SwarmEncryptedFlag = cli.BoolFlag{
Name: "encrypt",
Usage: "use encrypted upload",
}
SwarmAccessPasswordFlag = cli.StringFlag{
Name: "password",
Usage: "Password",
EnvVar: SWARM_ACCESS_PASSWORD,
}
SwarmDryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "dry-run",
}
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
EnvVar: SWARM_ENV_CORS,
}
SwarmStorePath = cli.StringFlag{
Name: "store.path",
Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
EnvVar: SWARM_ENV_STORE_PATH,
}
SwarmStoreCapacity = cli.Uint64Flag{
Name: "store.size",
Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
EnvVar: SWARM_ENV_STORE_CAPACITY,
}
SwarmStoreCacheCapacity = cli.UintFlag{
Name: "store.cache.size",
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
SwarmCompressedFlag = cli.BoolFlag{
Name: "compressed",
Usage: "Prints encryption keys in compressed form",
}
SwarmFeedNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name",
}
SwarmFeedTopicFlag = cli.StringFlag{
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
SwarmFeedDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
}
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",
}
SwarmFeedUserFlag = cli.StringFlag{
Name: "user",
Usage: "Indicates the user who updates the feed",
}
)
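
These flag definitions follow urfave/cli's EnvVar convention: the environment variable supplies the value when the flag is absent on the command line. A minimal sketch of that mechanism (the SWARM_BZZAPI variable name is illustrative, not one of the constants above; the real bzzapi flag has no EnvVar):

package main

import (
    "fmt"
    "os"

    cli "gopkg.in/urfave/cli.v1"
)

func main() {
    app := cli.NewApp()
    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name:   "bzzapi",
            Usage:  "Specifies the Swarm HTTP endpoint to connect to",
            Value:  "http://127.0.0.1:8500",
            EnvVar: "SWARM_BZZAPI", // illustrative only
        },
    }
    app.Action = func(ctx *cli.Context) error {
        // Resolution order: explicit flag > environment variable > Value.
        fmt.Println("endpoint:", ctx.String("bzzapi"))
        return nil
    }
    app.Run(os.Args)
}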

View File

@ -24,16 +24,50 @@ import (
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
)
var fsCommand = cli.Command{
Name: "fs",
CustomHelpTemplate: helpTemplate,
Usage: "perform FUSE operations",
ArgsUsage: "fs COMMAND",
Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
Subcommands: []cli.Command{
{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
Usage: "mount a swarm hash to a mount point",
ArgsUsage: "swarm fs mount <manifest hash> <mount point>",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
Usage: "unmount a swarmfs mount",
ArgsUsage: "swarm fs unmount <mount point>",
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
Usage: "list swarmfs mounts",
ArgsUsage: "swarm fs list",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
}
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
utils.Fatalf("Usage: swarm fs mount <manifestHash> <file name>")
}
client, err := dialRPC(cliContext)
@ -60,7 +94,7 @@ func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
utils.Fatalf("Usage: swarm fs unmount <mount path>")
}
client, err := dialRPC(cliContext)
if err != nil {
@ -108,20 +142,21 @@ func listMounts(cliContext *cli.Context) {
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
var endpoint string
endpoint := getIPCEndpoint(ctx)
log.Info("IPC endpoint", "path", endpoint)
return rpc.Dial(endpoint)
}
if ctx.IsSet(utils.IPCPathFlag.Name) {
endpoint = ctx.String(utils.IPCPathFlag.Name)
} else {
utils.Fatalf("swarm ipc endpoint not specified")
}
func getIPCEndpoint(ctx *cli.Context) string {
cfg := defaultNodeConfig
utils.SetNodeConfig(ctx, &cfg)
if endpoint == "" {
endpoint = node.DefaultIPCEndpoint(clientIdentifier)
} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
endpoint := cfg.IPCEndpoint()
if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
return rpc.Dial(endpoint)
return endpoint
}
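
dialRPC now derives the endpoint from the node configuration (honouring --datadir and --ipcpath via utils.SetNodeConfig) instead of requiring --ipcpath explicitly, and the prefix stripping keeps backwards compatibility with geth < 1.5 endpoints. That normalization in isolation (the helper name is ours):

package main

import (
    "fmt"
    "strings"
)

// normalizeEndpoint drops the legacy "rpc:" / "ipc:" prefixes that
// geth < 1.5 required, mirroring the logic in getIPCEndpoint above.
func normalizeEndpoint(endpoint string) string {
    if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
        return endpoint[4:]
    }
    return endpoint
}

func main() {
    fmt.Println(normalizeEndpoint("ipc:/home/user/.ethereum/bzzd.ipc"))
    fmt.Println(normalizeEndpoint("/home/user/.ethereum/bzzd.ipc"))
}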

View File

@ -27,6 +27,15 @@ import (
"gopkg.in/urfave/cli.v1"
)
var hashCommand = cli.Command{
Action: hash,
CustomHelpTemplate: helpTemplate,
Name: "hash",
Usage: "print the swarm hash of a file or directory",
ArgsUsage: "<file>",
Description: "Prints the swarm hash of file or directory",
}
func hash(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 1 {

View File

@ -27,6 +27,15 @@ import (
"gopkg.in/urfave/cli.v1"
)
var listCommand = cli.Command{
Action: list,
CustomHelpTemplate: helpTemplate,
Name: "ls",
Usage: "list files and directories contained in a manifest",
ArgsUsage: "<manifest> [<prefix>]",
Description: "Lists files and directories contained in a manifest",
}
func list(ctx *cli.Context) {
args := ctx.Args()

View File

@ -70,165 +70,6 @@ var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)
var (
ChequebookAddrFlag = cli.StringFlag{
Name: "chequebook",
Usage: "chequebook contract address",
EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR,
}
SwarmAccountFlag = cli.StringFlag{
Name: "bzzaccount",
Usage: "Swarm account key file",
EnvVar: SWARM_ENV_ACCOUNT,
}
SwarmListenAddrFlag = cli.StringFlag{
Name: "httpaddr",
Usage: "Swarm HTTP API listening interface",
EnvVar: SWARM_ENV_LISTEN_ADDR,
}
SwarmPortFlag = cli.StringFlag{
Name: "bzzport",
Usage: "Swarm local http api port",
EnvVar: SWARM_ENV_PORT,
}
SwarmNetworkIdFlag = cli.IntFlag{
Name: "bzznetworkid",
Usage: "Network identifier (integer, default 3=swarm testnet)",
EnvVar: SWARM_ENV_NETWORK_ID,
}
SwarmSwapEnabledFlag = cli.BoolFlag{
Name: "swap",
Usage: "Swarm SWAP enabled (default false)",
EnvVar: SWARM_ENV_SWAP_ENABLE,
}
SwarmSwapAPIFlag = cli.StringFlag{
Name: "swap-api",
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
EnvVar: SWARM_ENV_SWAP_API,
}
SwarmSyncDisabledFlag = cli.BoolTFlag{
Name: "nosync",
Usage: "Disable swarm syncing",
EnvVar: SWARM_ENV_SYNC_DISABLE,
}
SwarmSyncUpdateDelay = cli.DurationFlag{
Name: "sync-update-delay",
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
}
SwarmMaxStreamPeerServersFlag = cli.IntFlag{
Name: "max-stream-peer-servers",
Usage: "Limit of Stream peer servers, 0 denotes unlimited",
EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS,
Value: 10000, // A very large default value is possible as stream servers have a very small memory footprint
}
SwarmLightNodeEnabled = cli.BoolFlag{
Name: "lightnode",
Usage: "Enable Swarm LightNode (default false)",
EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE,
}
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
Name: "delivery-skip-check",
Usage: "Skip chunk delivery check (default false)",
EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
}
EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api",
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
EnvVar: SWARM_ENV_ENS_API,
}
SwarmApiFlag = cli.StringFlag{
Name: "bzzapi",
Usage: "Swarm HTTP endpoint",
Value: "http://127.0.0.1:8500",
}
SwarmRecursiveFlag = cli.BoolFlag{
Name: "recursive",
Usage: "Upload directories recursively",
}
SwarmWantManifestFlag = cli.BoolTFlag{
Name: "manifest",
Usage: "Automatic manifest upload (default true)",
}
SwarmUploadDefaultPath = cli.StringFlag{
Name: "defaultpath",
Usage: "path to file served for empty url path (none)",
}
SwarmAccessGrantKeyFlag = cli.StringFlag{
Name: "grant-key",
Usage: "grants a given public key access to an ACT",
}
SwarmAccessGrantKeysFlag = cli.StringFlag{
Name: "grant-keys",
Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT",
}
SwarmUpFromStdinFlag = cli.BoolFlag{
Name: "stdin",
Usage: "reads data to be uploaded from stdin",
}
SwarmUploadMimeType = cli.StringFlag{
Name: "mime",
Usage: "Manually specify MIME type",
}
SwarmEncryptedFlag = cli.BoolFlag{
Name: "encrypt",
Usage: "use encrypted upload",
}
SwarmAccessPasswordFlag = cli.StringFlag{
Name: "password",
Usage: "Password",
EnvVar: SWARM_ACCESS_PASSWORD,
}
SwarmDryRunFlag = cli.BoolFlag{
Name: "dry-run",
Usage: "dry-run",
}
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
EnvVar: SWARM_ENV_CORS,
}
SwarmStorePath = cli.StringFlag{
Name: "store.path",
Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
EnvVar: SWARM_ENV_STORE_PATH,
}
SwarmStoreCapacity = cli.Uint64Flag{
Name: "store.size",
Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
EnvVar: SWARM_ENV_STORE_CAPACITY,
}
SwarmStoreCacheCapacity = cli.UintFlag{
Name: "store.cache.size",
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
SwarmCompressedFlag = cli.BoolFlag{
Name: "compressed",
Usage: "Prints encryption keys in compressed form",
}
SwarmFeedNameFlag = cli.StringFlag{
Name: "name",
Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name",
}
SwarmFeedTopicFlag = cli.StringFlag{
Name: "topic",
Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters",
}
SwarmFeedDataOnCreateFlag = cli.StringFlag{
Name: "data",
Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x",
}
SwarmFeedManifestFlag = cli.StringFlag{
Name: "manifest",
Usage: "Refers to the feed through a manifest",
}
SwarmFeedUserFlag = cli.StringFlag{
Name: "user",
Usage: "Indicates the user who updates the feed",
}
)
// declare a few constant error messages, useful for later error-check comparisons in tests
var (
SWARM_ERR_NO_BZZACCOUNT = "bzzaccount option is required but not set; check your config file, command line or environment variables"
@ -279,267 +120,24 @@ func init() {
Usage: "Print public key information",
Description: "The output of this command is supposed to be machine-readable",
},
{
Action: upload,
CustomHelpTemplate: helpTemplate,
Name: "up",
Usage: "uploads a file or directory to swarm using the HTTP API",
ArgsUsage: "<file>",
Flags: []cli.Flag{SwarmEncryptedFlag},
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
CustomHelpTemplate: helpTemplate,
Name: "access",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root manifest",
Subcommands: []cli.Command{
{
CustomHelpTemplate: helpTemplate,
Name: "new",
Usage: "encrypts a reference and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
Subcommands: []cli.Command{
{
Action: accessNewPass,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
},
Name: "pass",
Usage: "encrypts a reference with a password and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewPK,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
utils.PasswordFileFlag,
SwarmDryRunFlag,
SwarmAccessGrantKeyFlag,
},
Name: "pk",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
{
Action: accessNewACT,
CustomHelpTemplate: helpTemplate,
Flags: []cli.Flag{
SwarmAccessGrantKeysFlag,
SwarmDryRunFlag,
utils.PasswordFileFlag,
},
Name: "act",
Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest",
ArgsUsage: "<ref>",
Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest",
},
},
},
},
},
{
CustomHelpTemplate: helpTemplate,
Name: "feed",
Usage: "(Advanced) Create and update Swarm Feeds",
ArgsUsage: "<create|update|info>",
Description: "Works with Swarm Feeds",
Subcommands: []cli.Command{
{
Action: feedCreateManifest,
CustomHelpTemplate: helpTemplate,
Name: "create",
Usage: "creates and publishes a new feed manifest",
Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic.
The feed topic can be built in the following ways:
* use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
The --user flag allows this manifest to refer to a user other than yourself. If not specified,
it defaults to your local account (--bzzaccount)`,
Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
},
{
Action: feedUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "updates the content of an existing Swarm Feed",
ArgsUsage: "<0x Hex data>",
Description: `publishes a new update on the specified topic
The feed topic can be built in the following ways:
* use --topic to set the topic to an arbitrary binary hex string.
* use --name to set the topic to a human-readable name.
For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture.
* use both --topic and --name to create named subtopics.
For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning
this feed tracks a discussion about that contract.
If you have a manifest, you can specify it with --manifest to refer to the feed,
instead of using --topic / --name
`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag},
},
{
Action: feedInfo,
CustomHelpTemplate: helpTemplate,
Name: "info",
Usage: "obtains information about an existing Swarm feed",
Description: `obtains information about an existing Swarm feed
The topic can be specified directly with the --topic flag as an hex string
If no topic is specified, the default topic (zero) will be used
The --name flag can be used to specify subtopics with a specific name.
The --user flag allows referring to a user other than yourself. If not specified,
it defaults to your local account (--bzzaccount)
If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user
to refer to the feed`,
Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag},
},
},
},
{
Action: list,
CustomHelpTemplate: helpTemplate,
Name: "ls",
Usage: "list files and directories contained in a manifest",
ArgsUsage: "<manifest> [<prefix>]",
Description: "Lists files and directories contained in a manifest",
},
{
Action: hash,
CustomHelpTemplate: helpTemplate,
Name: "hash",
Usage: "print the swarm hash of a file or directory",
ArgsUsage: "<file>",
Description: "Prints the swarm hash of file or directory",
},
{
Action: download,
Name: "down",
Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, the working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.`,
},
{
Name: "manifest",
CustomHelpTemplate: helpTemplate,
Usage: "perform operations on swarm manifests",
ArgsUsage: "COMMAND",
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
Action: manifestAdd,
CustomHelpTemplate: helpTemplate,
Name: "add",
Usage: "add a new path to the manifest",
ArgsUsage: "<MANIFEST> <path> <hash>",
Description: "Adds a new path to the manifest",
},
{
Action: manifestUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "update the hash for an already existing path in the manifest",
ArgsUsage: "<MANIFEST> <path> <newhash>",
Description: "Update the hash for an already existing path in the manifest",
},
{
Action: manifestRemove,
CustomHelpTemplate: helpTemplate,
Name: "remove",
Usage: "removes a path from the manifest",
ArgsUsage: "<MANIFEST> <path>",
Description: "Removes a path from the manifest",
},
},
},
{
Name: "fs",
CustomHelpTemplate: helpTemplate,
Usage: "perform FUSE operations",
ArgsUsage: "fs COMMAND",
Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
Subcommands: []cli.Command{
{
Action: mount,
CustomHelpTemplate: helpTemplate,
Name: "mount",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
ArgsUsage: "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
ArgsUsage: "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
ArgsUsage: "swarm fs list --ipcpath <path to bzzd.ipc>",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
},
{
Name: "db",
CustomHelpTemplate: helpTemplate,
Usage: "manage the local chunk database",
ArgsUsage: "db COMMAND",
Description: "Manage the local chunk database",
Subcommands: []cli.Command{
{
Action: dbExport,
CustomHelpTemplate: helpTemplate,
Name: "export",
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
ArgsUsage: "<chunkdb> <file>",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The export may be quite large; consider piping the output through the Unix
pv(1) tool to get a progress bar:
swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
`,
},
{
Action: dbImport,
CustomHelpTemplate: helpTemplate,
Name: "import",
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin).
swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
The import may be quite large; consider piping the input through the Unix
pv(1) tool to get a progress bar:
pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
},
},
},
// See upload.go
upCommand,
// See access.go
accessCommand,
// See feeds.go
feedCommand,
// See list.go
listCommand,
// See hash.go
hashCommand,
// See download.go
downloadCommand,
// See manifest.go
manifestCommand,
// See fs.go
fsCommand,
// See db.go
dbCommand,
// See config.go
DumpConfigCommand,
}

View File

@ -28,6 +28,40 @@ import (
"gopkg.in/urfave/cli.v1"
)
var manifestCommand = cli.Command{
Name: "manifest",
CustomHelpTemplate: helpTemplate,
Usage: "perform operations on swarm manifests",
ArgsUsage: "COMMAND",
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
Action: manifestAdd,
CustomHelpTemplate: helpTemplate,
Name: "add",
Usage: "add a new path to the manifest",
ArgsUsage: "<MANIFEST> <path> <hash>",
Description: "Adds a new path to the manifest",
},
{
Action: manifestUpdate,
CustomHelpTemplate: helpTemplate,
Name: "update",
Usage: "update the hash for an already existing path in the manifest",
ArgsUsage: "<MANIFEST> <path> <newhash>",
Description: "Update the hash for an already existing path in the manifest",
},
{
Action: manifestRemove,
CustomHelpTemplate: helpTemplate,
Name: "remove",
Usage: "removes a path from the manifest",
ArgsUsage: "<MANIFEST> <path>",
Description: "Removes a path from the manifest",
},
},
}
// manifestAdd adds a new entry to the manifest at the given path.
// New entry hash, the last argument, must be the hash of a manifest
// with only one entry, which meta-data will be added to the original manifest.

View File

@ -0,0 +1,366 @@
package main
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptrace"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/testutil"
colorable "github.com/mattn/go-colorable"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
const (
feedRandomDataLength = 8
)
func cliFeedUploadAndSync(c *cli.Context) error {
metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))
errc := make(chan error)
go func() {
errc <- feedUploadAndSync(c)
}()
select {
case err := <-errc:
if err != nil {
metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
}
return err
case <-time.After(time.Duration(timeout) * time.Second):
metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
return fmt.Errorf("timeout after %v sec", timeout)
}
}
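This wrapper is a stock Go timeout pattern: run the real work in a goroutine, then race its error channel against a timer. A minimal standalone sketch of the same shape (names hypothetical, not part of the diff):

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout races work() against a deadline, mirroring the
// errc/select shape of cliFeedUploadAndSync above. The channel is
// buffered so the worker goroutine cannot leak after a timeout.
func runWithTimeout(work func() error, limit time.Duration) error {
	errc := make(chan error, 1)
	go func() { errc <- work() }()
	select {
	case err := <-errc:
		return err
	case <-time.After(limit):
		return errors.New("timeout")
	}
}

func main() {
	err := runWithTimeout(func() error {
		time.Sleep(50 * time.Millisecond)
		return nil
	}, time.Second)
	fmt.Println(err) // <nil>
}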
// TODO: retrieve with manifest + extract repeating code
func feedUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
generateEndpoints(scheme, cluster, appName, from, to)
log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
// create a random private key to sign updates with and derive the address
pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")
if err != nil {
return err
}
defer pkFile.Close()
defer os.Remove(pkFile.Name())
privkeyHex := "0000000000000000000000000000000000000000000000000000000000001976"
privKey, err := crypto.HexToECDSA(privkeyHex)
if err != nil {
return err
}
user := crypto.PubkeyToAddress(privKey.PublicKey)
userHex := hexutil.Encode(user.Bytes())
// save the private key to a file
_, err = io.WriteString(pkFile, privkeyHex)
if err != nil {
return err
}
// keep hex strings for topic and subtopic
var topicHex string
var subTopicHex string
// and create combination hex topics for bzz-feed retrieval
// xor'ed with topic (zero-value topic if no topic)
var subTopicOnlyHex string
var mergedSubTopicHex string
// generate random topic and subtopic and put a hex on them
topicBytes, err := generateRandomData(feed.TopicLength)
if err != nil {
return err
}
topicHex = hexutil.Encode(topicBytes)
subTopicBytes, err := generateRandomData(8)
if err != nil {
return err
}
subTopicHex = hexutil.Encode(subTopicBytes)
mergedSubTopic, err := feed.NewTopic(subTopicHex, topicBytes)
if err != nil {
return err
}
mergedSubTopicHex = hexutil.Encode(mergedSubTopic[:])
subTopicOnlyBytes, err := feed.NewTopic(subTopicHex, nil)
if err != nil {
return err
}
subTopicOnlyHex = hexutil.Encode(subTopicOnlyBytes[:])
// create feed manifest, topic only
var out bytes.Buffer
cmd := exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--user", userHex)
cmd.Stdout = &out
log.Debug("create feed manifest topic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
return err
}
manifestWithTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
if len(manifestWithTopic) != 64 {
return fmt.Errorf("unknown feed create manifest hash format (topic): (%d) %s", len(out.String()), manifestWithTopic)
}
log.Debug("create topic feed", "manifest", manifestWithTopic)
out.Reset()
// create feed manifest, subtopic only
cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--name", subTopicHex, "--user", userHex)
cmd.Stdout = &out
log.Debug("create feed manifest subtopic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
return err
}
manifestWithSubTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
if len(manifestWithSubTopic) != 64 {
return fmt.Errorf("unknown feed create manifest hash format (subtopic): (%d) %s", len(out.String()), manifestWithSubTopic)
}
log.Debug("create subtopic feed", "manifest", manifestWithTopic)
out.Reset()
// create feed manifest, merged topic
cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex)
cmd.Stdout = &out
log.Debug("create feed manifest mergetopic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
log.Error(err.Error())
return err
}
manifestWithMergedTopic := strings.TrimRight(out.String(), string([]byte{0x0a}))
if len(manifestWithMergedTopic) != 64 {
return fmt.Errorf("unknown feed create manifest hash format (mergedtopic): (%d) %s", len(out.String()), manifestWithMergedTopic)
}
log.Debug("create mergedtopic feed", "manifest", manifestWithMergedTopic)
out.Reset()
// create test data
data, err := generateRandomData(feedRandomDataLength)
if err != nil {
return err
}
h := md5.New()
h.Write(data)
dataHash := h.Sum(nil)
dataHex := hexutil.Encode(data)
// update with topic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, dataHex)
cmd.Stdout = &out
log.Debug("update feed manifest topic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update topic", "out", out)
out.Reset()
// update with subtopic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, dataHex)
cmd.Stdout = &out
log.Debug("update feed manifest subtopic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update subtopic", "out", out)
out.Reset()
// update with merged topic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex)
cmd.Stdout = &out
log.Debug("update feed manifest merged topic cmd", "cmd", cmd)
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update mergedtopic", "out", out)
out.Reset()
time.Sleep(3 * time.Second)
// retrieve the data
wg := sync.WaitGroup{}
for _, endpoint := range endpoints {
// raw retrieve across all three topic variants
for _, hex := range []string{topicHex, subTopicOnlyHex, mergedSubTopicHex} {
wg.Add(1)
ruid := uuid.New()[:8]
go func(hex string, endpoint string, ruid string) {
for {
err := fetchFeed(hex, userHex, endpoint, dataHash, ruid)
if err != nil {
continue
}
wg.Done()
return
}
}(hex, endpoint, ruid)
}
}
wg.Wait()
log.Info("all endpoints synced random data successfully")
// upload test file
seed := int(time.Now().UnixNano() / 1e6)
log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)
randomBytes := testutil.RandomBytes(seed, filesize*1000)
hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
return err
}
hashBytes, err := hexutil.Decode("0x" + hash)
if err != nil {
return err
}
multihashHex := hexutil.Encode(hashBytes)
fileHash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
return err
}
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fileHash))
// update file with topic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, multihashHex)
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update topic", "out", out)
out.Reset()
// update file with subtopic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, multihashHex)
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update subtopic", "out", out)
out.Reset()
// update file with merged topic
cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex)
cmd.Stdout = &out
err = cmd.Run()
if err != nil {
return err
}
log.Debug("feed update mergedtopic", "out", out)
out.Reset()
time.Sleep(3 * time.Second)
for _, endpoint := range endpoints {
// manifest retrieve across all three feed manifests
for _, url := range []string{manifestWithTopic, manifestWithSubTopic, manifestWithMergedTopic} {
wg.Add(1)
ruid := uuid.New()[:8]
go func(url string, endpoint string, ruid string) {
for {
err := fetch(url, endpoint, fileHash, ruid)
if err != nil {
continue
}
wg.Done()
return
}
}(url, endpoint, ruid)
}
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
return nil
}
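The merged-topic step near the top of this function leans on feed.NewTopic folding a name into a 32-byte topic. A rough standalone sketch of that overlay, assuming (per the "xor'ed with topic" comment above) the name bytes are simply XORed over the related-content bytes — an illustration, not the vendored implementation:

package main

import "fmt"

const topicLength = 32 // mirrors feed.TopicLength

// mergeTopic overlays the name bytes onto the related-content bytes
// with XOR; a nil or short related slice is implicitly zero-padded.
func mergeTopic(name string, related []byte) [topicLength]byte {
	var topic [topicLength]byte
	copy(topic[:], related)
	for i := 0; i < len(name) && i < topicLength; i++ {
		topic[i] ^= name[i]
	}
	return topic
}

func main() {
	t := mergeTopic("mysubtopic", []byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Printf("%x\n", t) // deterministic for fixed inputs
}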
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
defer sp.Finish()
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)
var tn time.Time
reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
req, _ := http.NewRequest("GET", reqUri, nil)
opentracing.GlobalTracer().Inject(
sp.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)
req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
transport := http.DefaultTransport
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
tn = time.Now()
res, err := transport.RoundTrip(req)
if err != nil {
log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {
return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid)
}
defer res.Body.Close()
rdigest, err := digest(res.Body)
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
return err
}
if !bytes.Equal(rdigest, original) {
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
log.Warn(err.Error(), "ruid", ruid)
return err
}
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
return nil
}

View File

@ -17,28 +17,41 @@
package main
import (
"fmt"
"os"
"sort"
"github.com/ethereum/go-ethereum/cmd/utils"
gethmetrics "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/influxdb"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
"github.com/ethereum/go-ethereum/swarm/tracing"
"github.com/ethereum/go-ethereum/log"
colorable "github.com/mattn/go-colorable"
cli "gopkg.in/urfave/cli.v1"
)
var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
)
var (
endpoints []string
includeLocalhost bool
cluster string
appName string
scheme string
filesize int
syncDelay int
from int
to int
verbosity int
timeout int
single bool
)
func main() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
app := cli.NewApp()
app.Name = "smoke-test"
@ -47,10 +60,16 @@ func main() {
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "cluster-endpoint",
Value: "testing",
Usage: "cluster to point to (local, open or testing)",
Value: "prod",
Usage: "cluster to point to (prod or a given namespace)",
Destination: &cluster,
},
cli.StringFlag{
Name: "app",
Value: "swarm",
Usage: "application to point to (swarm or swarm-private)",
Destination: &appName,
},
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
@ -80,8 +99,42 @@ func main() {
Usage: "file size for generated random file in KB",
Destination: &filesize,
},
cli.IntFlag{
Name: "sync-delay",
Value: 5,
Usage: "duration of delay in seconds to wait for content to be synced",
Destination: &syncDelay,
},
cli.IntFlag{
Name: "verbosity",
Value: 1,
Usage: "verbosity",
Destination: &verbosity,
},
cli.IntFlag{
Name: "timeout",
Value: 120,
Usage: "timeout in seconds after which kill the process",
Destination: &timeout,
},
cli.BoolFlag{
Name: "single",
Usage: "whether to fetch content from a single node or from all nodes",
Destination: &single,
},
}
app.Flags = append(app.Flags, []cli.Flag{
utils.MetricsEnabledFlag,
swarmmetrics.MetricsInfluxDBEndpointFlag,
swarmmetrics.MetricsInfluxDBDatabaseFlag,
swarmmetrics.MetricsInfluxDBUsernameFlag,
swarmmetrics.MetricsInfluxDBPasswordFlag,
swarmmetrics.MetricsInfluxDBHostTagFlag,
}...)
app.Flags = append(app.Flags, tracing.Flags...)
app.Commands = []cli.Command{
{
Name: "upload_and_sync",
@ -89,13 +142,49 @@ func main() {
Usage: "upload and sync",
Action: cliUploadAndSync,
},
{
Name: "feed_sync",
Aliases: []string{"f"},
Usage: "feed update generate, upload and sync",
Action: cliFeedUploadAndSync,
},
}
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))
app.Before = func(ctx *cli.Context) error {
tracing.Setup(ctx)
return nil
}
app.After = func(ctx *cli.Context) error {
return emitMetrics(ctx)
}
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
os.Exit(1)
}
}
func emitMetrics(ctx *cli.Context) error {
if gethmetrics.Enabled {
var (
endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name)
database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
hosttag = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
)
return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
"host": hosttag,
"version": gitCommit,
"filesize": fmt.Sprintf("%v", filesize),
})
}
return nil
}

View File

@ -18,38 +18,41 @@ package main
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
crand "crypto/rand"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/http/httptrace"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/testutil"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
func generateEndpoints(scheme string, cluster string, from int, to int) {
func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
if cluster == "prod" {
cluster = ""
} else if cluster == "local" {
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port))
for port := from; port < to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
}
return
} else {
cluster = cluster + "."
}
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster))
for port := from; port < to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
}
}
if includeLocalhost {
@ -58,22 +61,51 @@ func generateEndpoints(scheme string, cluster string, from int, to int) {
}
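Concretely, the new loop above yields one gateway URL per node port; for illustration (namespace and port range hypothetical), the generated endpoints look like this:

package main

import "fmt"

func main() {
	scheme, app, cluster := "https", "swarm", "myns"
	for port := 8501; port < 8504; port++ {
		// same format string as the namespaced branch above
		fmt.Printf("%s://%s-%v-%s.stg.swarm-gateways.net\n", scheme, app, port, cluster)
	}
	// https://swarm-8501-myns.stg.swarm-gateways.net
	// https://swarm-8502-myns.stg.swarm-gateways.net
	// https://swarm-8503-myns.stg.swarm-gateways.net
}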
func cliUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
generateEndpoints(scheme, cluster, from, to)
metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
log.Info("uploading to " + endpoints[0] + " and syncing")
errc := make(chan error)
go func() {
errc <- uploadAndSync(c)
}()
f, cleanup := generateRandomFile(filesize * 1000)
defer cleanup()
select {
case err := <-errc:
if err != nil {
metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
}
return err
case <-time.After(time.Duration(timeout) * time.Second):
metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
return fmt.Errorf("timeout after %v sec", timeout)
}
}
hash, err := upload(f, endpoints[0])
func uploadAndSync(c *cli.Context) error {
defer func(now time.Time) {
totalTime := time.Since(now)
log.Info("total time", "time", totalTime, "kb", filesize)
metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
}(time.Now())
generateEndpoints(scheme, cluster, appName, from, to)
seed := int(time.Now().UnixNano() / 1e6)
log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
randomBytes := testutil.RandomBytes(seed, filesize*1000)
t1 := time.Now()
hash, err := upload(&randomBytes, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}
metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))
fhash, err := digest(f)
fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
log.Error(err.Error())
return err
@ -81,24 +113,47 @@ func cliUploadAndSync(c *cli.Context) error {
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
time.Sleep(3 * time.Second)
time.Sleep(time.Duration(syncDelay) * time.Second)
wg := sync.WaitGroup{}
for _, endpoint := range endpoints {
endpoint := endpoint
if single {
rand.Seed(time.Now().UTC().UnixNano())
randIndex := 1 + rand.Intn(len(endpoints)-1)
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
fetchTime := time.Since(start)
if err != nil {
continue
}
metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
}(endpoint, ruid)
}(endpoints[randIndex], ruid)
} else {
for _, endpoint := range endpoints {
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
start := time.Now()
err := fetch(hash, endpoint, fhash, ruid)
fetchTime := time.Since(start)
if err != nil {
continue
}
metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
wg.Done()
return
}
}(endpoint, ruid)
}
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
@ -108,13 +163,33 @@ func cliUploadAndSync(c *cli.Context) error {
// fetch retrieves the requested `hash` from the `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
defer sp.Finish()
log.Trace("sleeping", "ruid", ruid)
time.Sleep(3 * time.Second)
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
var tn time.Time
reqUri := endpoint + "/bzz:/" + hash + "/"
req, _ := http.NewRequest("GET", reqUri, nil)
opentracing.GlobalTracer().Inject(
sp.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
transport := http.DefaultTransport
//transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
tn = time.Now()
res, err := transport.RoundTrip(req)
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
log.Error(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
@ -145,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error {
}
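client.GetClientTrace used above returns an httptrace.ClientTrace wired to swarm's logger; the underlying standard-library mechanics look roughly like this minimal sketch (URL illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"time"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	start := time.Now()
	// callbacks fire as the request progresses through the transport
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("got conn, reused:", info.Reused, "after:", time.Since(start))
		},
		GotFirstResponseByte: func() {
			fmt.Println("first byte after:", time.Since(start))
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	res, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println("status:", res.StatusCode)
}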
// upload uploads a file `f` to `endpoint` via the `swarm up` cmd
func upload(f *os.File, endpoint string) (string, error) {
var out bytes.Buffer
cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", err
func upload(dataBytes *[]byte, endpoint string) (string, error) {
swarm := client.NewClient(endpoint)
f := &client.File{
ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
ManifestEntry: api.ManifestEntry{
ContentType: "text/plain",
Mode: 0660,
Size: int64(len(*dataBytes)),
},
}
hash := strings.TrimRight(out.String(), "\r\n")
return hash, nil
// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
return swarm.Upload(f, "", false)
}
func digest(r io.Reader) ([]byte, error) {
@ -166,26 +244,14 @@ func digest(r io.Reader) ([]byte, error) {
return h.Sum(nil), nil
}
// generateRandomFile is creating a temporary file with the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
// generateRandomData generates random data in a heap buffer
func generateRandomData(datasize int) ([]byte, error) {
b := make([]byte, datasize)
c, err := crand.Read(b)
if err != nil {
panic(err)
return nil, err
} else if c != datasize {
return nil, errors.New("short read")
}
// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}
buf := make([]byte, size)
_, err = rand.Read(buf)
if err != nil {
panic(err)
}
ioutil.WriteFile(tmp.Name(), buf, 0755)
return tmp, teardown
return b, nil
}

View File

@ -26,29 +26,47 @@ import (
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/log"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/cmd/utils"
"gopkg.in/urfave/cli.v1"
)
func upload(ctx *cli.Context) {
var upCommand = cli.Command{
Action: upload,
CustomHelpTemplate: helpTemplate,
Name: "up",
Usage: "uploads a file or directory to swarm using the HTTP API",
ArgsUsage: "<file>",
Flags: []cli.Flag{SwarmEncryptedFlag},
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
}
func upload(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi)
toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
file string
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi)
toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
autoDefaultPath = false
file string
)
if autoDefaultPathString := os.Getenv(SWARM_AUTO_DEFAULTPATH); autoDefaultPathString != "" {
b, err := strconv.ParseBool(autoDefaultPathString)
if err != nil {
utils.Fatalf("invalid environment variable %s: %v", SWARM_AUTO_DEFAULTPATH, err)
}
autoDefaultPath = b
}
if len(args) != 1 {
if fromStdin {
tmp, err := ioutil.TempFile("", "swarm-stdin")
@ -97,6 +115,15 @@ func upload(ctx *cli.Context) {
if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled")
}
if autoDefaultPath && defaultPath == "" {
defaultEntryCandidate := path.Join(file, "index.html")
log.Debug("trying to find default path", "path", defaultEntryCandidate)
defaultEntryStat, err := os.Stat(defaultEntryCandidate)
if err == nil && !defaultEntryStat.IsDir() {
log.Debug("setting auto detected default path", "path", defaultEntryCandidate)
defaultPath = defaultEntryCandidate
}
}
if defaultPath != "" {
// construct absolute default path
absDefaultPath, _ := filepath.Abs(defaultPath)

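The SWARM_AUTO_DEFAULTPATH branch above reduces to: if the env var parses as true and no --defaultpath was given, probe <dir>/index.html and use it when it is a regular file. A compact sketch of that decision (directory name hypothetical; note the real code treats an unparseable env value as fatal rather than ignoring it):

package main

import (
	"fmt"
	"os"
	"path"
	"strconv"
)

// autoDefaultPath mirrors the detection above, returning dir/index.html
// when SWARM_AUTO_DEFAULTPATH is truthy and the candidate is a regular file.
func autoDefaultPath(dir string) string {
	on, err := strconv.ParseBool(os.Getenv("SWARM_AUTO_DEFAULTPATH"))
	if err != nil || !on {
		return ""
	}
	candidate := path.Join(dir, "index.html")
	if st, err := os.Stat(candidate); err == nil && !st.IsDir() {
		return candidate
	}
	return ""
}

func main() {
	fmt.Println(autoDefaultPath("./site")) // "site/index.html" if present
}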
View File

@ -272,13 +272,13 @@ func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
// Accumulate the preimages and flush when enough was gathered
preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
if len(preimages) > 1024 {
rawdb.WritePreimages(db, 0, preimages)
rawdb.WritePreimages(db, preimages)
preimages = make(map[common.Hash][]byte)
}
}
// Flush the last batch preimage data
if len(preimages) > 0 {
rawdb.WritePreimages(db, 0, preimages)
rawdb.WritePreimages(db, preimages)
}
return nil
}
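The import loop above is a standard bounded-batch pattern: buffer writes, flush at a threshold, then flush whatever remains. Generically (items simplified to strings for illustration):

package main

import "fmt"

// flushBatched buffers items and flushes every `limit` of them, mirroring
// the preimage accumulation in ImportPreimages above. flush must consume
// the slice before returning, since the backing array is reused.
func flushBatched(items []string, limit int, flush func([]string)) {
	batch := make([]string, 0, limit)
	for _, it := range items {
		batch = append(batch, it)
		if len(batch) >= limit {
			flush(batch)
			batch = batch[:0]
		}
	}
	if len(batch) > 0 { // final partial batch
		flush(batch)
	}
}

func main() {
	flushBatched([]string{"a", "b", "c"}, 2, func(b []string) { fmt.Println(b) })
	// [a b]
	// [c]
}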

View File

@ -141,6 +141,10 @@ var (
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
}
ConstantinopleOverrideFlag = cli.Uint64Flag{
Name: "override.constantinople",
Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
}
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@ -200,6 +204,10 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
WhitelistFlag = cli.StringFlag{
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
}
// Dashboard settings
DashboardEnabledFlag = cli.BoolFlag{
Name: metrics.DashboardEnabledFlag,
@ -313,7 +321,12 @@ var (
CacheDatabaseFlag = cli.IntFlag{
Name: "cache.database",
Usage: "Percentage of cache memory allowance to use for database io",
Value: 75,
Value: 50,
}
CacheTrieFlag = cli.IntFlag{
Name: "cache.trie",
Usage: "Percentage of cache memory allowance to use for trie caching",
Value: 25,
}
CacheGCFlag = cli.IntFlag{
Name: "cache.gc",
@ -871,17 +884,12 @@ func SetULC(ctx *cli.Context, cfg *eth.Config) {
// makeDatabaseHandles raises the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int {
limit, err := fdlimit.Current()
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
if limit < 2048 {
if err := fdlimit.Raise(2048); err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
}
}
if limit > 2048 { // cap database file descriptors even if more is available
limit = 2048
if err := fdlimit.Raise(uint64(limit)); err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
}
@ -1025,16 +1033,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setWS(ctx, cfg)
setNodeUserIdent(ctx, cfg)
switch {
case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
}
setDataDir(ctx, cfg)
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name)
@ -1047,6 +1046,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
}
}
func setDataDir(ctx *cli.Context, cfg *node.Config) {
switch {
case ctx.GlobalIsSet(DataDirFlag.Name):
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
case ctx.GlobalBool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.GlobalBool(TestnetFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
case ctx.GlobalBool(RinkebyFlag.Name):
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
}
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.GlobalIsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name)
@ -1120,6 +1132,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
}
}
func setWhitelist(ctx *cli.Context, cfg *eth.Config) {
whitelist := ctx.GlobalString(WhitelistFlag.Name)
if whitelist == "" {
return
}
cfg.Whitelist = make(map[uint64]common.Hash)
for _, entry := range strings.Split(whitelist, ",") {
parts := strings.Split(entry, "=")
if len(parts) != 2 {
Fatalf("Invalid whitelist entry: %s", entry)
}
number, err := strconv.ParseUint(parts[0], 0, 64)
if err != nil {
Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
}
var hash common.Hash
if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
}
cfg.Whitelist[number] = hash
}
}
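A valid --whitelist value therefore pairs block numbers (parsed with base 0, so decimal or 0x-prefixed) with 32-byte hashes, comma separated. A quick standalone check of those parsing rules (hash value illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	entry := "7280000=0x5d4c1fab9ccf925b2a67d9ee7cd26b69f1c2e0cb41e2e1d4e9c7a4a89b38f3aa" // illustrative
	parts := strings.Split(entry, "=")
	number, err := strconv.ParseUint(parts[0], 0, 64) // base 0: decimal or 0x-prefixed
	if err != nil {
		panic(err)
	}
	var hash common.Hash // UnmarshalText requires exactly 32 hex-encoded bytes
	if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
		panic(err)
	}
	fmt.Println(number, hash.Hex())
}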
// checkExclusive verifies that only a single instance of the provided flags was
// set by the user. Each flag might optionally be followed by a string type to
// specialize it further.
@ -1185,6 +1220,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
setWhitelist(ctx, cfg)
if ctx.GlobalIsSet(SyncModeFlag.Name) {
cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
@ -1201,7 +1237,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
@ -1212,8 +1247,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
if ctx.GlobalIsSet(MinerNotifyFlag.Name) {
cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",")
@ -1423,7 +1461,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
@ -1448,12 +1485,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
cache := &core.CacheConfig{
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
TrieNodeLimit: eth.DefaultConfig.TrieCache,
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
TrieCleanLimit: eth.DefaultConfig.TrieCleanCache,
TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache,
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil)

View File

@ -31,6 +31,15 @@ func ToHex(b []byte) string {
return "0x" + hex
}
// ToHexArray creates a array of hex-string based on []byte
func ToHexArray(b [][]byte) []string {
r := make([]string, len(b))
for i := range b {
r[i] = ToHex(b[i])
}
return r
}
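Usage of the new helper is straightforward; each element inherits the 0x prefix that ToHex applies:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	hexes := common.ToHexArray([][]byte{{0xde, 0xad}, {0xbe, 0xef}})
	fmt.Println(hexes) // [0xdead 0xbeef]
}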
// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {

View File

@ -31,14 +31,15 @@ import (
var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)
// Contract contains information about a compiled contract, alongside its code.
// Contract contains information about a compiled contract, alongside its code and runtime code.
type Contract struct {
Code string `json:"code"`
Info ContractInfo `json:"info"`
Code string `json:"code"`
RuntimeCode string `json:"runtime-code"`
Info ContractInfo `json:"info"`
}
// ContractInfo contains information about a compiled contract, including access
// to the ABI definition, user and developer docs, and metadata.
// to the ABI definition, source mapping, user and developer docs, and metadata.
//
// Depending on the source, the language version, compiler version, and compiler
// options provide information about how the contract was compiled.
@ -48,6 +49,8 @@ type ContractInfo struct {
LanguageVersion string `json:"languageVersion"`
CompilerVersion string `json:"compilerVersion"`
CompilerOptions string `json:"compilerOptions"`
SrcMap string `json:"srcMap"`
SrcMapRuntime string `json:"srcMapRuntime"`
AbiDefinition interface{} `json:"abiDefinition"`
UserDoc interface{} `json:"userDoc"`
DeveloperDoc interface{} `json:"developerDoc"`
@ -63,14 +66,16 @@ type Solidity struct {
// --combined-output format
type solcOutput struct {
Contracts map[string]struct {
Bin, Abi, Devdoc, Userdoc, Metadata string
BinRuntime string `json:"bin-runtime"`
SrcMapRuntime string `json:"srcmap-runtime"`
Bin, SrcMap, Abi, Devdoc, Userdoc, Metadata string
}
Version string
}
func (s *Solidity) makeArgs() []string {
p := []string{
"--combined-json", "bin,abi,userdoc,devdoc",
"--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc",
"--optimize", // code optimizer switched on
}
if s.Major > 0 || s.Minor > 4 || s.Patch > 6 {
@ -157,7 +162,7 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro
// provided source, language and compiler version, and compiler options are all
// passed through into the Contract structs.
//
// The solc output is expected to contain ABI, user docs, and dev docs.
// The solc output is expected to contain ABI, source mapping, user docs, and dev docs.
//
// Returns an error if the JSON is malformed or missing data, or if the JSON
// embedded within the JSON is malformed.
@ -184,13 +189,16 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin
return nil, fmt.Errorf("solc: error reading dev doc: %v", err)
}
contracts[name] = &Contract{
Code: "0x" + info.Bin,
Code: "0x" + info.Bin,
RuntimeCode: "0x" + info.BinRuntime,
Info: ContractInfo{
Source: source,
Language: "Solidity",
LanguageVersion: languageVersion,
CompilerVersion: compilerVersion,
CompilerOptions: compilerOptions,
SrcMap: info.SrcMap,
SrcMapRuntime: info.SrcMapRuntime,
AbiDefinition: abi,
UserDoc: userdoc,
DeveloperDoc: devdoc,

View File

@ -696,7 +696,7 @@ func (c *Clique) SealHash(header *types.Header) common.Hash {
return sigHash(header)
}
// Close implements consensus.Engine. It's a noop for clique as there is are no background threads.
// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
return nil
}

View File

@ -37,27 +37,28 @@ type API struct {
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
func (api *API) GetWork() ([3]string, error) {
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
return [3]string{}, errors.New("not supported")
return [4]string{}, errors.New("not supported")
}
var (
workCh = make(chan [3]string, 1)
workCh = make(chan [4]string, 1)
errc = make(chan error, 1)
)
select {
case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.exitCh:
return [3]string{}, errEthashStopped
return [4]string{}, errEthashStopped
}
select {
case work := <-workCh:
return work, nil
case err := <-errc:
return [3]string{}, err
return [4]string{}, err
}
}
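After this change an eth_getWork reply carries four hex strings rather than three; the added element gives external miners the block height without a second call. The shape, with illustrative (truncated) values:

package main

import "fmt"

func main() {
	// shape of an eth_getWork result after this change (values illustrative)
	work := [4]string{
		"0x4f8a…",  // pow-hash of the current block header
		"0x1da3…",  // seed hash used for the DAG
		"0x0000…",  // boundary condition ("target"), 2^256/difficulty
		"0x66c740", // block number, hex encoded via hexutil.EncodeBig
	}
	fmt.Println(work)
}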

View File

@ -432,7 +432,7 @@ type hashrate struct {
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [3]string
res chan [4]string
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash

View File

@ -30,6 +30,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
@ -193,7 +194,7 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
results chan<- *types.Block
currentBlock *types.Block
currentWork [3]string
currentWork [4]string
notifyTransport = &http.Transport{}
notifyClient = &http.Client{
@ -234,12 +235,14 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
makeWork := func(block *types.Block) {
hash := ethash.SealHash(block.Header())
currentWork[0] = hash.Hex()
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
currentWork[3] = hexutil.EncodeBig(block.Number())
// Trace the seal work fetched by remote sealer.
currentBlock = block

View File

@ -53,12 +53,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
return ErrKnownBlock
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
return consensus.ErrPrunedAncestor
}
// Header validity is known at this point, check the uncles and transactions
header := block.Header()
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
@ -70,6 +64,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
return consensus.ErrPrunedAncestor
}
return nil
}

View File

@ -47,7 +47,10 @@ import (
)
var (
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@ -68,9 +71,10 @@ const (
// CacheConfig contains the configuration values for the trie caching/pruning
// that's resident in a blockchain.
type CacheConfig struct {
Disabled bool // Whether to disable trie write caching (archive node)
TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
Disabled bool // Whether to disable trie write caching (archive node)
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
// BlockChain represents the canonical chain given a database with a genesis
@ -140,8 +144,9 @@ type BlockChain struct {
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = &CacheConfig{
TrieNodeLimit: 256 * 1024 * 1024,
TrieTimeLimit: 5 * time.Minute,
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
}
}
bodyCache, _ := lru.New(bodyCacheLimit)
@ -156,7 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabase(db),
stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit),
quit: make(chan struct{}),
shouldPreserve: shouldPreserve,
bodyCache: bodyCache,
@ -205,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool {
return atomic.LoadInt32(&bc.procInterrupt) == 1
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
@ -393,6 +403,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache)
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
return bc.ResetWithGenesisBlock(bc.genesisBlock)
@ -438,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error {
return nil
}
// Otherwise rewind one block and recheck state availability there
(*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
if block == nil {
return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
}
(*head) = block
}
}
@ -554,6 +573,17 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
@ -611,12 +641,10 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number)
bc.receiptsCache.Add(hash, receipts)
return receipts
@ -938,7 +966,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
nodes, imgs = triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
triedb.Cap(limit - ethdb.IdealBatchSize)
@ -1002,7 +1030,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
}
// Write the positional metadata for transaction/receipt lookups and preimages
rawdb.WriteTxLookupEntries(batch, block)
rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages())
rawdb.WritePreimages(batch, state.Preimages())
status = CanonStatTy
} else {
@ -1020,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
return status, nil
}
// addFutureBlock checks if the block is within the max allowed window to get
// accepted for future processing, and returns an error if the block is too far
// ahead and was not added.
func (bc *BlockChain) addFutureBlock(block *types.Block) error {
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) > 0 {
return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
return nil
}
// InsertChain attempts to insert the given batch of blocks in to the canonical
// chain or, otherwise, create a fork. If an error is returned it will return
// the index number of the failing block as well an error describing what went
@ -1027,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
//
// After insertion is done, all accumulated events will be fired.
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
n, events, logs, err := bc.insertChain(chain)
bc.PostChainEvents(events, logs)
return n, err
}
// insertChain will execute the actual chain insertion and event aggregation. The
// only reason this method exists as a separate one is to make locking cleaner
// with deferred statements.
func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) {
// Sanity check that we have something meaningful to import
if len(chain) == 0 {
return 0, nil, nil, nil
return 0, nil
}
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@ -1047,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
defer bc.wg.Done()
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
n, events, logs, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()
bc.PostChainEvents(events, logs)
return n, err
}
// insertChain is the internal implementation of insertChain, which assumes that
// 1) chains are contiguous, and 2) The chain mutex is held.
//
// This method is split out so that import batches that require re-injecting
// historical blocks can do so without releasing the lock, which could lead to
// racy behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then a new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again.
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@ -1073,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
for i, block := range chain {
headers[i] = block.Header()
seals[i] = true
seals[i] = verifySeals
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// Peek at the error for the first block to decide which import path to take
it := newInsertIterator(chain, results, bc.Validator())
// Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
block, err := it.next()
switch {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
case err == consensus.ErrPrunedAncestor:
return bc.insertSidechain(it)
// First block is future, shove it (and all children) to the future queue (unknown ancestor)
case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
block, err = it.next()
}
stats.queued += it.processed()
stats.ignored += it.remaining()
// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err
// First block (and state) is known
// 1. We did a roll-back, and should now do a re-import
// 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot
// from the canonical chain, which has not been verified.
case err == ErrKnownBlock:
// Skip all known blocks that are behind us
current := bc.CurrentBlock().NumberU64()
for block != nil && err == ErrKnownBlock && current >= block.NumberU64() {
stats.ignored++
block, err = it.next()
}
// Falls through to the block import
// Some other error occurred, abort
case err != nil:
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil; block, err = it.next() {
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
@ -1091,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return i, events, coalescedLogs, ErrBlacklistedHash
return it.index, events, coalescedLogs, ErrBlacklistedHash
}
// Wait for the block's verification to complete
bstart := time.Now()
// Retrieve the parent block and its state to execute on top
start := time.Now()
err := <-results
if err == nil {
err = bc.Validator().ValidateBody(block)
}
switch {
case err == ErrKnownBlock:
// Block and state both already known. However if the current block is below
// this number we did a rollback and we should reimport it nonetheless.
if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
stats.ignored++
continue
}
case err == consensus.ErrFutureBlock:
// Allow up to MaxFuture second in the future blocks. If this limit is exceeded
// the chain is discarded and processed at a later time if given.
max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
if block.Time().Cmp(max) > 0 {
return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
}
bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
bc.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
case err == consensus.ErrPrunedAncestor:
// Block competing with the canonical chain, store in the db, but don't process
// until the competitor TD goes above the canonical TD
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
if localTd.Cmp(externTd) > 0 {
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
return i, events, coalescedLogs, err
}
continue
}
// Competitor chain beat canonical, gather all blocks from the common ancestor
var winner []*types.Block
parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
for !bc.HasState(parent.Root()) {
winner = append(winner, parent)
parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1)
}
for j := 0; j < len(winner)/2; j++ {
winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j]
}
// Import all the pruned blocks to make the state available
bc.chainmu.Unlock()
_, evs, logs, err := bc.insertChain(winner)
bc.chainmu.Lock()
events, coalescedLogs = evs, logs
if err != nil {
return i, events, coalescedLogs, err
}
case err != nil:
bc.reportBlock(block, nil, err)
return i, events, coalescedLogs, err
}
// Create a new statedb using the parent block and report an
// error if it fails.
var parent *types.Block
if i == 0 {
parent := it.previous()
if parent == nil {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
} else {
parent = chain[i-1]
}
state, err := state.New(parent.Root(), bc.stateCache)
if err != nil {
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
// Process block using the parent state as reference point.
t0 := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig)
t1 := time.Now()
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
// Validate the state using the default validator
err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas)
if err != nil {
if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
proctime := time.Since(bstart)
t2 := time.Now()
proctime := time.Since(start)
// Write the block to the chain and get the status.
status, err := bc.WriteBlockWithState(block, receipts, state)
t3 := time.Now()
if err != nil {
return i, events, coalescedLogs, err
return it.index, events, coalescedLogs, err
}
blockInsertTimer.UpdateSince(start)
blockExecutionTimer.Update(t1.Sub(t0))
blockValidationTimer.Update(t2.Sub(t1))
blockWriteTimer.Update(t3.Sub(t2))
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@ -1207,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
bc.gcproc += proctime
case SideStatTy:
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
blockInsertTimer.UpdateSince(bstart)
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
events = append(events, ChainSideEvent{block})
}
blockInsertTimer.UpdateSince(start)
stats.processed++
stats.usedGas += usedGas
cache, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, it.index, cache)
}
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
block, err = it.next()
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
}
stats.queued++
}
}
stats.ignored += it.remaining()
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return it.index, events, coalescedLogs, err
}
// insertSidechain is called when an import batch hits upon a pruned ancestor
// error, which happens when a sidechain with a sufficiently old fork-block is
// found.
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
var (
externTd *big.Int
current = bc.CurrentBlock().NumberU64()
)
// The first sidechain block error is already verified to be ErrPrunedAncestor.
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
// ones. Any other error means the block is invalid, and should not be written
// to disk.
block, err := it.current(), consensus.ErrPrunedAncestor
for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
// Check the canonical state root for that number
if number := block.NumberU64(); current >= number {
if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() {
// This is most likely a shadow-state attack. When a fork is imported into the
// database, and it eventually reaches a block height which is not pruned, we
// just found that the state already exists! This means that the sidechain block
// refers to a state which already exists in our canon chain.
//
// If left unchecked, we would now proceed importing the blocks, without actually
// having verified the state of the previous blocks.
log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root())
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
externTd = new(big.Int).Add(externTd, block.Difficulty())
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
}
log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
}
// At this point, we've written all sidechain blocks to database. Loop ended
// either on some other error or all were processed. If there was some other
// error, we can ignore the rest of those blocks.
//
// If the externTd was larger than our local TD, we now need to reimport the previous
// blocks to regenerate the required state
localTd := bc.GetTd(bc.CurrentBlock().Hash(), current)
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd)
return it.index, nil, nil, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
hashes []common.Hash
numbers []uint64
)
parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64())
for parent != nil && !bc.HasState(parent.Root) {
hashes = append(hashes, parent.Hash())
numbers = append(numbers, parent.Number.Uint64())
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, nil, nil, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
blocks []*types.Block
memory common.StorageSize
)
for i := len(hashes) - 1; i >= 0; i-- {
// Append the next block to our batch
block := bc.GetBlock(hashes[i], numbers[i])
blocks = append(blocks, block)
memory += block.Size()
// If memory use grew too large, import and continue. Sadly we need to discard
// all raised events and logs from notifications since we're too heavy on the
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
return 0, nil, nil, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
return 0, nil, nil, nil
}
}
}
if len(blocks) > 0 {
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
return 0, nil, nil, nil
}
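To see where this sidechain path is engaged, here is a minimal sketch of the assumed dispatch site in the iterator-based insertChain (hypothetical excerpt reconstructed from the error handling above, not quoted from the diff):

// Hypothetical caller shape: peek at the first verification failure and hand
// the whole iterator to insertSidechain when the ancestor state was pruned.
block, err := it.next()
if err == consensus.ErrPrunedAncestor {
	log.Debug("Pruned ancestor, importing as sidechain", "number", block.Number(), "hash", block.Hash())
	return bc.insertSidechain(it)
}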
// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
@ -1453,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e
bc.addBadBlock(block)
var receiptString string
for i, receipt := range receipts {
receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n",
i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(),
receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########

View File

@ -0,0 +1,143 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
usedGas uint64
lastIndex int
startTime mclock.AbsTime
}
// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
// Fetch the timings for the batch
var (
now = mclock.Now()
elapsed = time.Duration(now) - time.Duration(st.startTime)
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
// Count the number of transactions in this segment
var txs int
for _, block := range chain[st.lastIndex : index+1] {
txs += len(block.Transactions())
}
end := chain[index]
// Assemble the log context and send it to the logger
context := []interface{}{
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
"number", end.Number(), "hash", end.Hash(),
}
if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"cache", cache}...)
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
log.Info("Imported new chain segment", context...)
// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}
}
// insertIterator is a helper to assist during chain import.
type insertIterator struct {
chain types.Blocks
results <-chan error
index int
validator Validator
}
// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
return &insertIterator{
chain: chain,
results: results,
index: -1,
validator: validator,
}
}
// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next() (*types.Block, error) {
if it.index+1 >= len(it.chain) {
it.index = len(it.chain)
return nil, nil
}
it.index++
if err := <-it.results; err != nil {
return it.chain[it.index], err
}
return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
}
// current returns the current block that's being processed.
func (it *insertIterator) current() *types.Block {
if it.index < 0 || it.index+1 > len(it.chain) {
return nil
}
return it.chain[it.index]
}
// previous returns the previous block that was being processed, or nil.
func (it *insertIterator) previous() *types.Block {
if it.index < 1 {
return nil
}
return it.chain[it.index-1]
}
// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
return it.chain[0]
}
// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
return len(it.chain) - it.index
}
// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
return it.index + 1
}
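A short usage sketch of the iterator (hypothetical driver code, not part of this diff; the header verification setup mirrors the usual VerifyHeaders call):

// Hypothetical: verify headers concurrently, then walk the blocks until the
// first verification or body-validation error surfaces.
headers := make([]*types.Header, len(chain))
seals := make([]bool, len(chain))
for i, block := range chain {
	headers[i] = block.Header()
	seals[i] = true
}
abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
defer close(abort)

it := newInsertIterator(chain, results, bc.Validator())
for block, err := it.next(); block != nil && err == nil; block, err = it.next() {
	// execute and commit block; it.index reports the failing position on error
}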

View File

@ -33,12 +33,11 @@ import (
// BlockGen creates blocks for testing.
// See GenerateChain for a detailed explanation.
type BlockGen struct {
i int
parent *types.Block
chain []*types.Block
header *types.Header
statedb *state.StateDB
gasPool *GasPool
txs []*types.Transaction
@ -138,7 +137,7 @@ func (b *BlockGen) AddUncle(h *types.Header) {
// For index -1, PrevBlock returns the parent block given to GenerateChain.
func (b *BlockGen) PrevBlock(index int) *types.Block {
if index >= b.i {
panic("block index out of range")
panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i))
}
if index == -1 {
return b.parent
@ -154,7 +153,8 @@ func (b *BlockGen) OffsetTime(seconds int64) {
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
chainreader := &fakeChainReader{config: b.config}
b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header())
}
// GenerateChain creates a chain of n blocks. The first block's
@ -174,14 +174,10 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
config = params.TestChainConfig
}
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
chainreader := &fakeChainReader{config: config}
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
// Mutate the state and block according to any hard-fork specs
if daoBlock := config.DAOForkBlock; daoBlock != nil {
@ -201,7 +197,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
if b.engine != nil {
// Finalize and seal the block
block, _ := b.engine.Finalize(b.chainReader, b.header, statedb, b.txs, b.uncles, b.receipts)
block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
// Write state changes to db
root, err := statedb.Commit(config.IsEIP158(b.header.Number))
@ -269,3 +265,19 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd
})
return blocks
}
type fakeChainReader struct {
config *params.ChainConfig
genesis *types.Block
}
// Config returns the chain configuration.
func (cr *fakeChainReader) Config() *params.ChainConfig {
return cr.config
}
func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil }
func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil }
func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil }
func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil }
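With the throwaway BlockChain gone, GenerateChain only needs a database, an engine and the fakeChainReader it builds internally. A hedged example of calling it from a test (the setup names gendb and genesis are assumed):

// Hypothetical test usage: extend a genesis block by ten blocks using the
// mocked ethash engine; no BlockChain instance is required anymore.
blocks, receipts := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), gendb, 10,
	func(i int, gen *BlockGen) {
		gen.OffsetTime(int64(i + 1)) // difficulty is recalculated via the internal fakeChainReader
	})
_, _ = blocks, receipts // hand off to InsertChain in a real test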

View File

@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
if constantinopleOverride != nil {
newcfg.ConstantinopleBlock = constantinopleOverride
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")

View File

@ -271,6 +271,15 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
}
}
// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db DatabaseReader, hash common.Hash, number uint64) bool {
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
return false
}
return true
}
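A small sketch of the intended use (hypothetical caller; fast sync can use this to skip blocks whose receipts are already on disk):

// Hypothetical: only schedule a receipt download when nothing is stored yet.
if !HasReceipts(db, hash, number) {
	queueReceiptFetch(hash, number) // assumed scheduling helper
}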
// ReadReceipts retrieves all the transaction receipts belonging to a block.
func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
// Retrieve the flattened receipt slice
@ -278,7 +287,7 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
if len(data) == 0 {
return nil
}
// Convert the receipts from their storage form to their internal representation
storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)

View File

@ -77,9 +77,8 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
return data
}
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db DatabaseWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)

View File

@ -35,7 +35,7 @@ var (
// headBlockKey tracks the latest known full block's hash.
headBlockKey = []byte("LastBlock")
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
// fastTrieProgressKey tracks the number of trie entries imported during fast sync.

View File

@ -72,13 +72,19 @@ type Trie interface {
}
// NewDatabase creates a backing store for state. The returned database is safe for
// concurrent use and retains a few recent expanded trie nodes in memory. To keep
// more historical state in memory, use the NewDatabaseWithCache constructor.
func NewDatabase(db ethdb.Database) Database {
return NewDatabaseWithCache(db, 0)
}
// NewDatabaseWithCache creates a backing store for state. The returned database is safe for
// concurrent use and retains both a few recent expanded trie nodes in memory, as
// well as a lot of collapsed RLP trie nodes in a large memory cache.
func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
csc, _ := lru.New(codeSizeCacheSize)
return &cachingDB{
db: trie.NewDatabaseWithCache(db, cache),
codeSizeCache: csc,
}
}
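A hedged usage sketch (the 256 MB cache size is illustrative): NewDatabase keeps the previous zero-cache behaviour, while callers wanting more state in memory size the cache explicitly.

// Hypothetical: back a StateDB with a 256 MB collapsed trie node cache.
sdb := NewDatabaseWithCache(diskdb, 256)
statedb, err := New(root, sdb)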

Some files were not shown because too many files have changed in this diff.