From b1f9030177a36f6d910e8b5a83edc8744bbc5471 Mon Sep 17 00:00:00 2001 From: Adam Babik Date: Wed, 19 Dec 2018 11:02:07 +0100 Subject: [PATCH] update to geth v1.8.20 (#1327) --- Gopkg.lock | 21 +- Gopkg.toml | 4 +- cmd/bootnode/main.go | 3 +- discovery/discv5.go | 3 +- vendor/github.com/allegro/bigcache/LICENSE | 201 ++ .../github.com/allegro/bigcache/bigcache.go | 155 + vendor/github.com/allegro/bigcache/clock.go | 14 + vendor/github.com/allegro/bigcache/config.go | 67 + .../github.com/allegro/bigcache/encoding.go | 70 + .../allegro/bigcache/entry_not_found_error.go | 17 + vendor/github.com/allegro/bigcache/fnv.go | 28 + vendor/github.com/allegro/bigcache/hash.go | 8 + .../github.com/allegro/bigcache/iterator.go | 122 + vendor/github.com/allegro/bigcache/logger.go | 30 + .../allegro/bigcache/queue/bytes_queue.go | 210 ++ vendor/github.com/allegro/bigcache/shard.go | 229 ++ vendor/github.com/allegro/bigcache/stats.go | 15 + vendor/github.com/allegro/bigcache/utils.go | 16 + .../ethereum/go-ethereum/.github/CODEOWNERS | 13 +- .../ethereum/go-ethereum/.travis.yml | 10 +- .../github.com/ethereum/go-ethereum/README.md | 4 +- .../patches/0000-accounts-hd-keys.patch | 49 +- .../_assets/patches/0038-ulc.patch | 117 +- .../go-ethereum/accounts/abi/argument.go | 19 +- .../ethereum/go-ethereum/accounts/abi/type.go | 58 +- .../ethereum/go-ethereum/accounts/hd.go | 4 +- .../accounts/keystore/account_cache.go | 5 +- .../go-ethereum/accounts/keystore/key.go | 16 +- .../go-ethereum/accounts/keystore/keystore.go | 1 + .../{keystore_passphrase.go => passphrase.go} | 84 +- .../keystore/{keystore_plain.go => plain.go} | 0 .../go-ethereum/accounts/keystore/presale.go | 8 +- .../{keystore_wallet.go => wallet.go} | 0 .../go-ethereum/accounts/usbwallet/ledger.go | 2 +- .../go-ethereum/accounts/usbwallet/trezor.go | 2 +- .../ethereum/go-ethereum/appveyor.yml | 4 +- .../ethereum/go-ethereum/cmd/bootnode/main.go | 15 +- .../ethereum/go-ethereum/cmd/clef/README.md | 6 +- .../go-ethereum/cmd/clef/intapi_changelog.md | 5 +- .../ethereum/go-ethereum/cmd/clef/main.go | 211 +- .../ethereum/go-ethereum/cmd/evm/runner.go | 3 +- .../go-ethereum/cmd/evm/staterunner.go | 2 +- .../ethereum/go-ethereum/cmd/faucet/faucet.go | 2 +- .../ethereum/go-ethereum/cmd/geth/config.go | 5 +- .../ethereum/go-ethereum/cmd/geth/main.go | 6 +- .../ethereum/go-ethereum/cmd/geth/usage.go | 2 + .../go-ethereum/cmd/puppeth/genesis.go | 388 +- .../cmd/puppeth/module_dashboard.go | 2 +- .../cmd/puppeth/module_ethstats.go | 3 +- .../cmd/puppeth/module_explorer.go | 1 + .../go-ethereum/cmd/puppeth/module_faucet.go | 4 +- .../go-ethereum/cmd/puppeth/module_nginx.go | 1 + .../go-ethereum/cmd/puppeth/module_node.go | 1 + .../go-ethereum/cmd/puppeth/module_wallet.go | 1 + .../go-ethereum/cmd/puppeth/puppeth.go | 19 +- .../cmd/puppeth/testdata/stureby_aleth.json | 112 + .../cmd/puppeth/testdata/stureby_geth.json | 47 + .../cmd/puppeth/testdata/stureby_parity.json | 181 + .../go-ethereum/cmd/puppeth/wizard.go | 42 + .../cmd/puppeth/wizard_dashboard.go | 4 +- .../cmd/puppeth/wizard_ethstats.go | 6 +- .../cmd/puppeth/wizard_explorer.go | 2 +- .../go-ethereum/cmd/puppeth/wizard_faucet.go | 8 +- .../go-ethereum/cmd/puppeth/wizard_genesis.go | 144 +- .../go-ethereum/cmd/puppeth/wizard_intro.go | 22 +- .../go-ethereum/cmd/puppeth/wizard_nginx.go | 4 +- .../go-ethereum/cmd/puppeth/wizard_node.go | 4 +- .../go-ethereum/cmd/puppeth/wizard_wallet.go | 2 +- .../ethereum/go-ethereum/cmd/swarm/access.go | 66 +- .../ethereum/go-ethereum/cmd/swarm/config.go | 1 + 
.../ethereum/go-ethereum/cmd/swarm/db.go | 42 + .../go-ethereum/cmd/swarm/download.go | 9 + .../ethereum/go-ethereum/cmd/swarm/feeds.go | 68 +- .../ethereum/go-ethereum/cmd/swarm/flags.go | 179 + .../ethereum/go-ethereum/cmd/swarm/fs.go | 61 +- .../ethereum/go-ethereum/cmd/swarm/hash.go | 9 + .../ethereum/go-ethereum/cmd/swarm/list.go | 9 + .../ethereum/go-ethereum/cmd/swarm/main.go | 438 +-- .../go-ethereum/cmd/swarm/manifest.go | 34 + .../swarm/swarm-smoke/feed_upload_and_sync.go | 366 ++ .../go-ethereum/cmd/swarm/swarm-smoke/main.go | 99 +- .../cmd/swarm/swarm-smoke/upload_and_sync.go | 180 +- .../ethereum/go-ethereum/cmd/swarm/upload.go | 49 +- .../ethereum/go-ethereum/cmd/utils/cmd.go | 4 +- .../ethereum/go-ethereum/cmd/utils/flags.go | 93 +- .../ethereum/go-ethereum/common/bytes.go | 9 + .../go-ethereum/common/compiler/solidity.go | 24 +- .../go-ethereum/consensus/clique/clique.go | 2 +- .../go-ethereum/consensus/ethash/api.go | 11 +- .../go-ethereum/consensus/ethash/ethash.go | 2 +- .../go-ethereum/consensus/ethash/sealer.go | 5 +- .../go-ethereum/core/block_validator.go | 12 +- .../ethereum/go-ethereum/core/blockchain.go | 444 ++- .../go-ethereum/core/blockchain_insert.go | 143 + .../ethereum/go-ethereum/core/chain_makers.go | 44 +- .../ethereum/go-ethereum/core/genesis.go | 6 + .../go-ethereum/core/rawdb/accessors_chain.go | 11 +- .../core/rawdb/accessors_metadata.go | 5 +- .../ethereum/go-ethereum/core/rawdb/schema.go | 2 +- .../go-ethereum/core/state/database.go | 14 +- .../go-ethereum/core/state/statedb.go | 32 +- .../ethereum/go-ethereum/core/tx_pool.go | 2 +- .../ethereum/go-ethereum/core/types/block.go | 4 +- .../go-ethereum/core/types/gen_header_json.go | 20 +- .../core/types/transaction_signing.go | 2 +- .../ethereum/go-ethereum/core/vm/evm.go | 6 + .../go-ethereum/core/vm/gen_structlog.go | 52 +- .../go-ethereum/core/vm/instructions.go | 27 +- .../ethereum/go-ethereum/core/vm/logger.go | 23 +- .../json_logger.go => core/vm/logger_json.go} | 30 +- .../ethereum/go-ethereum/eth/api.go | 6 +- .../ethereum/go-ethereum/eth/api_backend.go | 4 +- .../ethereum/go-ethereum/eth/api_tracer.go | 177 +- .../ethereum/go-ethereum/eth/backend.go | 6 +- .../ethereum/go-ethereum/eth/config.go | 31 +- .../go-ethereum/eth/downloader/downloader.go | 174 +- .../go-ethereum/eth/downloader/peer.go | 7 - .../go-ethereum/eth/downloader/queue.go | 15 +- .../go-ethereum/eth/downloader/statesync.go | 19 +- .../ethereum/go-ethereum/eth/gen_config.go | 28 +- .../ethereum/go-ethereum/eth/handler.go | 25 +- .../eth/tracers/internal/tracers/assets.go | 26 +- .../internal/tracers/prestate_tracer.js | 5 +- .../go-ethereum/eth/tracers/tracer.go | 16 +- .../go-ethereum/ethclient/ethclient.go | 36 +- .../ethereum/go-ethereum/ethdb/database.go | 70 +- .../ethereum/go-ethereum/ethdb/database_js.go | 68 + .../generic_filter.go => ethdb/table.go} | 53 +- .../ethereum/go-ethereum/ethdb/table_batch.go | 51 + .../go-ethereum/event/filter/filter.go | 95 - .../ethereum/go-ethereum/interfaces.go | 2 +- .../go-ethereum/internal/cmdtest/test_cmd.go | 21 +- .../go-ethereum/internal/ethapi/api.go | 88 +- .../go-ethereum/internal/ethapi/backend.go | 2 +- .../go-ethereum/internal/web3ext/web3ext.go | 18 + .../ethereum/go-ethereum/les/api_backend.go | 4 +- .../ethereum/go-ethereum/les/backend.go | 2 +- .../ethereum/go-ethereum/les/fetcher.go | 58 +- .../go-ethereum/les/flowcontrol/control.go | 1 - .../ethereum/go-ethereum/les/serverpool.go | 2 +- .../ethereum/go-ethereum/light/postprocess.go | 4 +- 
.../ethereum/go-ethereum/light/trie.go | 2 +- .../ethereum/go-ethereum/metrics/counter.go | 11 + .../go-ethereum/metrics/influxdb/influxdb.go | 28 + .../ethereum/go-ethereum/metrics/registry.go | 5 +- .../go-ethereum/miner/stress_clique.go | 23 +- .../go-ethereum/miner/stress_ethash.go | 23 +- .../ethereum/go-ethereum/miner/worker.go | 3 +- .../ethereum/go-ethereum/mobile/big.go | 7 + .../ethereum/go-ethereum/node/config.go | 44 +- .../ethereum/go-ethereum/node/node.go | 16 +- .../ethereum/go-ethereum/p2p/dial.go | 9 +- .../go-ethereum/p2p/discover/table.go | 43 +- .../ethereum/go-ethereum/p2p/discover/udp.go | 92 +- .../ethereum/go-ethereum/p2p/discv5/net.go | 9 +- .../ethereum/go-ethereum/p2p/discv5/udp.go | 3 +- .../go-ethereum/p2p/enode/localnode.go | 246 ++ .../ethereum/go-ethereum/p2p/enode/nodedb.go | 124 +- .../ethereum/go-ethereum/p2p/enr/enr.go | 2 +- .../ethereum/go-ethereum/p2p/metrics.go | 195 +- .../ethereum/go-ethereum/p2p/nat/nat.go | 18 +- .../go-ethereum/p2p/netutil/iptrack.go | 130 + .../ethereum/go-ethereum/p2p/protocol.go | 10 +- .../go-ethereum/p2p/protocols/accounting.go | 195 + .../go-ethereum/p2p/protocols/protocol.go | 32 +- .../go-ethereum/p2p/protocols/reporter.go | 147 + .../ethereum/go-ethereum/p2p/rlpx.go | 2 +- .../ethereum/go-ethereum/p2p/server.go | 258 +- .../go-ethereum/p2p/simulations/README.md | 12 - .../p2p/simulations/adapters/docker.go | 190 - .../p2p/simulations/adapters/exec.go | 225 +- .../p2p/simulations/adapters/ws.go | 51 - .../p2p/simulations/examples/ping-pong.go | 8 - .../go-ethereum/p2p/simulations/network.go | 163 +- .../ethereum/go-ethereum/params/config.go | 34 +- .../ethereum/go-ethereum/params/version.go | 2 +- .../ethereum/go-ethereum/rpc/client.go | 59 +- .../ethereum/go-ethereum/rpc/doc.go | 2 +- .../ethereum/go-ethereum/rpc/ipc.go | 4 +- .../{swarm/state/store.go => rpc/ipc_js.go} | 29 +- .../ethereum/go-ethereum/rpc/stdio.go | 66 + .../ethereum/go-ethereum/rpc/subscription.go | 34 +- .../ethereum/go-ethereum/signer/core/api.go | 8 +- .../ethereum/go-ethereum/swarm/OWNERS | 1 - .../ethereum/go-ethereum/swarm/api/act.go | 3 + .../ethereum/go-ethereum/swarm/api/api.go | 27 +- .../go-ethereum/swarm/api/client/client.go | 76 +- .../go-ethereum/swarm/api/filesystem.go | 4 + .../go-ethereum/swarm/api/http/middleware.go | 12 +- .../go-ethereum/swarm/api/http/response.go | 25 +- .../go-ethereum/swarm/api/http/sctx.go | 14 +- .../go-ethereum/swarm/api/http/server.go | 97 +- .../http.go => api/http/test_server.go} | 2 +- .../go-ethereum/swarm/api/manifest.go | 1 - .../swarm/grafana_dashboards/ldbstore.json | 2278 ------------ .../swarm/grafana_dashboards/swarm.json | 3198 ----------------- .../go-ethereum/swarm/metrics/flags.go | 32 +- .../go-ethereum/swarm/multihash/multihash.go | 92 - .../go-ethereum/swarm/network/hive.go | 2 +- .../go-ethereum/swarm/network/kademlia.go | 173 +- .../go-ethereum/swarm/network/protocol.go | 4 +- .../swarm/network/simulation/events.go | 114 +- .../swarm/network/simulation/kademlia.go | 1 + .../swarm/network/simulation/simulation.go | 7 + .../swarm/network/simulations/overlay.go | 4 +- .../swarm/network/stream/delivery.go | 31 +- .../swarm/network/stream/messages.go | 15 +- .../go-ethereum/swarm/network/stream/peer.go | 42 +- .../swarm/network/stream/stream.go | 188 +- .../swarm/network/stream/syncer.go | 52 +- .../ethereum/go-ethereum/swarm/pot/address.go | 40 - .../ethereum/go-ethereum/swarm/pss/api.go | 16 +- .../go-ethereum/swarm/pss/client/client.go | 2 +- .../go-ethereum/swarm/pss/handshake.go | 2 +- 
.../go-ethereum/swarm/pss/notify/notify.go | 4 +- .../ethereum/go-ethereum/swarm/pss/pss.go | 171 +- .../ethereum/go-ethereum/swarm/pss/types.go | 38 +- .../ethereum/go-ethereum/swarm/sctx/sctx.go | 12 +- .../ethereum/go-ethereum/swarm/shed/db.go | 130 + .../go-ethereum/swarm/shed/field_string.go | 66 + .../go-ethereum/swarm/shed/field_struct.go | 71 + .../go-ethereum/swarm/shed/field_uint64.go | 108 + .../ethereum/go-ethereum/swarm/shed/index.go | 264 ++ .../ethereum/go-ethereum/swarm/shed/schema.go | 134 + .../go-ethereum/swarm/state/dbstore.go | 23 +- .../go-ethereum/swarm/state/inmemorystore.go | 94 - .../go-ethereum/swarm/storage/chunker.go | 8 + .../go-ethereum/swarm/storage/ldbstore.go | 535 ++- .../go-ethereum/swarm/storage/localstore.go | 59 +- .../go-ethereum/swarm/storage/mock/db/db.go | 7 + .../go-ethereum/swarm/storage/mock/mem/mem.go | 16 + .../go-ethereum/swarm/storage/mock/mock.go | 7 + .../go-ethereum/swarm/storage/mock/rpc/rpc.go | 6 + .../swarm/storage/mock/test/test.go | 53 + .../go-ethereum/swarm/storage/schema.go | 13 +- .../go-ethereum/swarm/storage/types.go | 29 +- .../ethereum/go-ethereum/swarm/swap/swap.go | 98 + .../ethereum/go-ethereum/swarm/swarm.go | 83 +- .../go-ethereum/swarm/testutil/file.go | 21 + .../go-ethereum/swarm/tracing/tracing.go | 5 +- .../go-ethereum/swarm/version/version.go | 2 +- .../go-ethereum/tests/block_test_util.go | 23 +- .../ethereum/go-ethereum/tests/init.go | 9 + .../ethereum/go-ethereum/trie/database.go | 207 +- .../ethereum/go-ethereum/trie/iterator.go | 2 + .../ethereum/go-ethereum/trie/proof.go | 2 + .../whisper/mailserver/mailserver.go | 12 +- 247 files changed, 9193 insertions(+), 8855 deletions(-) create mode 100644 vendor/github.com/allegro/bigcache/LICENSE create mode 100644 vendor/github.com/allegro/bigcache/bigcache.go create mode 100644 vendor/github.com/allegro/bigcache/clock.go create mode 100644 vendor/github.com/allegro/bigcache/config.go create mode 100644 vendor/github.com/allegro/bigcache/encoding.go create mode 100644 vendor/github.com/allegro/bigcache/entry_not_found_error.go create mode 100644 vendor/github.com/allegro/bigcache/fnv.go create mode 100644 vendor/github.com/allegro/bigcache/hash.go create mode 100644 vendor/github.com/allegro/bigcache/iterator.go create mode 100644 vendor/github.com/allegro/bigcache/logger.go create mode 100644 vendor/github.com/allegro/bigcache/queue/bytes_queue.go create mode 100644 vendor/github.com/allegro/bigcache/shard.go create mode 100644 vendor/github.com/allegro/bigcache/stats.go create mode 100644 vendor/github.com/allegro/bigcache/utils.go rename vendor/github.com/ethereum/go-ethereum/accounts/keystore/{keystore_passphrase.go => passphrase.go} (90%) rename vendor/github.com/ethereum/go-ethereum/accounts/keystore/{keystore_plain.go => plain.go} (100%) rename vendor/github.com/ethereum/go-ethereum/accounts/keystore/{keystore_wallet.go => wallet.go} (100%) create mode 100644 vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json create mode 100644 vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json create mode 100644 vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json create mode 100644 vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go create mode 100644 vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go create mode 100644 vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go rename vendor/github.com/ethereum/go-ethereum/{cmd/evm/json_logger.go 
=> core/vm/logger_json.go} (75%) create mode 100644 vendor/github.com/ethereum/go-ethereum/ethdb/database_js.go rename vendor/github.com/ethereum/go-ethereum/{event/filter/generic_filter.go => ethdb/table.go} (51%) create mode 100644 vendor/github.com/ethereum/go-ethereum/ethdb/table_batch.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/event/filter/filter.go create mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/enode/localnode.go create mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/netutil/iptrack.go create mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go create mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/ws.go rename vendor/github.com/ethereum/go-ethereum/{swarm/state/store.go => rpc/ipc_js.go} (59%) create mode 100644 vendor/github.com/ethereum/go-ethereum/rpc/stdio.go rename vendor/github.com/ethereum/go-ethereum/swarm/{testutil/http.go => api/http/test_server.go} (99%) delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go delete mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go create mode 100644 vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go diff --git a/Gopkg.lock b/Gopkg.lock index 08b783c46..16088f2db 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -21,6 +21,17 @@ pruneopts = "NUT" revision = "5312a61534124124185d41f09206b9fef1d88403" +[[projects]] + digest = "1:d1de7e0984d40e19819546c8deb7dc451d3c4c59fd12977773aaa6b528523f7f" + name = "github.com/allegro/bigcache" + packages = [ + ".", + "queue", + ] + pruneopts = "NUT" + revision = "f31987a23e44c5121ef8c8b2f2ea2e8ffa37b068" + version = "v1.1.0" + [[projects]] digest = "1:f5322546f652db78b7a8efd35047a61d1e492abca2263e1c647eca49e1c8a354" name = "github.com/aristanetworks/goarista" @@ -88,7 +99,7 @@ revision = "935e0e8a636ca4ba70b713f3e38a19e1b77739e8" [[projects]] - digest = "1:9919b13a30417311490b5e631bfecc14b20c1aeec85fb0ba5286efaa300622d6" + digest = "1:ca77c7225db49544c7bf8ebc93d18e4063f32e60ed4894ff590c1cb8fe315d66" name = "github.com/ethereum/go-ethereum" packages = [ ".", @@ -158,9 +169,9 @@ "trie", ] pruneopts = "T" - revision = "97bb147b17ed32d82d0ce1dd110caa31d02923db" + revision = "ffe89538dae6e643cb5dc599fe58c59a29eb75e5" source = "github.com/status-im/go-ethereum" - version = "v1.8.17" + version = "v1.8.20" [[projects]] digest = "1:5ac7ecd476a2355a5201229081df2e5f57333ecf703e1f69dde699ae34169c1b" @@ -826,8 +837,8 @@ name = "github.com/status-im/whisper" packages = ["whisperv6"] pruneopts = "NUT" - revision = "82a7734c369137d50fcbcae86230d83db6bfc885" - version = "v1.4.4" + revision = 
"109fa96320654a4f15f158a03245e7cd7457574a" + version = "v1.4.5" [[projects]] digest = "1:572c783a763db6383aca3179976eb80e4c900f52eba56cba8bb2e3cea7ce720e" diff --git a/Gopkg.toml b/Gopkg.toml index 0d8516583..29b954429 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -24,12 +24,12 @@ [[constraint]] name = "github.com/ethereum/go-ethereum" - version = "=v1.8.17" + version = "=v1.8.20" source = "github.com/status-im/go-ethereum" [[constraint]] name = "github.com/status-im/whisper" - version = "=v1.4.4" + version = "=v1.4.5" [[override]] name = "github.com/golang/protobuf" diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 6ade25bd5..83c8b9faf 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -79,8 +79,7 @@ func main() { log.Crit("Unable to listen on udp", "address", addr, "error", err) } - realaddr := conn.LocalAddr().(*net.UDPAddr) - tab, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", nil) + tab, err := discv5.ListenUDP(nodeKey, conn, "", nil) if err != nil { log.Crit("Failed to create discovery v5 table:", "error", err) } diff --git a/discovery/discv5.go b/discovery/discv5.go index 6ffe1fa89..882b0fe4f 100644 --- a/discovery/discv5.go +++ b/discovery/discv5.go @@ -49,8 +49,7 @@ func (d *DiscV5) Start() error { if err != nil { return err } - realaddr := conn.LocalAddr().(*net.UDPAddr) - ntab, err := discv5.ListenUDP(d.prv, conn, realaddr, "", nil) + ntab, err := discv5.ListenUDP(d.prv, conn, "", nil) if err != nil { return err } diff --git a/vendor/github.com/allegro/bigcache/LICENSE b/vendor/github.com/allegro/bigcache/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/allegro/bigcache/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/allegro/bigcache/bigcache.go b/vendor/github.com/allegro/bigcache/bigcache.go new file mode 100644 index 000000000..3a6f6bd66 --- /dev/null +++ b/vendor/github.com/allegro/bigcache/bigcache.go @@ -0,0 +1,155 @@ +package bigcache + +import ( + "fmt" + "time" +) + +const ( + minimumEntriesInShard = 10 // Minimum number of entries in single shard +) + +// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance. +// It keeps entries on heap but omits GC for them. To achieve that operations on bytes arrays take place, +// therefore entries (de)serialization in front of the cache will be needed in most use cases. 
+type BigCache struct {
+	shards       []*cacheShard
+	lifeWindow   uint64
+	clock        clock
+	hash         Hasher
+	config       Config
+	shardMask    uint64
+	maxShardSize uint32
+}
+
+// NewBigCache initializes a new instance of BigCache
+func NewBigCache(config Config) (*BigCache, error) {
+	return newBigCache(config, &systemClock{})
+}
+
+func newBigCache(config Config, clock clock) (*BigCache, error) {
+
+	if !isPowerOfTwo(config.Shards) {
+		return nil, fmt.Errorf("Shards number must be power of two")
+	}
+
+	if config.Hasher == nil {
+		config.Hasher = newDefaultHasher()
+	}
+
+	cache := &BigCache{
+		shards:       make([]*cacheShard, config.Shards),
+		lifeWindow:   uint64(config.LifeWindow.Seconds()),
+		clock:        clock,
+		hash:         config.Hasher,
+		config:       config,
+		shardMask:    uint64(config.Shards - 1),
+		maxShardSize: uint32(config.maximumShardSize()),
+	}
+
+	var onRemove func(wrappedEntry []byte)
+	if config.OnRemove == nil {
+		onRemove = cache.notProvidedOnRemove
+	} else {
+		onRemove = cache.providedOnRemove
+	}
+
+	for i := 0; i < config.Shards; i++ {
+		cache.shards[i] = initNewShard(config, onRemove, clock)
+	}
+
+	if config.CleanWindow > 0 {
+		go func() {
+			for t := range time.Tick(config.CleanWindow) {
+				cache.cleanUp(uint64(t.Unix()))
+			}
+		}()
+	}
+
+	return cache, nil
+}
+
+// Get reads entry for the key.
+// It returns an EntryNotFoundError when
+// no entry exists for the given key.
+func (c *BigCache) Get(key string) ([]byte, error) {
+	hashedKey := c.hash.Sum64(key)
+	shard := c.getShard(hashedKey)
+	return shard.get(key, hashedKey)
+}
+
+// Set saves entry under the key
+func (c *BigCache) Set(key string, entry []byte) error {
+	hashedKey := c.hash.Sum64(key)
+	shard := c.getShard(hashedKey)
+	return shard.set(key, hashedKey, entry)
+}
+
+// Delete removes the key
+func (c *BigCache) Delete(key string) error {
+	hashedKey := c.hash.Sum64(key)
+	shard := c.getShard(hashedKey)
+	return shard.del(key, hashedKey)
+}
+
+// Reset empties all cache shards
+func (c *BigCache) Reset() error {
+	for _, shard := range c.shards {
+		shard.reset(c.config)
+	}
+	return nil
+}
+
+// Len computes the number of entries in the cache
+func (c *BigCache) Len() int {
+	var len int
+	for _, shard := range c.shards {
+		len += shard.len()
+	}
+	return len
+}
+
+// Stats returns the cache's statistics
+func (c *BigCache) Stats() Stats {
+	var s Stats
+	for _, shard := range c.shards {
+		tmp := shard.getStats()
+		s.Hits += tmp.Hits
+		s.Misses += tmp.Misses
+		s.DelHits += tmp.DelHits
+		s.DelMisses += tmp.DelMisses
+		s.Collisions += tmp.Collisions
+	}
+	return s
+}
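For orientation, a minimal usage sketch of the exported API above (a hypothetical, self-contained example; names and values are illustrative only):

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/allegro/bigcache"
	)

	func main() {
		// Default configuration with a 10-minute entry life window.
		cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
		if err != nil {
			log.Fatal(err)
		}
		if err := cache.Set("my-key", []byte("my-value")); err != nil {
			log.Fatal(err)
		}
		entry, err := cache.Get("my-key") // an EntryNotFoundError is returned for unknown keys
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(entry)) // "my-value"
	}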
+// Iterator returns an iterator for walking over the EntryInfo records of the whole cache.
+func (c *BigCache) Iterator() *EntryInfoIterator {
+	return newIterator(c)
+}
+
+func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
+	oldestTimestamp := readTimestampFromEntry(oldestEntry)
+	if currentTimestamp-oldestTimestamp > c.lifeWindow {
+		evict()
+		return true
+	}
+	return false
+}
+
+func (c *BigCache) cleanUp(currentTimestamp uint64) {
+	for _, shard := range c.shards {
+		shard.cleanUp(currentTimestamp)
+	}
+}
+
+func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
+	return c.shards[hashedKey&c.shardMask]
+}
+
+func (c *BigCache) providedOnRemove(wrappedEntry []byte) {
+	c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
+}
+
+func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) {
+}
diff --git a/vendor/github.com/allegro/bigcache/clock.go b/vendor/github.com/allegro/bigcache/clock.go
new file mode 100644
index 000000000..f8b535e13
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/clock.go
@@ -0,0 +1,14 @@
+package bigcache
+
+import "time"
+
+type clock interface {
+	epoch() int64
+}
+
+type systemClock struct {
+}
+
+func (c systemClock) epoch() int64 {
+	return time.Now().Unix()
+}
diff --git a/vendor/github.com/allegro/bigcache/config.go b/vendor/github.com/allegro/bigcache/config.go
new file mode 100644
index 000000000..0a523947e
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/config.go
@@ -0,0 +1,67 @@
+package bigcache
+
+import "time"
+
+// Config for BigCache
+type Config struct {
+	// Number of cache shards; the value must be a power of two
+	Shards int
+	// Time after which an entry can be evicted
+	LifeWindow time.Duration
+	// Interval between removing expired entries (clean up).
+	// If set to <= 0 then no action is performed. Setting it to less than one second is counterproductive; bigcache has a one-second resolution.
+	CleanWindow time.Duration
+	// Max number of entries in the life window. Used only to calculate the initial size of the cache shards.
+	// When a proper value is set, no additional memory allocation occurs.
+	MaxEntriesInWindow int
+	// Max size of an entry in bytes. Used only to calculate the initial size of the cache shards.
+	MaxEntrySize int
+	// Verbose mode prints information about new memory allocations
+	Verbose bool
+	// Hasher used to map string keys to unsigned 64-bit integers; by default fnv64a hashing is used.
+	Hasher Hasher
+	// HardMaxCacheSize is a limit for the cache size in MB. The cache will not allocate more memory than this limit.
+	// It can protect the application from consuming all available memory and, in consequence, from the OOM killer.
+	// The default value is 0, which means unlimited size. When the limit is greater than 0 and is reached,
+	// the oldest entries are overwritten by new ones.
+	HardMaxCacheSize int
+	// OnRemove is a callback fired when the oldest entry is removed because of its expiration time or because no space is left
+	// for a new entry. The default value is nil, which means no callback; this also avoids unwrapping the oldest entry.
+	OnRemove func(key string, entry []byte)
+
+	// Logger is a logging interface used in combination with `Verbose`.
+	// Defaults to `DefaultLogger()`
+	Logger Logger
+}
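As an illustration of these knobs, a hand-built configuration might look as follows (a sketch; the field values are arbitrary examples, not recommendations):

	config := bigcache.Config{
		Shards:             16,               // must be a power of two
		LifeWindow:         10 * time.Minute, // entries older than this become evictable
		CleanWindow:        5 * time.Minute,  // expired entries are swept at this interval
		MaxEntriesInWindow: 1000 * 10 * 60,   // sizing hint only
		MaxEntrySize:       500,              // sizing hint only, in bytes
		HardMaxCacheSize:   256,              // MB; 0 would mean no limit
		OnRemove: func(key string, entry []byte) {
			log.Printf("evicted %q (%d bytes)", key, len(entry))
		},
	}
	cache, err := bigcache.NewBigCache(config)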
+// DefaultConfig initializes a config with default values.
+// When the load on BigCache can be predicted in advance, it is better to use a custom config.
+func DefaultConfig(eviction time.Duration) Config {
+	return Config{
+		Shards:             1024,
+		LifeWindow:         eviction,
+		CleanWindow:        0,
+		MaxEntriesInWindow: 1000 * 10 * 60,
+		MaxEntrySize:       500,
+		Verbose:            true,
+		Hasher:             newDefaultHasher(),
+		HardMaxCacheSize:   0,
+		Logger:             DefaultLogger(),
+	}
+}
+
+// initialShardSize computes the initial shard size
+func (c Config) initialShardSize() int {
+	return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard)
+}
+
+// maximumShardSize computes the maximum shard size
+func (c Config) maximumShardSize() int {
+	maxShardSize := 0
+
+	if c.HardMaxCacheSize > 0 {
+		maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards
+	}
+
+	return maxShardSize
+}
diff --git a/vendor/github.com/allegro/bigcache/encoding.go b/vendor/github.com/allegro/bigcache/encoding.go
new file mode 100644
index 000000000..5d90d71d4
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/encoding.go
@@ -0,0 +1,70 @@
+package bigcache
+
+import (
+	"encoding/binary"
+	"reflect"
+	"unsafe"
+)
+
+const (
+	timestampSizeInBytes = 8 // Number of bytes used for the timestamp
+	hashSizeInBytes      = 8 // Number of bytes used for the hash
+	keySizeInBytes       = 2 // Number of bytes used for the size of the entry key
+	headersSizeInBytes   = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
+)
+
+func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
+	keyLength := len(key)
+	blobLength := len(entry) + headersSizeInBytes + keyLength
+
+	if blobLength > len(*buffer) {
+		*buffer = make([]byte, blobLength)
+	}
+	blob := *buffer
+
+	binary.LittleEndian.PutUint64(blob, timestamp)
+	binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
+	binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
+	copy(blob[headersSizeInBytes:], key)
+	copy(blob[headersSizeInBytes+keyLength:], entry)
+
+	return blob[:blobLength]
+}
+
+func readEntry(data []byte) []byte {
+	length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
+
+	// copy on read
+	dst := make([]byte, len(data)-int(headersSizeInBytes+length))
+	copy(dst, data[headersSizeInBytes+length:])
+
+	return dst
+}
+
+func readTimestampFromEntry(data []byte) uint64 {
+	return binary.LittleEndian.Uint64(data)
+}
+
+func readKeyFromEntry(data []byte) string {
+	length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
+
+	// copy on read
+	dst := make([]byte, length)
+	copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
+
+	return bytesToString(dst)
+}
+
+func bytesToString(b []byte) string {
+	bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len}
+	return *(*string)(unsafe.Pointer(&strHeader))
+}
+
+func readHashFromEntry(data []byte) uint64 {
+	return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
+}
+
+func resetKeyFromEntry(data []byte) {
+	binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
+}
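To make the wire format concrete: each cached entry is laid out as an 8-byte little-endian timestamp, an 8-byte hash, a 2-byte key length, the key bytes, and finally the payload. A package-internal round trip through the helpers above could look like this (hypothetical values):

	buffer := make([]byte, 0)
	blob := wrapEntry(1545216000, 0xCAFE, "key-1", []byte("value-1"), &buffer)
	// headersSizeInBytes (18) + len("key-1") (5) + len("value-1") (7) == 30 bytes
	fmt.Println(len(blob))                    // 30
	fmt.Println(readTimestampFromEntry(blob)) // 1545216000
	fmt.Println(readKeyFromEntry(blob))       // "key-1"
	fmt.Println(string(readEntry(blob)))      // "value-1"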
diff --git a/vendor/github.com/allegro/bigcache/entry_not_found_error.go b/vendor/github.com/allegro/bigcache/entry_not_found_error.go
new file mode 100644
index 000000000..e6955a57b
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/entry_not_found_error.go
@@ -0,0 +1,17 @@
+package bigcache
+
+import "fmt"
+
+// EntryNotFoundError is an error type returned when no entry was found for the provided key
+type EntryNotFoundError struct {
+	message string
+}
+
+func notFound(key string) error {
+	return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)}
+}
+
+// Error returns the error message
+func (e EntryNotFoundError) Error() string {
+	return e.message
+}
diff --git a/vendor/github.com/allegro/bigcache/fnv.go b/vendor/github.com/allegro/bigcache/fnv.go
new file mode 100644
index 000000000..188c9aa6d
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/fnv.go
@@ -0,0 +1,28 @@
+package bigcache
+
+// newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations.
+// Its Sum64 method will lay the value out in big-endian byte order.
+// See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function
+func newDefaultHasher() Hasher {
+	return fnv64a{}
+}
+
+type fnv64a struct{}
+
+const (
+	// offset64 is the FNV-1a offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
+	offset64 = 14695981039346656037
+	// prime64 is the FNV-1a prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
+	prime64 = 1099511628211
+)
+
+// Sum64 takes a string and returns its uint64 hash value.
+func (f fnv64a) Sum64(key string) uint64 {
+	var hash uint64 = offset64
+	for i := 0; i < len(key); i++ {
+		hash ^= uint64(key[i])
+		hash *= prime64
+	}
+
+	return hash
+}
diff --git a/vendor/github.com/allegro/bigcache/hash.go b/vendor/github.com/allegro/bigcache/hash.go
new file mode 100644
index 000000000..5f8ade774
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/hash.go
@@ -0,0 +1,8 @@
+package bigcache
+
+// Hasher is responsible for generating an unsigned 64-bit hash of the provided string. A Hasher should minimize collisions
+// (generating the same hash for different strings); while performance is also important, fast functions are preferable (e.g.
+// the FarmHash family).
+type Hasher interface {
+	Sum64(string) uint64
+}
diff --git a/vendor/github.com/allegro/bigcache/iterator.go b/vendor/github.com/allegro/bigcache/iterator.go
new file mode 100644
index 000000000..70b98d900
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/iterator.go
@@ -0,0 +1,122 @@
+package bigcache
+
+import "sync"
+
+type iteratorError string
+
+func (e iteratorError) Error() string {
+	return string(e)
+}
+
+// ErrInvalidIteratorState is reported when the iterator is in an invalid state
+const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")
+
+// ErrCannotRetrieveEntry is reported when an entry cannot be retrieved from the underlying cache
+const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
+
+var emptyEntryInfo = EntryInfo{}
+
+// EntryInfo holds information about an entry in the cache
+type EntryInfo struct {
+	timestamp uint64
+	hash      uint64
+	key       string
+	value     []byte
+}
+
+// Key returns the entry's underlying key
+func (e EntryInfo) Key() string {
+	return e.key
+}
+
+// Hash returns the entry's hash value
+func (e EntryInfo) Hash() uint64 {
+	return e.hash
+}
+
+// Timestamp returns the entry's timestamp (time of insertion)
+func (e EntryInfo) Timestamp() uint64 {
+	return e.timestamp
+}
+
+// Value returns the entry's underlying value
+func (e EntryInfo) Value() []byte {
+	return e.value
+}
+
+// EntryInfoIterator allows iterating over entries in the cache
+type EntryInfoIterator struct {
+	mutex         sync.Mutex
+	cache         *BigCache
+	currentShard  int
+	currentIndex  int
+	elements      []uint32
+	elementsCount int
+	valid         bool
+}
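A brief sketch of how this iterator is meant to be driven (assuming a populated cache named `cache`):

	for it := cache.Iterator(); it.SetNext(); {
		info, err := it.Value()
		if err != nil {
			continue // e.g. ErrCannotRetrieveEntry if the entry was evicted meanwhile
		}
		fmt.Printf("%s => %d bytes (inserted at %d)\n", info.Key(), len(info.Value()), info.Timestamp())
	}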
+// SetNext moves to the next element and returns true if one exists.
+func (it *EntryInfoIterator) SetNext() bool {
+	it.mutex.Lock()
+
+	it.valid = false
+	it.currentIndex++
+
+	if it.elementsCount > it.currentIndex {
+		it.valid = true
+		it.mutex.Unlock()
+		return true
+	}
+
+	for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
+		it.elements, it.elementsCount = it.cache.shards[i].copyKeys()
+
+		// Non-empty shard - stick with it
+		if it.elementsCount > 0 {
+			it.currentIndex = 0
+			it.currentShard = i
+			it.valid = true
+			it.mutex.Unlock()
+			return true
+		}
+	}
+	it.mutex.Unlock()
+	return false
+}
+
+func newIterator(cache *BigCache) *EntryInfoIterator {
+	elements, count := cache.shards[0].copyKeys()
+
+	return &EntryInfoIterator{
+		cache:         cache,
+		currentShard:  0,
+		currentIndex:  -1,
+		elements:      elements,
+		elementsCount: count,
+	}
+}
+
+// Value returns the current value from the iterator
+func (it *EntryInfoIterator) Value() (EntryInfo, error) {
+	it.mutex.Lock()
+
+	if !it.valid {
+		it.mutex.Unlock()
+		return emptyEntryInfo, ErrInvalidIteratorState
+	}
+
+	entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex]))
+
+	if err != nil {
+		it.mutex.Unlock()
+		return emptyEntryInfo, ErrCannotRetrieveEntry
+	}
+	it.mutex.Unlock()
+
+	return EntryInfo{
+		timestamp: readTimestampFromEntry(entry),
+		hash:      readHashFromEntry(entry),
+		key:       readKeyFromEntry(entry),
+		value:     readEntry(entry),
+	}, nil
+}
diff --git a/vendor/github.com/allegro/bigcache/logger.go b/vendor/github.com/allegro/bigcache/logger.go
new file mode 100644
index 000000000..50e84abc8
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/logger.go
@@ -0,0 +1,30 @@
+package bigcache
+
+import (
+	"log"
+	"os"
+)
+
+// Logger is invoked when `Config.Verbose=true`
+type Logger interface {
+	Printf(format string, v ...interface{})
+}
+
+// this is a safeguard, breaking at compile time in case
+// `log.Logger` does not adhere to our `Logger` interface.
+// see https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ Logger = &log.Logger{}
+
+// DefaultLogger returns a `Logger` implementation
+// backed by stdlib's log
+func DefaultLogger() *log.Logger {
+	return log.New(os.Stdout, "", log.LstdFlags)
+}
+
+func newLogger(custom Logger) Logger {
+	if custom != nil {
+		return custom
+	}
+
+	return DefaultLogger()
+}
diff --git a/vendor/github.com/allegro/bigcache/queue/bytes_queue.go b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go
new file mode 100644
index 000000000..0285c72cd
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/queue/bytes_queue.go
@@ -0,0 +1,210 @@
+package queue
+
+import (
+	"encoding/binary"
+	"log"
+	"time"
+)
+
+const (
+	// Number of bytes used to keep information about entry size
+	headerEntrySize = 4
+	// Bytes before the left margin are not used. A zero index means the element does not exist in the queue; this is useful while reading a slice from an index
+	leftMarginIndex = 1
+	// Minimum empty blob size in bytes. An empty blob fills the space between tail and head in an additional memory allocation.
+	// It keeps entry indexes unchanged
+	minimumEmptyBlobSize = 32 + headerEntrySize
+)
+
+// BytesQueue is a non-thread-safe FIFO queue based on a byte array.
+// For every push operation the index of the entry is returned; it can be used to read the entry later
+type BytesQueue struct {
+	array           []byte
+	capacity        int
+	maxCapacity     int
+	head            int
+	tail            int
+	count           int
+	rightMargin     int
+	headerBuffer    []byte
+	verbose         bool
+	initialCapacity int
+}
+
+type queueError struct {
+	message string
+}
+
+// NewBytesQueue initializes a new bytes queue.
+// The initial capacity is used in the byte array allocation.
+// When the verbose flag is set, information about memory allocations is printed.
+func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue {
+	return &BytesQueue{
+		array:           make([]byte, initialCapacity),
+		capacity:        initialCapacity,
+		maxCapacity:     maxCapacity,
+		headerBuffer:    make([]byte, headerEntrySize),
+		tail:            leftMarginIndex,
+		head:            leftMarginIndex,
+		rightMargin:     leftMarginIndex,
+		verbose:         verbose,
+		initialCapacity: initialCapacity,
+	}
+}
+
+// Reset removes all entries from the queue
+func (q *BytesQueue) Reset() {
+	// Just reset indexes
+	q.tail = leftMarginIndex
+	q.head = leftMarginIndex
+	q.rightMargin = leftMarginIndex
+	q.count = 0
+}
+
+// Push copies the entry to the end of the queue and moves the tail pointer. It allocates more space if needed.
+// It returns the index of the pushed data, or an error if the maximum queue size limit is reached.
+func (q *BytesQueue) Push(data []byte) (int, error) {
+	dataLen := len(data)
+
+	if q.availableSpaceAfterTail() < dataLen+headerEntrySize {
+		if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize {
+			q.tail = leftMarginIndex
+		} else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 {
+			return -1, &queueError{"Full queue. Maximum size limit reached."}
+		} else {
+			q.allocateAdditionalMemory(dataLen + headerEntrySize)
+		}
+	}
+
+	index := q.tail
+
+	q.push(data, dataLen)
+
+	return index, nil
+}
+
+func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
+	start := time.Now()
+	if q.capacity < minimum {
+		q.capacity += minimum
+	}
+	q.capacity = q.capacity * 2
+	if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
+		q.capacity = q.maxCapacity
+	}
+
+	oldArray := q.array
+	q.array = make([]byte, q.capacity)
+
+	if leftMarginIndex != q.rightMargin {
+		copy(q.array, oldArray[:q.rightMargin])
+
+		if q.tail < q.head {
+			emptyBlobLen := q.head - q.tail - headerEntrySize
+			q.push(make([]byte, emptyBlobLen), emptyBlobLen)
+			q.head = leftMarginIndex
+			q.tail = q.rightMargin
+		}
+	}
+
+	if q.verbose {
+		log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
+	}
+}
+
+func (q *BytesQueue) push(data []byte, len int) {
+	binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len))
+	q.copy(q.headerBuffer, headerEntrySize)
+
+	q.copy(data, len)
+
+	if q.tail > q.head {
+		q.rightMargin = q.tail
+	}
+
+	q.count++
+}
+
+func (q *BytesQueue) copy(data []byte, len int) {
+	q.tail += copy(q.array[q.tail:], data[:len])
+}
+
+// Pop reads the oldest entry from the queue and moves the head pointer to the next one
+func (q *BytesQueue) Pop() ([]byte, error) {
+	data, size, err := q.peek(q.head)
+	if err != nil {
+		return nil, err
+	}
+
+	q.head += headerEntrySize + size
+	q.count--
+
+	if q.head == q.rightMargin {
+		q.head = leftMarginIndex
+		if q.tail == q.rightMargin {
+			q.tail = leftMarginIndex
+		}
+		q.rightMargin = q.tail
+	}
+
+	return data, nil
+}
+
+// Peek reads the oldest entry from the queue without moving the head pointer
+func (q *BytesQueue) Peek() ([]byte, error) {
+	data, _, err := q.peek(q.head)
+	return data, err
+}
+
+// Get reads the entry at the given index
+func (q *BytesQueue) Get(index int) ([]byte, error) {
+	data, _, err := q.peek(index)
+	return data, err
+}
+
+// Capacity returns the number of bytes allocated for the queue
+func (q *BytesQueue) Capacity() int {
+	return q.capacity
+}
+
+// Len returns the number of entries kept in the queue
+func (q *BytesQueue) Len() int {
+	return q.count
+}
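A package-internal sketch of the queue semantics just described (hypothetical values, no hard capacity limit):

	q := queue.NewBytesQueue(100, 0, false) // 100-byte initial capacity, unlimited max, quiet
	index, _ := q.Push([]byte("hello"))     // index of the pushed entry
	entry, _ := q.Get(index)                // random access by index
	fmt.Println(string(entry))              // "hello"
	oldest, _ := q.Pop()                    // FIFO removal from the head
	fmt.Println(string(oldest))             // "hello"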
+// Error returns the error message
+func (e *queueError) Error() string {
+	return e.message
+}
+
+func (q *BytesQueue) peek(index int) ([]byte, int, error) {
+
+	if q.count == 0 {
+		return nil, 0, &queueError{"Empty queue"}
+	}
+
+	if index <= 0 {
+		return nil, 0, &queueError{"Index must be greater than zero. Invalid index."}
+	}
+
+	if index+headerEntrySize >= len(q.array) {
+		return nil, 0, &queueError{"Index out of range"}
+	}
+
+	blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
+	return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil
+}
+
+func (q *BytesQueue) availableSpaceAfterTail() int {
+	if q.tail >= q.head {
+		return q.capacity - q.tail
+	}
+	return q.head - q.tail - minimumEmptyBlobSize
+}
+
+func (q *BytesQueue) availableSpaceBeforeHead() int {
+	if q.tail >= q.head {
+		return q.head - leftMarginIndex - minimumEmptyBlobSize
+	}
+	return q.head - q.tail - minimumEmptyBlobSize
+}
diff --git a/vendor/github.com/allegro/bigcache/shard.go b/vendor/github.com/allegro/bigcache/shard.go
new file mode 100644
index 000000000..af48ebc3b
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/shard.go
@@ -0,0 +1,229 @@
+package bigcache
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/allegro/bigcache/queue"
+)
+
+type cacheShard struct {
+	hashmap     map[uint64]uint32
+	entries     queue.BytesQueue
+	lock        sync.RWMutex
+	entryBuffer []byte
+	onRemove    func(wrappedEntry []byte)
+
+	isVerbose  bool
+	logger     Logger
+	clock      clock
+	lifeWindow uint64
+
+	stats Stats
+}
+
+type onRemoveCallback func(wrappedEntry []byte)
+
+func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
+	s.lock.RLock()
+	itemIndex := s.hashmap[hashedKey]
+
+	if itemIndex == 0 {
+		s.lock.RUnlock()
+		s.miss()
+		return nil, notFound(key)
+	}
+
+	wrappedEntry, err := s.entries.Get(int(itemIndex))
+	if err != nil {
+		s.lock.RUnlock()
+		s.miss()
+		return nil, err
+	}
+	if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
+		if s.isVerbose {
+			s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
+		}
+		s.lock.RUnlock()
+		s.collision()
+		return nil, notFound(key)
+	}
+	s.lock.RUnlock()
+	s.hit()
+	return readEntry(wrappedEntry), nil
+}
+
+func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
+	currentTimestamp := uint64(s.clock.epoch())
+
+	s.lock.Lock()
+
+	if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
+		if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
+			resetKeyFromEntry(previousEntry)
+		}
+	}
+
+	if oldestEntry, err := s.entries.Peek(); err == nil {
+		s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
+	}
+
+	w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
+
+	for {
+		if index, err := s.entries.Push(w); err == nil {
+			s.hashmap[hashedKey] = uint32(index)
+			s.lock.Unlock()
+			return nil
+		}
+		if s.removeOldestEntry() != nil {
+			s.lock.Unlock()
+			return fmt.Errorf("entry is bigger than max shard size")
+		}
+	}
+}
+
+func (s *cacheShard) del(key string, hashedKey uint64) error {
+	s.lock.RLock()
+	itemIndex := s.hashmap[hashedKey]
+
+	if itemIndex == 0 {
+		s.lock.RUnlock()
+		s.delmiss()
+		return notFound(key)
+	}
+
+	wrappedEntry, err := s.entries.Get(int(itemIndex))
+	if err != nil {
+		s.lock.RUnlock()
+		s.delmiss()
+		return err
+	}
+	s.lock.RUnlock()
+
+	s.lock.Lock()
+	{
+		delete(s.hashmap, hashedKey)
+		s.onRemove(wrappedEntry)
+		resetKeyFromEntry(wrappedEntry)
+	}
+	s.lock.Unlock()
+
+	s.delhit()
+	return nil
+}
+
+func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool {
+	oldestTimestamp := readTimestampFromEntry(oldestEntry)
+	if currentTimestamp-oldestTimestamp > s.lifeWindow {
+		evict()
+		return true
+	}
+	return false
+}
+
+func (s *cacheShard) cleanUp(currentTimestamp uint64) {
+	s.lock.Lock()
+	for {
+		if oldestEntry, err := s.entries.Peek(); err != nil {
+			break
+		} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
+			break
+		}
+	}
+	s.lock.Unlock()
+}
+
+func (s *cacheShard) getOldestEntry() ([]byte, error) {
+	return s.entries.Peek()
+}
+
+func (s *cacheShard) getEntry(index int) ([]byte, error) {
+	return s.entries.Get(index)
+}
+
+func (s *cacheShard) copyKeys() (keys []uint32, next int) {
+	keys = make([]uint32, len(s.hashmap))
+
+	s.lock.RLock()
+
+	for _, index := range s.hashmap {
+		keys[next] = index
+		next++
+	}
+
+	s.lock.RUnlock()
+	return keys, next
+}
+
+func (s *cacheShard) removeOldestEntry() error {
+	oldest, err := s.entries.Pop()
+	if err == nil {
+		hash := readHashFromEntry(oldest)
+		delete(s.hashmap, hash)
+		s.onRemove(oldest)
+		return nil
+	}
+	return err
+}
+
+func (s *cacheShard) reset(config Config) {
+	s.lock.Lock()
+	s.hashmap = make(map[uint64]uint32, config.initialShardSize())
+	s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
+	s.entries.Reset()
+	s.lock.Unlock()
+}
+
+func (s *cacheShard) len() int {
+	s.lock.RLock()
+	res := len(s.hashmap)
+	s.lock.RUnlock()
+	return res
+}
+
+func (s *cacheShard) getStats() Stats {
+	var stats = Stats{
+		Hits:       atomic.LoadInt64(&s.stats.Hits),
+		Misses:     atomic.LoadInt64(&s.stats.Misses),
+		DelHits:    atomic.LoadInt64(&s.stats.DelHits),
+		DelMisses:  atomic.LoadInt64(&s.stats.DelMisses),
+		Collisions: atomic.LoadInt64(&s.stats.Collisions),
+	}
+	return stats
+}
+
+func (s *cacheShard) hit() {
+	atomic.AddInt64(&s.stats.Hits, 1)
+}
+
+func (s *cacheShard) miss() {
+	atomic.AddInt64(&s.stats.Misses, 1)
+}
+
+func (s *cacheShard) delhit() {
+	atomic.AddInt64(&s.stats.DelHits, 1)
+}
+
+func (s *cacheShard) delmiss() {
+	atomic.AddInt64(&s.stats.DelMisses, 1)
+}
+
+func (s *cacheShard) collision() {
+	atomic.AddInt64(&s.stats.Collisions, 1)
+}
+
+func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
+	return &cacheShard{
+		hashmap:     make(map[uint64]uint32, config.initialShardSize()),
+		entries:     *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose),
+		entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
+		onRemove:    callback,
+
+		isVerbose:  config.Verbose,
+		logger:     newLogger(config.Logger),
+		clock:      clock,
+		lifeWindow: uint64(config.LifeWindow.Seconds()),
+	}
+}
diff --git a/vendor/github.com/allegro/bigcache/stats.go b/vendor/github.com/allegro/bigcache/stats.go
new file mode 100644
index 000000000..07157132a
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/stats.go
@@ -0,0 +1,15 @@
+package bigcache
+
+// Stats stores cache statistics
+type Stats struct {
+	// Hits is the number of successfully found keys
+	Hits int64 `json:"hits"`
+	// Misses is the number of keys that were not found
+	Misses int64 `json:"misses"`
+	// DelHits is the number of successfully deleted keys
+	DelHits int64 `json:"delete_hits"`
+	// DelMisses is the number of keys that could not be deleted
+	DelMisses int64 `json:"delete_misses"`
+	// Collisions is the number of key collisions that occurred
+	Collisions int64 `json:"collisions"`
+}
diff --git a/vendor/github.com/allegro/bigcache/utils.go b/vendor/github.com/allegro/bigcache/utils.go
new file mode 100644
index 000000000..ca1df79b9
--- /dev/null
+++ b/vendor/github.com/allegro/bigcache/utils.go
@@ -0,0 +1,16 @@
+package bigcache
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func convertMBToBytes(value int) int {
+	return value * 1024 * 1024
+}
+
+func isPowerOfTwo(number int) bool {
+	return (number & (number - 1)) == 0
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS b/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
index 5a717da00..9a61d3932 100644
--- a/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
+++ b/vendor/github.com/ethereum/go-ethereum/.github/CODEOWNERS
@@ -9,23 +9,26 @@
 les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
+p2p/simulations @lmars
+p2p/protocols @zelig
+swarm/api/http @justelad
 swarm/bmt @zelig
 swarm/dev @lmars
 swarm/fuse @jmozah @holisticode
 swarm/grafana_dashboards @nonsense
 swarm/metrics @nonsense @holisticode
 swarm/multihash @nolash
-swarm/network/bitvector @zelig @janos @gbalint
-swarm/network/priorityqueue @zelig @janos @gbalint
-swarm/network/simulations @zelig
-swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/bitvector @zelig @janos
+swarm/network/priorityqueue @zelig @janos
+swarm/network/simulations @zelig @janos
+swarm/network/stream @janos @zelig @holisticode @justelad
 swarm/network/stream/intervals @janos
 swarm/network/stream/testing @zelig
 swarm/pot @zelig
 swarm/pss @nolash @zelig @nonsense
 swarm/services @zelig
 swarm/state @justelad
-swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/encryption @zelig @nagydani
 swarm/storage/mock @janos
 swarm/storage/feed @nolash @jpeletier
 swarm/testutil @lmars
diff --git a/vendor/github.com/ethereum/go-ethereum/.travis.yml b/vendor/github.com/ethereum/go-ethereum/.travis.yml
index 69535b7ef..33a4f8949 100644
--- a/vendor/github.com/ethereum/go-ethereum/.travis.yml
+++
b/vendor/github.com/ethereum/go-ethereum/.travis.yml @@ -29,6 +29,14 @@ matrix: - os: osx go: 1.11.x script: + - echo "Increase the maximum number of open file descriptors on macOS" + - NOFILE=20480 + - sudo sysctl -w kern.maxfiles=$NOFILE + - sudo sysctl -w kern.maxfilesperproc=$NOFILE + - sudo launchctl limit maxfiles $NOFILE $NOFILE + - sudo launchctl limit maxfiles + - ulimit -S -n $NOFILE + - ulimit -n - unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703 - go run build/ci.go install - go run build/ci.go test -coverage $TEST_PACKAGES @@ -148,7 +156,7 @@ matrix: git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://storage.googleapis.com/golang/go1.11.1.linux-amd64.tar.gz | tar -xz + - curl https://storage.googleapis.com/golang/go1.11.2.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go diff --git a/vendor/github.com/ethereum/go-ethereum/README.md b/vendor/github.com/ethereum/go-ethereum/README.md index f308fb101..7593dd090 100644 --- a/vendor/github.com/ethereum/go-ethereum/README.md +++ b/vendor/github.com/ethereum/go-ethereum/README.md @@ -18,7 +18,7 @@ For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. -Building geth requires both a Go (version 1.7 or later) and a C compiler. +Building geth requires both a Go (version 1.9 or later) and a C compiler. You can install them using your favourite package manager. Once the dependencies are installed, run @@ -168,7 +168,7 @@ HTTP based JSON-RPC API options: * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) You'll need to use your own programming environments' capabilities (libraries, tools, etc) to connect -via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](http://www.jsonrpc.org/specification) +via HTTP, WS or IPC to a Geth node configured with the above flags and you'll need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You can reuse the same connection for multiple requests! 
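As a minimal Go sketch (assuming a local node started with `--rpc` on the default `http://localhost:8545` endpoint), go-ethereum's own `rpc` package can dial once and then issue any number of JSON-RPC calls over that single connection:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Dial once; the client multiplexes any number of JSON-RPC calls
	// over this single connection.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// eth_blockNumber returns a hex-encoded quantity, e.g. "0x4b7".
	var blockNumber string
	if err := client.Call(&blockNumber, "eth_blockNumber"); err != nil {
		panic(err)
	}
	fmt.Println("latest block:", blockNumber)
}
```

The same `rpc.Dial` call also accepts `ws://` URLs and IPC socket paths, so switching transports does not change the calling code.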
**Note: Please understand the security implications of opening up an HTTP/WS based transport before diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch index 68bfa8220..d59533605 100644 --- a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0000-accounts-hd-keys.patch @@ -1,5 +1,5 @@ diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go -index 211fa863..65c83f3b 100644 +index 84d8df0c5..551b386a5 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -33,6 +33,7 @@ import ( @@ -26,14 +26,14 @@ index 211fa863..65c83f3b 100644 type encryptedKeyJSONV3 struct { - Address string `json:"address"` -- Crypto cryptoJSON `json:"crypto"` +- Crypto CryptoJSON `json:"crypto"` - Id string `json:"id"` - Version int `json:"version"` + Address string `json:"address"` -+ Crypto cryptoJSON `json:"crypto"` ++ Crypto CryptoJSON `json:"crypto"` + Id string `json:"id"` + Version int `json:"version"` -+ ExtendedKey cryptoJSON `json:"extendedkey"` ++ ExtendedKey CryptoJSON `json:"extendedkey"` + SubAccountIndex uint32 `json:"subaccountindex"` } @@ -80,7 +80,7 @@ index 211fa863..65c83f3b 100644 // into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we // retry until the first byte is 0. diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go -index 6b04acd0..ac2ab008 100644 +index 2918047cc..333fbef6f 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -38,6 +38,7 @@ import ( @@ -154,21 +154,22 @@ index 6b04acd0..ac2ab008 100644 // Update changes the passphrase of an existing account. func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error { a, key, err := ks.getDecryptedKey(a, passphrase) -@@ -486,6 +529,9 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account +@@ -486,6 +529,10 @@ func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (account // zeroKey zeroes a private key in memory. 
func zeroKey(k *ecdsa.PrivateKey) { + if k == nil { + return + } ++ b := k.D.Bits() for i := range b { b[i] = 0 -diff --git a/accounts/keystore/keystore_passphrase.go b/accounts/keystore/keystore_passphrase.go -index 59738abe..2b6ef252 100644 ---- a/accounts/keystore/keystore_passphrase.go -+++ b/accounts/keystore/keystore_passphrase.go -@@ -41,6 +41,7 @@ import ( +diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go +index a0b6cf538..e2512c6e8 100644 +--- a/accounts/keystore/passphrase.go ++++ b/accounts/keystore/passphrase.go +@@ -42,6 +42,7 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/pborman/uuid" @@ -176,9 +177,9 @@ index 59738abe..2b6ef252 100644 "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" ) -@@ -157,15 +158,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { - KDFParams: scryptParamsJSON, - MAC: hex.EncodeToString(mac), +@@ -187,15 +188,68 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { + if err != nil { + return nil, err } + encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP) + if err != nil { @@ -195,9 +196,9 @@ index 59738abe..2b6ef252 100644 return json.Marshal(encryptedKeyJSONV3) } -+func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) { ++func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (CryptoJSON, error) { + if extKey == nil { -+ return cryptoJSON{}, nil ++ return CryptoJSON{}, nil + } + authArray := []byte(auth) + salt := make([]byte, 32) @@ -206,7 +207,7 @@ index 59738abe..2b6ef252 100644 + } + derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) + if err != nil { -+ return cryptoJSON{}, err ++ return CryptoJSON{}, err + } + encryptKey := derivedKey[:16] + keyBytes := []byte(extKey.String()) @@ -217,7 +218,7 @@ index 59738abe..2b6ef252 100644 + } + cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) + if err != nil { -+ return cryptoJSON{}, err ++ return CryptoJSON{}, err + } + mac := crypto.Keccak256(derivedKey[16:32], cipherText) + @@ -232,7 +233,7 @@ index 59738abe..2b6ef252 100644 + IV: hex.EncodeToString(iv), + } + -+ return cryptoJSON{ ++ return CryptoJSON{ + Cipher: "aes-128-ctr", + CipherText: hex.EncodeToString(cipherText), + CipherParams: cipherParamsJSON, @@ -245,7 +246,7 @@ index 59738abe..2b6ef252 100644 // DecryptKey decrypts a key from a json blob, returning the private key itself. 
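// The "version" field of the blob selects between the legacy V1 and the
// current V3 on-disk formats, both handled below.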
func DecryptKey(keyjson []byte, auth string) (*Key, error) { // Parse the json into a simple map to fetch the key version -@@ -177,20 +231,43 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { +@@ -207,19 +261,41 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { var ( keyBytes, keyId []byte err error @@ -285,11 +286,9 @@ index 59738abe..2b6ef252 100644 + } + extKey, err = extkeys.NewKeyFromString(string(extKeyBytes)) } -+ // Handle any decryption errors and return the key if err != nil { - return nil, err -@@ -198,9 +275,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { +@@ -228,9 +304,11 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { key := crypto.ToECDSAUnsafe(keyBytes) return &Key{ @@ -304,7 +303,7 @@ index 59738abe..2b6ef252 100644 }, nil } -@@ -280,6 +359,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt +@@ -316,6 +394,51 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt return plainText, keyId, err } @@ -353,6 +352,6 @@ index 59738abe..2b6ef252 100644 + return plainText, err +} + - func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) { + func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) { authArray := []byte(auth) salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) diff --git a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0038-ulc.patch b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0038-ulc.patch index 11a4c247a..40188ea1d 100644 --- a/vendor/github.com/ethereum/go-ethereum/_assets/patches/0038-ulc.patch +++ b/vendor/github.com/ethereum/go-ethereum/_assets/patches/0038-ulc.patch @@ -4,7 +4,7 @@ index b0749d2..d724562 100644 +++ b/cmd/geth/config.go @@ -124,6 +124,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { } - + // Apply flags. 
+ utils.SetULC(ctx, &cfg.Eth) utils.SetNodeConfig(ctx, &cfg.Node) @@ -142,11 +142,11 @@ index d209311..8904dd8 100644 abort, results := hc.engine.VerifyHeaders(hc, chain, seals) defer close(abort) diff --git a/eth/config.go b/eth/config.go -index efbaafb..7d1db9f 100644 +index 7c041d1af..f71b8dfee 100644 --- a/eth/config.go +++ b/eth/config.go -@@ -87,8 +87,12 @@ type Config struct { - NoPruning bool +@@ -91,8 +91,12 @@ type Config struct { + Whitelist map[uint64]common.Hash `toml:"-"` // Light client options - LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests @@ -161,7 +161,7 @@ index efbaafb..7d1db9f 100644 // Database options SkipBcVersionCheck bool `toml:"-"` diff --git a/eth/gen_config.go b/eth/gen_config.go -index d401a91..382d4d9 100644 +index 2777aa9e8..6662f820f 100644 --- a/eth/gen_config.go +++ b/eth/gen_config.go @@ -23,10 +23,12 @@ func (c Config) MarshalTOML() (interface{}, error) { @@ -179,9 +179,9 @@ index d401a91..382d4d9 100644 + SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` DatabaseCache int - TrieCache int - TrieTimeout time.Duration -@@ -51,6 +53,8 @@ func (c Config) MarshalTOML() (interface{}, error) { + TrieCleanCache int + TrieDirtyCache int +@@ -54,6 +56,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.NoPruning = c.NoPruning enc.LightServ = c.LightServ enc.LightPeers = c.LightPeers @@ -190,7 +190,7 @@ index d401a91..382d4d9 100644 enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache -@@ -79,10 +83,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { +@@ -85,10 +89,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { NetworkId *uint64 SyncMode *downloader.SyncMode NoPruning *bool @@ -205,9 +205,9 @@ index d401a91..382d4d9 100644 + SkipBcVersionCheck *bool `toml:"-"` + DatabaseHandles *int `toml:"-"` DatabaseCache *int - TrieCache *int - TrieTimeout *time.Duration -@@ -122,6 +128,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { + TrieCleanCache *int + TrieDirtyCache *int +@@ -131,6 +137,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.LightPeers != nil { c.LightPeers = *dec.LightPeers } @@ -342,19 +342,7 @@ index f0d3b18..16cfc00 100644 } // fetcherPeerInfo holds fetcher-specific information about each active peer -@@ -144,9 +152,11 @@ func (f *lightFetcher) syncLoop() { - rq *distReq - reqID uint64 - ) -+ - if !f.syncing && !(newAnnounce && s) { - rq, reqID = f.nextRequest() - } -+ - syncing := f.syncing - f.lock.Unlock() - -@@ -206,8 +216,11 @@ func (f *lightFetcher) syncLoop() { +@@ -209,8 +209,11 @@ func (f *lightFetcher) syncLoop() { case p := <-f.syncDone: f.lock.Lock() p.Log().Debug("Done synchronising with peer") @@ -365,8 +353,8 @@ index f0d3b18..16cfc00 100644 + f.newHeaders([]*types.Header{h}, []*big.Int{td}) + } f.lock.Unlock() + f.requestChn <- false } - } @@ -223,7 +236,6 @@ func (f *lightFetcher) registerPeer(p *peer) { f.lock.Lock() @@ -397,7 +385,7 @@ index f0d3b18..16cfc00 100644 if fp.root != nil { @@ -407,25 +422,13 @@ func (f *lightFetcher) requestedID(reqID uint64) bool { // to be downloaded starting from the head backwards is also returned - func (f *lightFetcher) nextRequest() (*distReq, uint64) { + func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) { var ( - bestHash common.Hash - bestAmount uint64 @@ -424,12 +412,12 @@ index f0d3b18..16cfc00 100644 - } - } if 
bestTd == f.maxConfirmedTd { - return nil, 0 + return nil, 0, false } -@@ -435,72 +438,131 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) { +@@ -437,72 +437,131 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) { var rq *distReq reqID := genReqID() - if f.syncing { + if bestSyncing { - rq = &distReq{ - getCost: func(dp distPeer) uint64 { - return 0 @@ -462,7 +450,7 @@ index f0d3b18..16cfc00 100644 - canSend: func(dp distPeer) bool { + rq = f.newFetcherDistReq(bestHash, reqID, bestAmount) + } -+ return rq, reqID ++ return rq, reqID, bestSyncing +} + +// findBestValues retrieves the best values for LES or ULC mode. @@ -614,7 +602,7 @@ index f0d3b18..16cfc00 100644 + return func() { p.RequestHeadersByHash(reqID, cost, bestHash, int(bestAmount), 0, true) } + }, } -- return rq, reqID +- return rq, reqID, bestSyncing } // deliverHeaders delivers header download request responses for processing @@ -2248,53 +2236,6 @@ index 8e2734c..0b5571b 100644 + defer self.mu.Unlock() + self.disableCheckFreq = false +} -diff --git a/metrics/counter.go b/metrics/counter.go -index c7f2b4b..fbb14b8 100644 ---- a/metrics/counter.go -+++ b/metrics/counter.go -@@ -1,6 +1,8 @@ - package metrics - --import "sync/atomic" -+import ( -+ "sync/atomic" -+) - - // Counters hold an int64 value that can be incremented and decremented. - type Counter interface { -@@ -28,6 +30,12 @@ func NewCounter() Counter { - return &StandardCounter{0} - } - -+// NewCounterForced constructs a new StandardCounter and returns it no matter if -+// the global switch is enabled or not. -+func NewCounterForced() Counter { -+ return &StandardCounter{0} -+} -+ - // NewRegisteredCounter constructs and registers a new StandardCounter. - func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() -@@ -38,6 +46,19 @@ func NewRegisteredCounter(name string, r Registry) Counter { - return c - } - -+// NewRegisteredCounterForced constructs and registers a new StandardCounter -+// and launches a goroutine no matter the global switch is enabled or not. -+// Be sure to unregister the counter from the registry once it is of no use to -+// allow for garbage collection. -+func NewRegisteredCounterForced(name string, r Registry) Counter { -+ c := NewCounterForced() -+ if nil == r { -+ r = DefaultRegistry -+ } -+ r.Register(name, c) -+ return c -+} -+ - // CounterSnapshot is a read-only copy of another Counter. - type CounterSnapshot int64 - diff --git a/mobile/geth.go b/mobile/geth.go index e3e2e90..4d674b2 100644 --- a/mobile/geth.go @@ -2309,21 +2250,3 @@ index e3e2e90..4d674b2 100644 } // defaultNodeConfig contains the default node configuration values to use if all -diff --git a/p2p/enode/node.go b/p2p/enode/node.go -index 84088fc..b454ab2 100644 ---- a/p2p/enode/node.go -+++ b/p2p/enode/node.go -@@ -98,6 +98,13 @@ func (n *Node) Pubkey() *ecdsa.PublicKey { - return &key - } - -+// Record returns the node's record. The return value is a copy and may -+// be modified by the caller. -+func (n *Node) Record() *enr.Record { -+ cpy := n.r -+ return &cpy -+} -+ - // checks whether n is a valid complete node. 
- func (n *Node) ValidateComplete() error { - if n.Incomplete() { diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go index 93b513c34..f544c80db 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/argument.go @@ -243,11 +243,7 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) { // input offset is the bytes offset for packed output inputOffset := 0 for _, abiArg := range abiArgs { - if abiArg.Type.T == ArrayTy { - inputOffset += 32 * abiArg.Type.Size - } else { - inputOffset += 32 - } + inputOffset += getDynamicTypeOffset(abiArg.Type) } var ret []byte for i, a := range args { @@ -257,14 +253,13 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) { if err != nil { return nil, err } - // check for a slice type (string, bytes, slice) - if input.Type.requiresLengthPrefix() { - // calculate the offset - offset := inputOffset + len(variableInput) + // check for dynamic types + if isDynamicType(input.Type) { // set the offset - ret = append(ret, packNum(reflect.ValueOf(offset))...) - // Append the packed output to the variable input. The variable input - // will be appended at the end of the input. + ret = append(ret, packNum(reflect.ValueOf(inputOffset))...) + // calculate next offset + inputOffset += len(packed) + // append to variable input variableInput = append(variableInput, packed...) } else { // append the packed value to the input diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go index dce89d2b4..6bfaabf5a 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/abi/type.go @@ -183,23 +183,39 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { return nil, err } - if t.T == SliceTy || t.T == ArrayTy { - var packed []byte + switch t.T { + case SliceTy, ArrayTy: + var ret []byte + if t.requiresLengthPrefix() { + // append length + ret = append(ret, packNum(reflect.ValueOf(v.Len()))...) + } + + // calculate offset if any + offset := 0 + offsetReq := isDynamicType(*t.Elem) + if offsetReq { + offset = getDynamicTypeOffset(*t.Elem) * v.Len() + } + var tail []byte for i := 0; i < v.Len(); i++ { val, err := t.Elem.pack(v.Index(i)) if err != nil { return nil, err } - packed = append(packed, val...) - } - if t.T == SliceTy { - return packBytesSlice(packed, v.Len()), nil - } else if t.T == ArrayTy { - return packed, nil + if !offsetReq { + ret = append(ret, val...) + continue + } + ret = append(ret, packNum(reflect.ValueOf(offset))...) + offset += len(val) + tail = append(tail, val...) } + return append(ret, tail...), nil + default: + return packElement(t, v), nil } - return packElement(t, v), nil } // requireLengthPrefix returns whether the type requires any sort of length @@ -207,3 +223,27 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { func (t Type) requiresLengthPrefix() bool { return t.T == StringTy || t.T == BytesTy || t.T == SliceTy } + +// isDynamicType returns true if the type is dynamic. +// StringTy, BytesTy, and SliceTy(irrespective of slice element type) are dynamic types +// ArrayTy is considered dynamic if and only if the Array element is a dynamic type. +// This function recursively checks the type for slice and array elements. 
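+// For example, uint256[2] is static and packs inline, while string[2] is
+// dynamic because its element type is dynamic.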
+func isDynamicType(t Type) bool { + // dynamic types + // array is also a dynamic type if the array type is dynamic + return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem)) +} + +// getDynamicTypeOffset returns the offset for the type. +// See `isDynamicType` to know which types are considered dynamic. +// If the type t is an array and element type is not a dynamic type, then we consider it a static type and +// return 32 * size of array since length prefix is not required. +// If t is a dynamic type or element type(for slices and arrays) is dynamic, then we simply return 32 as offset. +func getDynamicTypeOffset(t Type) int { + // if it is an array and there are no dynamic types + // then the array is static type + if t.T == ArrayTy && !isDynamicType(*t.Elem) { + return 32 * t.Size + } + return 32 +} diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/hd.go b/vendor/github.com/ethereum/go-ethereum/accounts/hd.go index 277f688e4..6ed631807 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/hd.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/hd.go @@ -30,8 +30,8 @@ import ( var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0} // DefaultBaseDerivationPath is the base path from which custom derivation endpoints -// are incremented. As such, the first account will be at m/44'/60'/0'/0, the second -// at m/44'/60'/0'/1, etc. +// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second +// at m/44'/60'/0'/0/1, etc. var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0} // DefaultLedgerBaseDerivationPath is the base path from which custom derivation endpoints diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go index da3a46eb8..8f660e282 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/account_cache.go @@ -265,7 +265,10 @@ func (ac *accountCache) scanAccounts() error { case (addr == common.Address{}): log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address") default: - return &accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}} + return &accounts.Account{ + Address: addr, + URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}, + } } return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go index 5bb8dc5cc..1873c8218 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/key.go @@ -71,21 +71,21 @@ type plainKeyJSON struct { type encryptedKeyJSONV3 struct { Address string `json:"address"` - Crypto cryptoJSON `json:"crypto"` + Crypto CryptoJSON `json:"crypto"` Id string `json:"id"` Version int `json:"version"` - ExtendedKey cryptoJSON `json:"extendedkey"` + ExtendedKey CryptoJSON `json:"extendedkey"` SubAccountIndex uint32 `json:"subaccountindex"` } type encryptedKeyJSONV1 struct { Address string `json:"address"` - Crypto cryptoJSON `json:"crypto"` + Crypto CryptoJSON `json:"crypto"` Id string `json:"id"` Version string `json:"version"` } -type cryptoJSON struct { +type CryptoJSON struct { Cipher string `json:"cipher"` CipherText string 
`json:"ciphertext"` CipherParams cipherparamsJSON `json:"cipherparams"` @@ -212,7 +212,10 @@ func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Accou if err != nil { return nil, accounts.Account{}, err } - a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}} + a := accounts.Account{ + Address: key.Address, + URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))}, + } if err := ks.StoreKey(a.URL.Path, key, auth); err != nil { zeroKey(key.PrivateKey) return nil, a, err @@ -265,5 +268,6 @@ func toISO8601(t time.Time) string { } else { tz = fmt.Sprintf("%03d00", offset/3600) } - return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz) + return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", + t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz) } diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go index 2e3a3e99f..0a183aa68 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore.go @@ -538,6 +538,7 @@ func zeroKey(k *ecdsa.PrivateKey) { if k == nil { return } + b := k.D.Bits() for i := range b { b[i] = 0 diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go similarity index 90% rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go index f6ac25a2a..e2512c6e8 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_passphrase.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/passphrase.go @@ -136,29 +136,26 @@ func (ks keyStorePassphrase) JoinPath(filename string) string { return filepath.Join(ks.keysDirPath, filename) } -// EncryptKey encrypts a key using the specified scrypt parameters into a json -// blob that can be decrypted later on. -func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { - authArray := []byte(auth) +// Encryptdata encrypts the data given as 'data' with the password 'auth'. 
+func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) { salt := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, salt); err != nil { panic("reading from crypto/rand failed: " + err.Error()) } - derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) + derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen) if err != nil { - return nil, err + return CryptoJSON{}, err } encryptKey := derivedKey[:16] - keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32) iv := make([]byte, aes.BlockSize) // 16 if _, err := io.ReadFull(rand.Reader, iv); err != nil { panic("reading from crypto/rand failed: " + err.Error()) } - cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) + cipherText, err := aesCTRXOR(encryptKey, data, iv) if err != nil { - return nil, err + return CryptoJSON{}, err } mac := crypto.Keccak256(derivedKey[16:32], cipherText) @@ -168,12 +165,11 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { scryptParamsJSON["p"] = scryptP scryptParamsJSON["dklen"] = scryptDKLen scryptParamsJSON["salt"] = hex.EncodeToString(salt) - cipherParamsJSON := cipherparamsJSON{ IV: hex.EncodeToString(iv), } - cryptoStruct := cryptoJSON{ + cryptoStruct := CryptoJSON{ Cipher: "aes-128-ctr", CipherText: hex.EncodeToString(cipherText), CipherParams: cipherParamsJSON, @@ -181,6 +177,17 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { KDFParams: scryptParamsJSON, MAC: hex.EncodeToString(mac), } + return cryptoStruct, nil +} + +// EncryptKey encrypts a key using the specified scrypt parameters into a json +// blob that can be decrypted later on. +func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { + keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32) + cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP) + if err != nil { + return nil, err + } encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP) if err != nil { return nil, err @@ -196,9 +203,9 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) { return json.Marshal(encryptedKeyJSONV3) } -func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (cryptoJSON, error) { +func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (CryptoJSON, error) { if extKey == nil { - return cryptoJSON{}, nil + return CryptoJSON{}, nil } authArray := []byte(auth) salt := make([]byte, 32) @@ -207,7 +214,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp } derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen) if err != nil { - return cryptoJSON{}, err + return CryptoJSON{}, err } encryptKey := derivedKey[:16] keyBytes := []byte(extKey.String()) @@ -218,7 +225,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp } cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv) if err != nil { - return cryptoJSON{}, err + return CryptoJSON{}, err } mac := crypto.Keccak256(derivedKey[16:32], cipherText) @@ -233,7 +240,7 @@ func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryp IV: hex.EncodeToString(iv), } - return cryptoJSON{ + return CryptoJSON{ Cipher: "aes-128-ctr", CipherText: hex.EncodeToString(cipherText), CipherParams: cipherParamsJSON, @@ -290,7 +297,6 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { } extKey, err = 
extkeys.NewKeyFromString(string(extKeyBytes)) } - // Handle any decryption errors and return the key if err != nil { return nil, err @@ -306,42 +312,48 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { }, nil } -func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) { - if keyProtected.Version != version { - return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version) +func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) { + if cryptoJson.Cipher != "aes-128-ctr" { + return nil, fmt.Errorf("Cipher not supported: %v", cryptoJson.Cipher) } - - if keyProtected.Crypto.Cipher != "aes-128-ctr" { - return nil, nil, fmt.Errorf("Cipher not supported: %v", keyProtected.Crypto.Cipher) - } - - keyId = uuid.Parse(keyProtected.Id) - mac, err := hex.DecodeString(keyProtected.Crypto.MAC) + mac, err := hex.DecodeString(cryptoJson.MAC) if err != nil { - return nil, nil, err + return nil, err } - iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV) + iv, err := hex.DecodeString(cryptoJson.CipherParams.IV) if err != nil { - return nil, nil, err + return nil, err } - cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText) + cipherText, err := hex.DecodeString(cryptoJson.CipherText) if err != nil { - return nil, nil, err + return nil, err } - derivedKey, err := getKDFKey(keyProtected.Crypto, auth) + derivedKey, err := getKDFKey(cryptoJson, auth) if err != nil { - return nil, nil, err + return nil, err } calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText) if !bytes.Equal(calculatedMAC, mac) { - return nil, nil, ErrDecrypt + return nil, ErrDecrypt } plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv) + if err != nil { + return nil, err + } + return plainText, err +} + +func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) { + if keyProtected.Version != version { + return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version) + } + keyId = uuid.Parse(keyProtected.Id) + plainText, err := DecryptDataV3(keyProtected.Crypto, auth) if err != nil { return nil, nil, err } @@ -427,7 +439,7 @@ func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainTex return plainText, err } -func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) { +func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) { authArray := []byte(auth) salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string)) if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain.go similarity index 100% rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_plain.go rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/plain.go diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go index 1554294e1..03055245f 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/presale.go @@ -38,7 +38,13 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou return accounts.Account{}, nil, err } key.Id = uuid.NewRandom() - a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: keyStore.JoinPath(keyFileName(key.Address))}} + a := 
accounts.Account{ + Address: key.Address, + URL: accounts.URL{ + Scheme: KeyStoreScheme, + Path: keyStore.JoinPath(keyFileName(key.Address)), + }, + } err = keyStore.StoreKey(a.URL.Path, key, password) return a, key, err } diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_wallet.go b/vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go similarity index 100% rename from vendor/github.com/ethereum/go-ethereum/accounts/keystore/keystore_wallet.go rename to vendor/github.com/ethereum/go-ethereum/accounts/keystore/wallet.go diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go index 2583fbc4d..7d5f67908 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/ledger.go @@ -350,7 +350,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction signer = new(types.HomesteadSigner) } else { signer = types.NewEIP155Signer(chainID) - signature[64] = signature[64] - byte(chainID.Uint64()*2+35) + signature[64] -= byte(chainID.Uint64()*2 + 35) } signed, err := tx.WithSignature(signer, signature) if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/trezor.go b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/trezor.go index b84a95599..a9d2e9959 100644 --- a/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/trezor.go +++ b/vendor/github.com/ethereum/go-ethereum/accounts/usbwallet/trezor.go @@ -221,7 +221,7 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction signer = new(types.HomesteadSigner) } else { signer = types.NewEIP155Signer(chainID) - signature[64] = signature[64] - byte(chainID.Uint64()*2+35) + signature[64] -= byte(chainID.Uint64()*2 + 35) } // Inject the final signature into the transaction and sanity check the sender signed, err := tx.WithSignature(signer, signature) diff --git a/vendor/github.com/ethereum/go-ethereum/appveyor.yml b/vendor/github.com/ethereum/go-ethereum/appveyor.yml index 11848ddb9..e5126b252 100644 --- a/vendor/github.com/ethereum/go-ethereum/appveyor.yml +++ b/vendor/github.com/ethereum/go-ethereum/appveyor.yml @@ -23,8 +23,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.1.windows-%GETH_ARCH%.zip - - 7z x go1.11.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.2.windows-%GETH_ARCH%.zip + - 7z x go1.11.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/bootnode/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/bootnode/main.go index 845900865..32f7d63be 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/bootnode/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/bootnode/main.go @@ -38,7 +38,7 @@ func main() { var ( listenAddr = flag.String("addr", ":30301", "listen address") genKey = flag.String("genkey", "", "generate a node key") - writeAddr = flag.Bool("writeaddress", false, "write out the node's pubkey hash and quit") + writeAddr = flag.Bool("writeaddress", false, "write out the node's public key and quit") nodeKeyFile = flag.String("nodekey", "", "private key filename") nodeKeyHex = flag.String("nodekeyhex", "", "private key as hex (for testing)") natdesc = flag.String("nat", "none", "port 
mapping mechanism (any|none|upnp|pmp|extip:)") @@ -86,7 +86,7 @@ func main() { } if *writeAddr { - fmt.Printf("%v\n", enode.PubkeyToIDV4(&nodeKey.PublicKey)) + fmt.Printf("%x\n", crypto.FromECDSAPub(&nodeKey.PublicKey)[1:]) os.Exit(0) } @@ -119,16 +119,17 @@ func main() { } if *runv5 { - if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil { + if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil { utils.Fatalf("%v", err) } } else { + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, nodeKey) cfg := discover.Config{ - PrivateKey: nodeKey, - AnnounceAddr: realaddr, - NetRestrict: restrictList, + PrivateKey: nodeKey, + NetRestrict: restrictList, } - if _, err := discover.ListenUDP(conn, cfg); err != nil { + if _, err := discover.ListenUDP(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md b/vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md index c02ac44d8..c9461be10 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md +++ b/vendor/github.com/ethereum/go-ethereum/cmd/clef/README.md @@ -91,7 +91,7 @@ invoking methods with the following info: * [x] Version info about the signer * [x] Address of API (http/ipc) * [ ] List of known accounts -* [ ] Have a default timeout on signing operations, so that if the user has not answered withing e.g. 60 seconds, the request is rejected. +* [ ] Have a default timeout on signing operations, so that if the user has not answered within e.g. 60 seconds, the request is rejected. * [ ] `account_signRawTransaction` * [ ] `account_bulkSignTransactions([] transactions)` should * only exist if enabled via config/flag @@ -129,7 +129,7 @@ The signer listens to HTTP requests on `rpcaddr`:`rpcport`, with the same JSONRP expected to be JSON [jsonrpc 2.0 standard](http://www.jsonrpc.org/specification). Some of these call can require user interaction. Clients must be aware that responses -may be delayed significanlty or may never be received if a users decides to ignore the confirmation request. +may be delayed significantly or may never be received if a users decides to ignore the confirmation request. The External API is **untrusted** : it does not accept credentials over this api, nor does it expect that requests have any authority. @@ -862,7 +862,7 @@ A UI should conform to the following rules. * A UI SHOULD inform the user about the `SHA256` or `MD5` hash of the binary being executed * A UI SHOULD NOT maintain a secondary storage of data, e.g. list of accounts * The signer provides accounts -* A UI SHOULD, to the best extent possible, use static linking / bundling, so that requried libraries are bundled +* A UI SHOULD, to the best extent possible, use static linking / bundling, so that required libraries are bundled along with the UI. diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md b/vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md index 9e13f67d0..92a39a268 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md +++ b/vendor/github.com/ethereum/go-ethereum/cmd/clef/intapi_changelog.md @@ -1,5 +1,9 @@ ### Changelog for internal API (ui-api) +### 3.0.0 + +* Make use of `OnInputRequired(info UserInputRequest)` for obtaining master password during startup + ### 2.1.0 * Add `OnInputRequired(info UserInputRequest)` to internal API. This method is used when Clef needs user input, e.g. passwords. 
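For orientation, a rough sketch of the two structures involved in this exchange (field names as they appear elsewhere in this patch; the signer `core` package holds the authoritative definitions):

```go
// Sketch only; see the signer core package for the real types.
type UserInputRequest struct {
	Title      string `json:"title"`      // e.g. "Master Password"
	Prompt     string `json:"prompt"`     // text shown to the user
	IsPassword bool   `json:"isPassword"` // UIs should mask the input
}

type UserInputResponse struct {
	Text string `json:"text"`
}
```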
@@ -14,7 +18,6 @@ The following structures are used: UserInputResponse struct { Text string `json:"text"` } -``` ### 2.0.0 diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go index c060285be..519d63b3c 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/clef/main.go @@ -35,8 +35,10 @@ import ( "runtime" "strings" + "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -48,10 +50,10 @@ import ( ) // ExternalAPIVersion -- see extapi_changelog.md -const ExternalAPIVersion = "3.0.0" +const ExternalAPIVersion = "4.0.0" // InternalAPIVersion -- see intapi_changelog.md -const InternalAPIVersion = "2.0.0" +const InternalAPIVersion = "3.0.0" const legalWarning = ` WARNING! @@ -91,7 +93,7 @@ var ( } signerSecretFlag = cli.StringFlag{ Name: "signersecret", - Usage: "A file containing the password used to encrypt Clef credentials, e.g. keystore credentials and ruleset hash", + Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. keystore credentials and ruleset hash", } dBFlag = cli.StringFlag{ Name: "4bytedb", @@ -155,18 +157,18 @@ Whenever you make an edit to the rule file, you need to use attestation to tell Clef that the file is 'safe' to execute.`, } - addCredentialCommand = cli.Command{ - Action: utils.MigrateFlags(addCredential), - Name: "addpw", + setCredentialCommand = cli.Command{ + Action: utils.MigrateFlags(setCredential), + Name: "setpw", Usage: "Store a credential for a keystore file", - ArgsUsage: "
", + ArgsUsage: "
", Flags: []cli.Flag{ logLevelFlag, configdirFlag, signerSecretFlag, }, Description: ` -The addpw command stores a password for a given address (keyfile). If you invoke it with only one parameter, it will + The setpw command stores a password for a given address (keyfile). If you enter a blank passphrase, it will remove any stored credential for that address (keyfile) `, } @@ -198,7 +200,7 @@ func init() { advancedMode, } app.Action = signer - app.Commands = []cli.Command{initCommand, attestCommand, addCredentialCommand} + app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand} } func main() { @@ -212,25 +214,45 @@ func initializeSecrets(c *cli.Context) error { if err := initialize(c); err != nil { return err } - configDir := c.String(configdirFlag.Name) + configDir := c.GlobalString(configdirFlag.Name) masterSeed := make([]byte, 256) - n, err := io.ReadFull(rand.Reader, masterSeed) + num, err := io.ReadFull(rand.Reader, masterSeed) if err != nil { return err } - if n != len(masterSeed) { + if num != len(masterSeed) { return fmt.Errorf("failed to read enough random") } + + n, p := keystore.StandardScryptN, keystore.StandardScryptP + if c.GlobalBool(utils.LightKDFFlag.Name) { + n, p = keystore.LightScryptN, keystore.LightScryptP + } + text := "The master seed of clef is locked with a password. Please give a password. Do not forget this password." + var password string + for { + password = getPassPhrase(text, true) + if err := core.ValidatePasswordFormat(password); err != nil { + fmt.Printf("invalid password: %v\n", err) + } else { + break + } + } + cipherSeed, err := encryptSeed(masterSeed, []byte(password), n, p) + if err != nil { + return fmt.Errorf("failed to encrypt master seed: %v", err) + } + err = os.Mkdir(configDir, 0700) if err != nil && !os.IsExist(err) { return err } - location := filepath.Join(configDir, "secrets.dat") + location := filepath.Join(configDir, "masterseed.json") if _, err := os.Stat(location); err == nil { return fmt.Errorf("file %v already exists, will not overwrite", location) } - err = ioutil.WriteFile(location, masterSeed, 0400) + err = ioutil.WriteFile(location, cipherSeed, 0400) if err != nil { return err } @@ -255,11 +277,11 @@ func attestFile(ctx *cli.Context) error { return err } - stretchedKey, err := readMasterKey(ctx) + stretchedKey, err := readMasterKey(ctx, nil) if err != nil { utils.Fatalf(err.Error()) } - configDir := ctx.String(configdirFlag.Name) + configDir := ctx.GlobalString(configdirFlag.Name) vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10])) confKey := crypto.Keccak256([]byte("config"), stretchedKey) @@ -271,38 +293,36 @@ func attestFile(ctx *cli.Context) error { return nil } -func addCredential(ctx *cli.Context) error { +func setCredential(ctx *cli.Context) error { if len(ctx.Args()) < 1 { - utils.Fatalf("This command requires at leaste one argument.") + utils.Fatalf("This command requires an address to be passed as an argument.") } if err := initialize(ctx); err != nil { return err } - stretchedKey, err := readMasterKey(ctx) + address := ctx.Args().First() + password := getPassPhrase("Enter a passphrase to store with this address.", true) + + stretchedKey, err := readMasterKey(ctx, nil) if err != nil { utils.Fatalf(err.Error()) } - configDir := ctx.String(configdirFlag.Name) + configDir := ctx.GlobalString(configdirFlag.Name) vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), stretchedKey)[:10])) pwkey := 
crypto.Keccak256([]byte("credentials"), stretchedKey) // Initialize the encrypted storages pwStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "credentials.json"), pwkey) - key := ctx.Args().First() - value := "" - if len(ctx.Args()) > 1 { - value = ctx.Args().Get(1) - } - pwStorage.Put(key, value) - log.Info("Credential store updated", "key", key) + pwStorage.Put(address, password) + log.Info("Credential store updated", "key", address) return nil } func initialize(c *cli.Context) error { // Set up the logger to print everything logOutput := os.Stdout - if c.Bool(stdiouiFlag.Name) { + if c.GlobalBool(stdiouiFlag.Name) { logOutput = os.Stderr // If using the stdioui, we can't do the 'confirm'-flow fmt.Fprintf(logOutput, legalWarning) @@ -323,26 +343,28 @@ func signer(c *cli.Context) error { var ( ui core.SignerUI ) - if c.Bool(stdiouiFlag.Name) { + if c.GlobalBool(stdiouiFlag.Name) { log.Info("Using stdin/stdout as UI-channel") ui = core.NewStdIOUI() } else { log.Info("Using CLI as UI-channel") ui = core.NewCommandlineUI() } - db, err := core.NewAbiDBFromFiles(c.String(dBFlag.Name), c.String(customDBFlag.Name)) + fourByteDb := c.GlobalString(dBFlag.Name) + fourByteLocal := c.GlobalString(customDBFlag.Name) + db, err := core.NewAbiDBFromFiles(fourByteDb, fourByteLocal) if err != nil { utils.Fatalf(err.Error()) } - log.Info("Loaded 4byte db", "signatures", db.Size(), "file", c.String("4bytedb")) + log.Info("Loaded 4byte db", "signatures", db.Size(), "file", fourByteDb, "local", fourByteLocal) var ( api core.ExternalAPI ) - configDir := c.String(configdirFlag.Name) - if stretchedKey, err := readMasterKey(c); err != nil { - log.Info("No master seed provided, rules disabled") + configDir := c.GlobalString(configdirFlag.Name) + if stretchedKey, err := readMasterKey(c, ui); err != nil { + log.Info("No master seed provided, rules disabled", "error", err) } else { if err != nil { @@ -361,7 +383,7 @@ func signer(c *cli.Context) error { configStorage := storage.NewAESEncryptedStorage(filepath.Join(vaultLocation, "config.json"), confkey) //Do we have a rule-file? 
- ruleJS, err := ioutil.ReadFile(c.String(ruleFlag.Name)) + ruleJS, err := ioutil.ReadFile(c.GlobalString(ruleFlag.Name)) if err != nil { log.Info("Could not load rulefile, rules not enabled", "file", "rulefile") } else { @@ -385,17 +407,15 @@ func signer(c *cli.Context) error { } apiImpl := core.NewSignerAPI( - c.Int64(utils.NetworkIdFlag.Name), - c.String(keystoreFlag.Name), - c.Bool(utils.NoUSBFlag.Name), + c.GlobalInt64(utils.NetworkIdFlag.Name), + c.GlobalString(keystoreFlag.Name), + c.GlobalBool(utils.NoUSBFlag.Name), ui, db, - c.Bool(utils.LightKDFFlag.Name), - c.Bool(advancedMode.Name)) - + c.GlobalBool(utils.LightKDFFlag.Name), + c.GlobalBool(advancedMode.Name)) api = apiImpl - // Audit logging - if logfile := c.String(auditLogFlag.Name); logfile != "" { + if logfile := c.GlobalString(auditLogFlag.Name); logfile != "" { api, err = core.NewAuditLogger(logfile, api) if err != nil { utils.Fatalf(err.Error()) @@ -414,13 +434,13 @@ func signer(c *cli.Context) error { Service: api, Version: "1.0"}, } - if c.Bool(utils.RPCEnabledFlag.Name) { + if c.GlobalBool(utils.RPCEnabledFlag.Name) { vhosts := splitAndTrim(c.GlobalString(utils.RPCVirtualHostsFlag.Name)) cors := splitAndTrim(c.GlobalString(utils.RPCCORSDomainFlag.Name)) // start http server - httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name)) + httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.RPCListenAddrFlag.Name), c.Int(rpcPortFlag.Name)) listener, _, err := rpc.StartHTTPEndpoint(httpEndpoint, rpcAPI, []string{"account"}, cors, vhosts, rpc.DefaultHTTPTimeouts) if err != nil { utils.Fatalf("Could not start RPC api: %v", err) @@ -434,9 +454,9 @@ func signer(c *cli.Context) error { }() } - if !c.Bool(utils.IPCDisabledFlag.Name) { + if !c.GlobalBool(utils.IPCDisabledFlag.Name) { if c.IsSet(utils.IPCPathFlag.Name) { - ipcapiURL = c.String(utils.IPCPathFlag.Name) + ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name) } else { ipcapiURL = filepath.Join(configDir, "clef.ipc") } @@ -453,7 +473,7 @@ func signer(c *cli.Context) error { } - if c.Bool(testFlag.Name) { + if c.GlobalBool(testFlag.Name) { log.Info("Performing UI test") go testExternalUI(apiImpl) } @@ -512,36 +532,52 @@ func homeDir() string { } return "" } -func readMasterKey(ctx *cli.Context) ([]byte, error) { +func readMasterKey(ctx *cli.Context, ui core.SignerUI) ([]byte, error) { var ( file string - configDir = ctx.String(configdirFlag.Name) + configDir = ctx.GlobalString(configdirFlag.Name) ) - if ctx.IsSet(signerSecretFlag.Name) { - file = ctx.String(signerSecretFlag.Name) + if ctx.GlobalIsSet(signerSecretFlag.Name) { + file = ctx.GlobalString(signerSecretFlag.Name) } else { - file = filepath.Join(configDir, "secrets.dat") + file = filepath.Join(configDir, "masterseed.json") } if err := checkFile(file); err != nil { return nil, err } - masterKey, err := ioutil.ReadFile(file) + cipherKey, err := ioutil.ReadFile(file) if err != nil { return nil, err } - if len(masterKey) < 256 { - return nil, fmt.Errorf("master key of insufficient length, expected >255 bytes, got %d", len(masterKey)) + var password string + // If ui is not nil, get the password from ui. 
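+	// Otherwise fall back to an interactive prompt on the console.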
+ if ui != nil { + resp, err := ui.OnInputRequired(core.UserInputRequest{ + Title: "Master Password", + Prompt: "Please enter the password to decrypt the master seed", + IsPassword: true}) + if err != nil { + return nil, err + } + password = resp.Text + } else { + password = getPassPhrase("Decrypt master seed of clef", false) } + masterSeed, err := decryptSeed(cipherKey, password) + if err != nil { + return nil, fmt.Errorf("failed to decrypt the master seed of clef") + } + if len(masterSeed) < 256 { + return nil, fmt.Errorf("master seed of insufficient length, expected >255 bytes, got %d", len(masterSeed)) + } + // Create vault location - vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterKey)[:10])) + vaultLocation := filepath.Join(configDir, common.Bytes2Hex(crypto.Keccak256([]byte("vault"), masterSeed)[:10])) err = os.Mkdir(vaultLocation, 0700) if err != nil && !os.IsExist(err) { return nil, err } - //!TODO, use KDF to stretch the master key - // stretched_key := stretch_key(master_key) - - return masterKey, nil + return masterSeed, nil } // checkFile is a convenience function to check if a file @@ -619,6 +655,59 @@ func testExternalUI(api *core.SignerAPI) { } +// getPassPhrase retrieves the password associated with clef, either fetched +// from a list of preloaded passphrases, or requested interactively from the user. +// TODO: there are many `getPassPhrase` functions, it will be better to abstract them into one. +func getPassPhrase(prompt string, confirmation bool) string { + fmt.Println(prompt) + password, err := console.Stdin.PromptPassword("Passphrase: ") + if err != nil { + utils.Fatalf("Failed to read passphrase: %v", err) + } + if confirmation { + confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ") + if err != nil { + utils.Fatalf("Failed to read passphrase confirmation: %v", err) + } + if password != confirm { + utils.Fatalf("Passphrases do not match") + } + } + return password +} + +type encryptedSeedStorage struct { + Description string `json:"description"` + Version int `json:"version"` + Params keystore.CryptoJSON `json:"params"` +} + +// encryptSeed uses a similar scheme as the keystore uses, but with a different wrapping, +// to encrypt the master seed +func encryptSeed(seed []byte, auth []byte, scryptN, scryptP int) ([]byte, error) { + cryptoStruct, err := keystore.EncryptDataV3(seed, auth, scryptN, scryptP) + if err != nil { + return nil, err + } + return json.Marshal(&encryptedSeedStorage{"Clef seed", 1, cryptoStruct}) +} + +// decryptSeed decrypts the master seed +func decryptSeed(keyjson []byte, auth string) ([]byte, error) { + var encSeed encryptedSeedStorage + if err := json.Unmarshal(keyjson, &encSeed); err != nil { + return nil, err + } + if encSeed.Version != 1 { + log.Warn(fmt.Sprintf("unsupported encryption format of seed: %d, operation will likely fail", encSeed.Version)) + } + seed, err := keystore.DecryptDataV3(encSeed.Params, auth) + if err != nil { + return nil, err + } + return seed, err +} + /** //Create Account diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go index 962fc021d..54b67ce10 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/runner.go @@ -89,7 +89,7 @@ func runCmd(ctx *cli.Context) error { genesisConfig *core.Genesis ) if ctx.GlobalBool(MachineFlag.Name) { - tracer = NewJSONLogger(logconfig, os.Stdout) + tracer = 
vm.NewJSONLogger(logconfig, os.Stdout) } else if ctx.GlobalBool(DebugFlag.Name) { debugLogger = vm.NewStructLogger(logconfig) tracer = debugLogger @@ -206,6 +206,7 @@ func runCmd(ctx *cli.Context) error { execTime := time.Since(tstart) if ctx.GlobalBool(DumpFlag.Name) { + statedb.Commit(true) statedb.IntermediateRoot(true) fmt.Println(string(statedb.Dump())) } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go b/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go index 06c9be380..b3c69d9b9 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/evm/staterunner.go @@ -68,7 +68,7 @@ func stateTestCmd(ctx *cli.Context) error { ) switch { case ctx.GlobalBool(MachineFlag.Name): - tracer = NewJSONLogger(config, os.Stderr) + tracer = vm.NewJSONLogger(config, os.Stderr) case ctx.GlobalBool(DebugFlag.Name): debugger = vm.NewStructLogger(config) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go index 2ffe12276..a7c20db77 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go @@ -256,7 +256,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u } for _, boot := range enodes { old, err := enode.ParseV4(boot.String()) - if err != nil { + if err == nil { stack.Server().AddPeer(old) } } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go index d724562eb..c25b16a79 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/config.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "math/big" "os" "reflect" "unicode" @@ -153,7 +154,9 @@ func enableWhisper(ctx *cli.Context) bool { func makeFullNode(ctx *cli.Context) *node.Node { stack, cfg := makeConfigNode(ctx) - + if ctx.GlobalIsSet(utils.ConstantinopleOverrideFlag.Name) { + cfg.Eth.ConstantinopleOverride = new(big.Int).SetUint64(ctx.GlobalUint64(utils.ConstantinopleOverrideFlag.Name)) + } utils.RegisterEthService(stack, &cfg.Eth) if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go index bdc624d56..2da98c30e 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go @@ -21,7 +21,6 @@ import ( "fmt" "math" "os" - "runtime" godebug "runtime/debug" "sort" "strconv" @@ -92,8 +91,10 @@ var ( utils.LightServFlag, utils.LightPeersFlag, utils.LightKDFFlag, + utils.WhitelistFlag, utils.CacheFlag, utils.CacheDatabaseFlag, + utils.CacheTrieFlag, utils.CacheGCFlag, utils.TrieCacheGenFlag, utils.ListenPortFlag, @@ -126,6 +127,7 @@ var ( utils.RinkebyFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, + utils.ConstantinopleOverrideFlag, utils.RPCCORSDomainFlag, utils.RPCVirtualHostsFlag, utils.EthStatsURLFlag, @@ -213,8 +215,6 @@ func init() { app.Flags = append(app.Flags, metricsFlags...) 
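// Note: runtime.GOMAXPROCS(runtime.NumCPU()) has been the Go runtime default
// since Go 1.5, which is why the explicit call is dropped from app.Before below.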
app.Before = func(ctx *cli.Context) error { - runtime.GOMAXPROCS(runtime.NumCPU()) - logdir := "" if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) { logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs") diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go index 8b0491ce3..25a702dd7 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go @@ -81,6 +81,7 @@ var AppHelpFlagGroups = []flagGroup{ utils.LightServFlag, utils.LightPeersFlag, utils.LightKDFFlag, + utils.WhitelistFlag, }, }, { @@ -132,6 +133,7 @@ var AppHelpFlagGroups = []flagGroup{ Flags: []cli.Flag{ utils.CacheFlag, utils.CacheDatabaseFlag, + utils.CacheTrieFlag, utils.CacheGCFlag, utils.TrieCacheGenFlag, }, diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go index 5f39a889d..1025dfe82 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go @@ -20,35 +20,41 @@ import ( "encoding/binary" "errors" "math" + "math/big" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + math2 "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/params" ) -// cppEthereumGenesisSpec represents the genesis specification format used by the +// alethGenesisSpec represents the genesis specification format used by the // C++ Ethereum implementation. -type cppEthereumGenesisSpec struct { +type alethGenesisSpec struct { SealEngine string `json:"sealEngine"` Params struct { - AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"` - HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"` - EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"` - EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"` - ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"` - ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"` - NetworkID hexutil.Uint64 `json:"networkID"` - ChainID hexutil.Uint64 `json:"chainID"` - MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` - MinGasLimit hexutil.Uint64 `json:"minGasLimit"` - MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"` - GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"` - MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"` - DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"` - DurationLimit *hexutil.Big `json:"durationLimit"` - BlockReward *hexutil.Big `json:"blockReward"` + AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"` + MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` + HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"` + DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"` + EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"` + EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"` + ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"` + ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"` + MinGasLimit hexutil.Uint64 `json:"minGasLimit"` + MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"` + TieBreakingGas bool `json:"tieBreakingGas"` + GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` + MinimumDifficulty *hexutil.Big 
`json:"minimumDifficulty"` + DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"` + DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"` + BlockReward *hexutil.Big `json:"blockReward"` + NetworkID hexutil.Uint64 `json:"networkID"` + ChainID hexutil.Uint64 `json:"chainID"` + AllowFutureBlocks bool `json:"allowFutureBlocks"` } `json:"params"` Genesis struct { @@ -62,57 +68,68 @@ type cppEthereumGenesisSpec struct { GasLimit hexutil.Uint64 `json:"gasLimit"` } `json:"genesis"` - Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"` + Accounts map[common.UnprefixedAddress]*alethGenesisSpecAccount `json:"accounts"` } -// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled +// alethGenesisSpecAccount is the prefunded genesis account and/or precompiled // contract definition. -type cppEthereumGenesisSpecAccount struct { - Balance *hexutil.Big `json:"balance"` - Nonce uint64 `json:"nonce,omitempty"` - Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"` +type alethGenesisSpecAccount struct { + Balance *math2.HexOrDecimal256 `json:"balance"` + Nonce uint64 `json:"nonce,omitempty"` + Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"` } -// cppEthereumGenesisSpecBuiltin is the precompiled contract definition. -type cppEthereumGenesisSpecBuiltin struct { - Name string `json:"name,omitempty"` - StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"` - Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"` +// alethGenesisSpecBuiltin is the precompiled contract definition. +type alethGenesisSpecBuiltin struct { + Name string `json:"name,omitempty"` + StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"` + Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"` } -type cppEthereumGenesisSpecLinearPricing struct { +type alethGenesisSpecLinearPricing struct { Base uint64 `json:"base"` Word uint64 `json:"word"` } -// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific +// newAlethGenesisSpec converts a go-ethereum genesis block into an Aleth-specific // chain specification format.
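// The aleth spec fields above move from hexutil types to math.HexOrDecimal64
// and math.HexOrDecimal256 (imported as math2 in this hunk), which unmarshal
// both hex and plain decimal JSON strings; that leniency is what lets foreign
// chain specs round-trip. A runnable sketch of the dual-format behaviour,
// using a local mimic rather than the go-ethereum type:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// hexOrDecimal64 mimics math.HexOrDecimal64: "0x400" and "1024" both decode.
type hexOrDecimal64 uint64

func (h *hexOrDecimal64) UnmarshalJSON(b []byte) error {
	s := strings.Trim(string(b), `"`)
	base := 10
	if strings.HasPrefix(s, "0x") {
		s, base = s[2:], 16
	}
	v, err := strconv.ParseUint(s, base, 64)
	*h = hexOrDecimal64(v)
	return err
}

func main() {
	var a, b hexOrDecimal64
	json.Unmarshal([]byte(`"0x400"`), &a)
	json.Unmarshal([]byte(`"1024"`), &b)
	fmt.Println(a, b) // 1024 1024
}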
-func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) { - // Only ethash is currently supported between go-ethereum and cpp-ethereum +func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSpec, error) { + // Only ethash is currently supported between go-ethereum and aleth if genesis.Config.Ethash == nil { return nil, errors.New("unsupported consensus engine") } - // Reconstruct the chain spec in Parity's format - spec := &cppEthereumGenesisSpec{ + // Reconstruct the chain spec in Aleth format + spec := &alethGenesisSpec{ SealEngine: "Ethash", } + // Some defaults spec.Params.AccountStartNonce = 0 + spec.Params.TieBreakingGas = false + spec.Params.AllowFutureBlocks = false + spec.Params.DaoHardforkBlock = 0 + spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64()) spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64()) spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64()) - spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()) - spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64) + + // Byzantium + if num := genesis.Config.ByzantiumBlock; num != nil { + spec.setByzantium(num) + } + // Constantinople + if num := genesis.Config.ConstantinopleBlock; num != nil { + spec.setConstantinople(num) + } spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) - spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit) - spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64) + spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxInt64) spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty) - spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor) - spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor) - spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit) + spec.Params.DifficultyBoundDivisor = (*math2.HexOrDecimal256)(params.DifficultyBoundDivisor) + spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor) + spec.Params.DurationLimit = (*math2.HexOrDecimal256)(params.DurationLimit) spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward) spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8)) @@ -126,77 +143,104 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) - spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount) for address, account := range genesis.Alloc { - spec.Accounts[address] = &cppEthereumGenesisSpecAccount{ - Balance: (*hexutil.Big)(account.Balance), - Nonce: account.Nonce, - } - } - spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000}, - } - spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12}, - } - spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, 
Word: 120}, - } - spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3}, + spec.setAccount(address, account) } + + spec.setPrecompile(1, &alethGenesisSpecBuiltin{Name: "ecrecover", + Linear: &alethGenesisSpecLinearPricing{Base: 3000}}) + spec.setPrecompile(2, &alethGenesisSpecBuiltin{Name: "sha256", + Linear: &alethGenesisSpecLinearPricing{Base: 60, Word: 12}}) + spec.setPrecompile(3, &alethGenesisSpecBuiltin{Name: "ripemd160", + Linear: &alethGenesisSpecLinearPricing{Base: 600, Word: 120}}) + spec.setPrecompile(4, &alethGenesisSpecBuiltin{Name: "identity", + Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}}) if genesis.Config.ByzantiumBlock != nil { - spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), - } - spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500}, - } - spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000}, - } - spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{ - Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), - } + spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp", + StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())}) + spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add", + StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), + Linear: &alethGenesisSpecLinearPricing{Base: 500}}) + spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul", + StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), + Linear: &alethGenesisSpecLinearPricing{Base: 40000}}) + spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product", + StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())}) } return spec, nil } +func (spec *alethGenesisSpec) setPrecompile(address byte, data *alethGenesisSpecBuiltin) { + if spec.Accounts == nil { + spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount) + } + spec.Accounts[common.UnprefixedAddress(common.BytesToAddress([]byte{address}))].Precompiled = data +} + +func (spec *alethGenesisSpec) setAccount(address common.Address, account core.GenesisAccount) { + if spec.Accounts == nil { + spec.Accounts = make(map[common.UnprefixedAddress]*alethGenesisSpecAccount) + } + + a, exist := spec.Accounts[common.UnprefixedAddress(address)] + if !exist { + a = &alethGenesisSpecAccount{} + spec.Accounts[common.UnprefixedAddress(address)] = a + } + a.Balance = (*math2.HexOrDecimal256)(account.Balance) + a.Nonce = account.Nonce + +} + +func (spec *alethGenesisSpec) setByzantium(num *big.Int) { + spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64()) +} + +func (spec *alethGenesisSpec) setConstantinople(num *big.Int) { + spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64()) +} + // parityChainSpec is the chain specification format used by Parity. 
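// The setPrecompile/setAccount helpers introduced above replace direct writes
// into a map that used to be pre-allocated in one place; each helper lazily
// creates spec.Accounts (and, in the Parity variant further down, the entry
// itself) so a caller can never assign through a nil map. A stripped-down,
// runnable sketch of that lazy-initialisation pattern:

package main

import "fmt"

type account struct{ Precompiled string }

type spec struct {
	Accounts map[string]*account
}

// setPrecompile allocates the map on first use and the entry before writing
// through it, mirroring the helpers in this hunk.
func (s *spec) setPrecompile(addr, name string) {
	if s.Accounts == nil {
		s.Accounts = make(map[string]*account)
	}
	if _, ok := s.Accounts[addr]; !ok {
		s.Accounts[addr] = &account{}
	}
	s.Accounts[addr].Precompiled = name
}

func main() {
	var s spec // zero value: Accounts is nil, yet this is safe
	s.setPrecompile("0000000000000000000000000000000000000001", "ecrecover")
	fmt.Println(s.Accounts["0000000000000000000000000000000000000001"].Precompiled) // ecrecover
}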
type parityChainSpec struct { - Name string `json:"name"` - Engine struct { + Name string `json:"name"` + Datadir string `json:"dataDir"` + Engine struct { Ethash struct { Params struct { - MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"` - DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"` - DurationLimit *hexutil.Big `json:"durationLimit"` - BlockReward *hexutil.Big `json:"blockReward"` - HomesteadTransition uint64 `json:"homesteadTransition"` - EIP150Transition uint64 `json:"eip150Transition"` - EIP160Transition uint64 `json:"eip160Transition"` - EIP161abcTransition uint64 `json:"eip161abcTransition"` - EIP161dTransition uint64 `json:"eip161dTransition"` - EIP649Reward *hexutil.Big `json:"eip649Reward"` - EIP100bTransition uint64 `json:"eip100bTransition"` - EIP649Transition uint64 `json:"eip649Transition"` + MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"` + DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"` + DurationLimit *hexutil.Big `json:"durationLimit"` + BlockReward map[string]string `json:"blockReward"` + DifficultyBombDelays map[string]string `json:"difficultyBombDelays"` + HomesteadTransition hexutil.Uint64 `json:"homesteadTransition"` + EIP100bTransition hexutil.Uint64 `json:"eip100bTransition"` } `json:"params"` } `json:"Ethash"` } `json:"engine"` Params struct { - MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` - MinGasLimit hexutil.Uint64 `json:"minGasLimit"` - GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"` - NetworkID hexutil.Uint64 `json:"networkID"` - MaxCodeSize uint64 `json:"maxCodeSize"` - EIP155Transition uint64 `json:"eip155Transition"` - EIP98Transition uint64 `json:"eip98Transition"` - EIP86Transition uint64 `json:"eip86Transition"` - EIP140Transition uint64 `json:"eip140Transition"` - EIP211Transition uint64 `json:"eip211Transition"` - EIP214Transition uint64 `json:"eip214Transition"` - EIP658Transition uint64 `json:"eip658Transition"` + AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"` + MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` + MinGasLimit hexutil.Uint64 `json:"minGasLimit"` + GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` + NetworkID hexutil.Uint64 `json:"networkID"` + ChainID hexutil.Uint64 `json:"chainID"` + MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"` + MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"` + EIP98Transition hexutil.Uint64 `json:"eip98Transition"` + EIP150Transition hexutil.Uint64 `json:"eip150Transition"` + EIP160Transition hexutil.Uint64 `json:"eip160Transition"` + EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"` + EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"` + EIP155Transition hexutil.Uint64 `json:"eip155Transition"` + EIP140Transition hexutil.Uint64 `json:"eip140Transition"` + EIP211Transition hexutil.Uint64 `json:"eip211Transition"` + EIP214Transition hexutil.Uint64 `json:"eip214Transition"` + EIP658Transition hexutil.Uint64 `json:"eip658Transition"` + EIP145Transition hexutil.Uint64 `json:"eip145Transition"` + EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"` + EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"` + EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"` } `json:"params"` Genesis struct { @@ -215,22 +259,22 @@ type parityChainSpec struct { GasLimit hexutil.Uint64 `json:"gasLimit"` } `json:"genesis"` - Nodes []string `json:"nodes"` - Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"` + 
Nodes []string `json:"nodes"` + Accounts map[common.UnprefixedAddress]*parityChainSpecAccount `json:"accounts"` } // parityChainSpecAccount is the prefunded genesis account and/or precompiled // contract definition. type parityChainSpecAccount struct { - Balance *hexutil.Big `json:"balance"` - Nonce uint64 `json:"nonce,omitempty"` + Balance math2.HexOrDecimal256 `json:"balance"` + Nonce math2.HexOrDecimal64 `json:"nonce,omitempty"` Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"` } // parityChainSpecBuiltin is the precompiled contract definition. type parityChainSpecBuiltin struct { Name string `json:"name,omitempty"` - ActivateAt uint64 `json:"activate_at,omitempty"` + ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"` Pricing *parityChainSpecPricing `json:"pricing,omitempty"` } @@ -265,34 +309,51 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin } // Reconstruct the chain spec in Parity's format spec := &parityChainSpec{ - Name: network, - Nodes: bootnodes, + Name: network, + Nodes: bootnodes, + Datadir: strings.ToLower(network), } + spec.Engine.Ethash.Params.BlockReward = make(map[string]string) + spec.Engine.Ethash.Params.DifficultyBombDelays = make(map[string]string) + // Frontier spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty) spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor) spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit) - spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward) - spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64() - spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64() - spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64() - spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64() - spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64() - spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward) - spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64() - spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64() + spec.Engine.Ethash.Params.BlockReward["0x0"] = hexutil.EncodeBig(ethash.FrontierBlockReward) + // Homestead + spec.Engine.Ethash.Params.HomesteadTransition = hexutil.Uint64(genesis.Config.HomesteadBlock.Uint64()) + + // Tangerine Whistle : 150 + // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-608.md + spec.Params.EIP150Transition = hexutil.Uint64(genesis.Config.EIP150Block.Uint64()) + + // Spurious Dragon: 155, 160, 161, 170 + // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-607.md + spec.Params.EIP155Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64()) + spec.Params.EIP160Transition = hexutil.Uint64(genesis.Config.EIP155Block.Uint64()) + spec.Params.EIP161abcTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64()) + spec.Params.EIP161dTransition = hexutil.Uint64(genesis.Config.EIP158Block.Uint64()) + + // Byzantium + if num := genesis.Config.ByzantiumBlock; num != nil { + spec.setByzantium(num) + } + // Constantinople + if num := genesis.Config.ConstantinopleBlock; num != nil { + spec.setConstantinople(num) + } spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit) - spec.Params.GasLimitBoundDivisor = 
(hexutil.Uint64)(params.GasLimitBoundDivisor) + spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor) spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) + spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) spec.Params.MaxCodeSize = params.MaxCodeSize - spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64() - spec.Params.EIP98Transition = math.MaxUint64 - spec.Params.EIP86Transition = math.MaxUint64 - spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64() - spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64() - spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64() - spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64() + // geth has it set from zero + spec.Params.MaxCodeSizeTransition = 0 + + // Disable this one + spec.Params.EIP98Transition = math.MaxInt64 spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8)) binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce) @@ -305,42 +366,77 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) - spec.Accounts = make(map[common.Address]*parityChainSpecAccount) + spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount) for address, account := range genesis.Alloc { - spec.Accounts[address] = &parityChainSpecAccount{ - Balance: (*hexutil.Big)(account.Balance), - Nonce: account.Nonce, + bal := math2.HexOrDecimal256(*account.Balance) + + spec.Accounts[common.UnprefixedAddress(address)] = &parityChainSpecAccount{ + Balance: bal, + Nonce: math2.HexOrDecimal64(account.Nonce), } } - spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{ - Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}, - } - spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{ + spec.setPrecompile(1, &parityChainSpecBuiltin{Name: "ecrecover", + Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}}}) + + spec.setPrecompile(2, &parityChainSpecBuiltin{ Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}}, - } - spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{ + }) + spec.setPrecompile(3, &parityChainSpecBuiltin{ Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}}, - } - spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{ + }) + spec.setPrecompile(4, &parityChainSpecBuiltin{ Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}}, - } + }) if genesis.Config.ByzantiumBlock != nil { - spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{ - Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}}, - } - spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{ - Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}}, - } - spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{ - Name: 
"alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}}, - } - spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{ - Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}}, - } + blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64()) + spec.setPrecompile(5, &parityChainSpecBuiltin{ + Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}}, + }) + spec.setPrecompile(6, &parityChainSpecBuiltin{ + Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}}, + }) + spec.setPrecompile(7, &parityChainSpecBuiltin{ + Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}}, + }) + spec.setPrecompile(8, &parityChainSpecBuiltin{ + Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}}, + }) } return spec, nil } +func (spec *parityChainSpec) setPrecompile(address byte, data *parityChainSpecBuiltin) { + if spec.Accounts == nil { + spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount) + } + a := common.UnprefixedAddress(common.BytesToAddress([]byte{address})) + if _, exist := spec.Accounts[a]; !exist { + spec.Accounts[a] = &parityChainSpecAccount{} + } + spec.Accounts[a].Builtin = data +} + +func (spec *parityChainSpec) setByzantium(num *big.Int) { + spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ByzantiumBlockReward) + spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(3000000) + n := hexutil.Uint64(num.Uint64()) + spec.Engine.Ethash.Params.EIP100bTransition = n + spec.Params.EIP140Transition = n + spec.Params.EIP211Transition = n + spec.Params.EIP214Transition = n + spec.Params.EIP658Transition = n +} + +func (spec *parityChainSpec) setConstantinople(num *big.Int) { + spec.Engine.Ethash.Params.BlockReward[hexutil.EncodeBig(num)] = hexutil.EncodeBig(ethash.ConstantinopleBlockReward) + spec.Engine.Ethash.Params.DifficultyBombDelays[hexutil.EncodeBig(num)] = hexutil.EncodeUint64(2000000) + n := hexutil.Uint64(num.Uint64()) + spec.Params.EIP145Transition = n + spec.Params.EIP1014Transition = n + spec.Params.EIP1052Transition = n + spec.Params.EIP1283Transition = n +} + // pyEthereumGenesisSpec represents the genesis specification format used by the // Python Ethereum implementation. 
type pyEthereumGenesisSpec struct { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go index d22bd8110..cb3ed6e71 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go @@ -640,7 +640,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da files[filepath.Join(workdir, network+".json")] = genesis if conf.Genesis.Config.Ethash != nil { - cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis) + cppSpec, err := newAlethGenesisSpec(network, conf.Genesis) if err != nil { return nil, err } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go index a7d99a297..58ecb8395 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_ethstats.go @@ -43,7 +43,8 @@ version: '2' services: ethstats: build: . - image: {{.Network}}/ethstats{{if not .VHost}} + image: {{.Network}}/ethstats + container_name: {{.Network}}_ethstats_1{{if not .VHost}} ports: - "{{.Port}}:3000"{{end}} environment: diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go index e916deaf6..e465fa04a 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_explorer.go @@ -77,6 +77,7 @@ services: explorer: build: . image: {{.Network}}/explorer + container_name: {{.Network}}_explorer_1 ports: - "{{.NodePort}}:{{.NodePort}}" - "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go index 06c9fc0f5..3a06bf3c6 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_faucet.go @@ -56,8 +56,10 @@ services: faucet: build: . image: {{.Network}}/faucet + container_name: {{.Network}}_faucet_1 ports: - - "{{.EthPort}}:{{.EthPort}}"{{if not .VHost}} + - "{{.EthPort}}:{{.EthPort}}" + - "{{.EthPort}}:{{.EthPort}}/udp"{{if not .VHost}} - "{{.ApiPort}}:8080"{{end}} volumes: - {{.Datadir}}:/root/.faucet diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go index 7f87661d3..1b1ae61ff 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_nginx.go @@ -40,6 +40,7 @@ services: nginx: build: . image: {{.Network}}/nginx + container_name: {{.Network}}_nginx_1 ports: - "{{.Port}}:80" volumes: diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go index 069adfe4f..5d9ef4652 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_node.go @@ -55,6 +55,7 @@ services: {{.Type}}: build: . 
image: {{.Network}}/{{.Type}} + container_name: {{.Network}}_{{.Type}}_1 ports: - "{{.Port}}:{{.Port}}" - "{{.Port}}:{{.Port}}/udp" diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go index 90812c4a0..ebaa5b6ae 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_wallet.go @@ -57,6 +57,7 @@ services: wallet: build: . image: {{.Network}}/wallet + container_name: {{.Network}}_wallet_1 ports: - "{{.NodePort}}:{{.NodePort}}" - "{{.NodePort}}:{{.NodePort}}/udp" diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go index f9b8fe481..c3de5f936 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/puppeth.go @@ -43,18 +43,23 @@ func main() { Usage: "log level to emit to the screen", }, } - app.Action = func(c *cli.Context) error { + app.Before = func(c *cli.Context) error { // Set up the logger to print everything and the random generator log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(c.Int("loglevel")), log.StreamHandler(os.Stdout, log.TerminalFormat(true)))) rand.Seed(time.Now().UnixNano()) - network := c.String("network") - if strings.Contains(network, " ") || strings.Contains(network, "-") { - log.Crit("No spaces or hyphens allowed in network name") - } - // Start the wizard and relinquish control - makeWizard(c.String("network")).run() return nil } + app.Action = runWizard app.Run(os.Args) } + +// runWizard starts the wizard and relinquishes control to it. +func runWizard(c *cli.Context) error { + network := c.String("network") + if strings.Contains(network, " ") || strings.Contains(network, "-") || strings.ToLower(network) != network { + log.Crit("No spaces, hyphens or capital letters allowed in network name") + } + makeWizard(c.String("network")).run() + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json new file mode 100644 index 000000000..1ef1d8ae1 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_aleth.json @@ -0,0 +1,112 @@ +{ + "sealEngine":"Ethash", + "params":{ + "accountStartNonce":"0x00", + "maximumExtraDataSize":"0x20", + "homesteadForkBlock":"0x2710", + "daoHardforkBlock":"0x00", + "EIP150ForkBlock":"0x3a98", + "EIP158ForkBlock":"0x59d8", + "byzantiumForkBlock":"0x7530", + "constantinopleForkBlock":"0x9c40", + "minGasLimit":"0x1388", + "maxGasLimit":"0x7fffffffffffffff", + "tieBreakingGas":false, + "gasLimitBoundDivisor":"0x0400", + "minimumDifficulty":"0x20000", + "difficultyBoundDivisor":"0x0800", + "durationLimit":"0x0d", + "blockReward":"0x4563918244F40000", + "networkID":"0x4cb2e", + "chainID":"0x4cb2e", + "allowFutureBlocks":false + }, + "genesis":{ + "nonce":"0x0000000000000000", + "difficulty":"0x20000", + "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "author":"0x0000000000000000000000000000000000000000", + "timestamp":"0x59a4e76d", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", + "gasLimit":"0x47b760" + }, + "accounts":{ + "0000000000000000000000000000000000000001":{ + "balance":"1",
"precompiled":{ + "name":"ecrecover", + "linear":{ + "base":3000, + "word":0 + } + } + }, + "0000000000000000000000000000000000000002":{ + "balance":"1", + "precompiled":{ + "name":"sha256", + "linear":{ + "base":60, + "word":12 + } + } + }, + "0000000000000000000000000000000000000003":{ + "balance":"1", + "precompiled":{ + "name":"ripemd160", + "linear":{ + "base":600, + "word":120 + } + } + }, + "0000000000000000000000000000000000000004":{ + "balance":"1", + "precompiled":{ + "name":"identity", + "linear":{ + "base":15, + "word":3 + } + } + }, + "0000000000000000000000000000000000000005":{ + "balance":"1", + "precompiled":{ + "name":"modexp", + "startingBlock":"0x7530" + } + }, + "0000000000000000000000000000000000000006":{ + "balance":"1", + "precompiled":{ + "name":"alt_bn128_G1_add", + "startingBlock":"0x7530", + "linear":{ + "base":500, + "word":0 + } + } + }, + "0000000000000000000000000000000000000007":{ + "balance":"1", + "precompiled":{ + "name":"alt_bn128_G1_mul", + "startingBlock":"0x7530", + "linear":{ + "base":40000, + "word":0 + } + } + }, + "0000000000000000000000000000000000000008":{ + "balance":"1", + "precompiled":{ + "name":"alt_bn128_pairing_product", + "startingBlock":"0x7530" + } + } + } +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json new file mode 100644 index 000000000..c8c3b3c95 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_geth.json @@ -0,0 +1,47 @@ +{ + "config": { + "ethash":{}, + "chainId": 314158, + "homesteadBlock": 10000, + "eip150Block": 15000, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 23000, + "eip158Block": 23000, + "byzantiumBlock": 30000, + "constantinopleBlock": 40000 + }, + "nonce": "0x0", + "timestamp": "0x59a4e76d", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", + "gasLimit": "0x47b760", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0000000000000000000000000000000000000001": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000002": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000003": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000004": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000005": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000006": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000007": { + "balance": "0x01" + }, + "0000000000000000000000000000000000000008": { + "balance": "0x01" + } + } +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json new file mode 100644 index 000000000..f3fa8386a --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/testdata/stureby_parity.json @@ -0,0 +1,181 @@ +{ + "name":"Stureby", + "dataDir":"stureby", + "engine":{ + "Ethash":{ + "params":{ + "minimumDifficulty":"0x20000", + "difficultyBoundDivisor":"0x800", + "durationLimit":"0xd", + "blockReward":{ + "0x0":"0x4563918244f40000", + "0x7530":"0x29a2241af62c0000", + "0x9c40":"0x1bc16d674ec80000" + }, + 
"homesteadTransition":"0x2710", + "eip100bTransition":"0x7530", + "difficultyBombDelays":{ + "0x7530":"0x2dc6c0", + "0x9c40":"0x1e8480" + } + } + } + }, + "params":{ + "accountStartNonce":"0x0", + "maximumExtraDataSize":"0x20", + "gasLimitBoundDivisor":"0x400", + "minGasLimit":"0x1388", + "networkID":"0x4cb2e", + "chainID":"0x4cb2e", + "maxCodeSize":"0x6000", + "maxCodeSizeTransition":"0x0", + "eip98Transition": "0x7fffffffffffffff", + "eip150Transition":"0x3a98", + "eip160Transition":"0x59d8", + "eip161abcTransition":"0x59d8", + "eip161dTransition":"0x59d8", + "eip155Transition":"0x59d8", + "eip140Transition":"0x7530", + "eip211Transition":"0x7530", + "eip214Transition":"0x7530", + "eip658Transition":"0x7530", + "eip145Transition":"0x9c40", + "eip1014Transition":"0x9c40", + "eip1052Transition":"0x9c40", + "eip1283Transition":"0x9c40" + }, + "genesis":{ + "seal":{ + "ethereum":{ + "nonce":"0x0000000000000000", + "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty":"0x20000", + "author":"0x0000000000000000000000000000000000000000", + "timestamp":"0x59a4e76d", + "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", + "gasLimit":"0x47b760" + }, + "nodes":[ + "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303", + "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303", + "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303", + "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303", + "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303", + "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303", + "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404", + "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414" + ], + "accounts":{ + "0000000000000000000000000000000000000001":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"ecrecover", + "pricing":{ + "linear":{ + "base":3000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000002":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"sha256", + "pricing":{ + "linear":{ + "base":60, + "word":12 + } + } + } + }, + "0000000000000000000000000000000000000003":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"ripemd160", + "pricing":{ + "linear":{ + "base":600, + "word":120 + } + } + } + }, + "0000000000000000000000000000000000000004":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"identity", + "pricing":{ + "linear":{ + "base":15, + "word":3 + } + } + } + }, + "0000000000000000000000000000000000000005":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"modexp", + "activate_at":"0x7530", + "pricing":{ + "modexp":{ + 
"divisor":20 + } + } + } + }, + "0000000000000000000000000000000000000006":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"alt_bn128_add", + "activate_at":"0x7530", + "pricing":{ + "linear":{ + "base":500, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000007":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"alt_bn128_mul", + "activate_at":"0x7530", + "pricing":{ + "linear":{ + "base":40000, + "word":0 + } + } + } + }, + "0000000000000000000000000000000000000008":{ + "balance":"1", + "nonce":"0", + "builtin":{ + "name":"alt_bn128_pairing", + "activate_at":"0x7530", + "pricing":{ + "alt_bn128_pairing":{ + "base":100000, + "pair":80000 + } + } + } + } + } +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go index b88a61de7..83536506c 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "math/big" "net" + "net/url" "os" "path/filepath" "sort" @@ -118,6 +119,47 @@ func (w *wizard) readDefaultString(def string) string { return def } +// readDefaultYesNo reads a single line from stdin, trimming if from spaces and +// interpreting it as a 'yes' or a 'no'. If an empty line is entered, the default +// value is returned. +func (w *wizard) readDefaultYesNo(def bool) bool { + for { + fmt.Printf("> ") + text, err := w.in.ReadString('\n') + if err != nil { + log.Crit("Failed to read user input", "err", err) + } + if text = strings.ToLower(strings.TrimSpace(text)); text == "" { + return def + } + if text == "y" || text == "yes" { + return true + } + if text == "n" || text == "no" { + return false + } + log.Error("Invalid input, expected 'y', 'yes', 'n', 'no' or empty") + } +} + +// readURL reads a single line from stdin, trimming if from spaces and trying to +// interpret it as a URL (http, https or file). +func (w *wizard) readURL() *url.URL { + for { + fmt.Printf("> ") + text, err := w.in.ReadString('\n') + if err != nil { + log.Crit("Failed to read user input", "err", err) + } + uri, err := url.Parse(strings.TrimSpace(text)) + if err != nil { + log.Error("Invalid input, expected URL", "err", err) + continue + } + return uri + } +} + // readInt reads a single line from stdin, trimming if from spaces, enforcing it // to parse into an integer. func (w *wizard) readInt() int { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go index 1a01631ff..8a8370845 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_dashboard.go @@ -137,14 +137,14 @@ func (w *wizard) deployDashboard() { if w.conf.ethstats != "" { fmt.Println() fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)") - infos.trusted = w.readDefaultString("y") == "y" + infos.trusted = w.readDefaultYesNo(true) } // Try to deploy the dashboard container on the host nocache := false if existed { fmt.Println() fmt.Printf("Should the dashboard be built from scratch (y/n)? 
(default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil { log.Error("Failed to deploy dashboard container", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go index fb2529c26..58ff3efbe 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_ethstats.go @@ -67,11 +67,11 @@ func (w *wizard) deployEthstats() { if existed { fmt.Println() fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned) - if w.readDefaultString("y") != "y" { + if !w.readDefaultYesNo(true) { // The user might want to clear the entire list, although generally probably not fmt.Println() fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n") - if w.readDefaultString("n") != "n" { + if w.readDefaultYesNo(false) { infos.banned = nil } // Offer the user to explicitly add/remove certain IP addresses @@ -106,7 +106,7 @@ func (w *wizard) deployEthstats() { if existed { fmt.Println() fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } trusted := make([]string, 0, len(w.servers)) for _, client := range w.servers { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go index 413511c1c..a128fb9fb 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_explorer.go @@ -100,7 +100,7 @@ func (w *wizard) deployExplorer() { if existed { fmt.Println() fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil { log.Error("Failed to deploy explorer container", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go index 6f0840894..9068c1d30 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_faucet.go @@ -81,7 +81,7 @@ func (w *wizard) deployFaucet() { if infos.captchaToken != "" { fmt.Println() fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)") - if w.readDefaultString("y") != "y" { + if !w.readDefaultYesNo(true) { infos.captchaToken, infos.captchaSecret = "", "" } } @@ -89,7 +89,7 @@ func (w *wizard) deployFaucet() { // No previous authorization (or old one discarded) fmt.Println() fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)") - if w.readDefaultString("n") == "n" { + if !w.readDefaultYesNo(false) { log.Warn("Users will be able to requests funds via automated scripts") } else { // Captcha protection explicitly requested, read the site and secret keys @@ -132,7 +132,7 @@ func (w *wizard) deployFaucet() { } else { fmt.Println() fmt.Printf("Reuse previous (%s) funding account (y/n)? 
(default = yes)\n", key.Address.Hex()) - if w.readDefaultString("y") != "y" { + if !w.readDefaultYesNo(true) { infos.node.keyJSON, infos.node.keyPass = "", "" } } @@ -166,7 +166,7 @@ func (w *wizard) deployFaucet() { if existed { fmt.Println() fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil { log.Error("Failed to deploy faucet container", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go index 6c4cd571f..95da5bd4f 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go @@ -20,9 +20,13 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "math/big" "math/rand" + "net/http" + "os" + "path/filepath" "time" "github.com/ethereum/go-ethereum/common" @@ -40,11 +44,12 @@ func (w *wizard) makeGenesis() { Difficulty: big.NewInt(524288), Alloc: make(core.GenesisAlloc), Config: ¶ms.ChainConfig{ - HomesteadBlock: big.NewInt(1), - EIP150Block: big.NewInt(2), - EIP155Block: big.NewInt(3), - EIP158Block: big.NewInt(3), - ByzantiumBlock: big.NewInt(4), + HomesteadBlock: big.NewInt(1), + EIP150Block: big.NewInt(2), + EIP155Block: big.NewInt(3), + EIP158Block: big.NewInt(3), + ByzantiumBlock: big.NewInt(4), + ConstantinopleBlock: big.NewInt(5), }, } // Figure out which consensus engine to choose @@ -114,9 +119,13 @@ func (w *wizard) makeGenesis() { } break } - // Add a batch of precompile balances to avoid them getting deleted - for i := int64(0); i < 256; i++ { - genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)} + fmt.Println() + fmt.Println("Should the precompile-addresses (0x1 .. 0xff) be pre-funded with 1 wei? (advisable yes)") + if w.readDefaultYesNo(true) { + // Add a batch of precompile balances to avoid them getting deleted + for i := int64(0); i < 256; i++ { + genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)} + } } // Query the user for some custom extras fmt.Println() @@ -130,53 +139,130 @@ func (w *wizard) makeGenesis() { w.conf.flush() } +// importGenesis imports a Geth genesis spec into puppeth. +func (w *wizard) importGenesis() { + // Request the genesis JSON spec URL from the user + fmt.Println() + fmt.Println("Where's the genesis file? 
(local file or http/https url)") + url := w.readURL() + + // Convert the various allowed URLs to a reader stream + var reader io.Reader + + switch url.Scheme { + case "http", "https": + // Remote web URL, retrieve it via an HTTP client + res, err := http.Get(url.String()) + if err != nil { + log.Error("Failed to retrieve remote genesis", "err", err) + return + } + defer res.Body.Close() + reader = res.Body + + case "": + // Schemaless URL, interpret as a local file + file, err := os.Open(url.String()) + if err != nil { + log.Error("Failed to open local genesis", "err", err) + return + } + defer file.Close() + reader = file + + default: + log.Error("Unsupported genesis URL scheme", "scheme", url.Scheme) + return + } + // Parse the genesis file and inject it if successful + var genesis core.Genesis + if err := json.NewDecoder(reader).Decode(&genesis); err != nil { + log.Error("Invalid genesis spec", "err", err) + return + } + log.Info("Imported genesis block") + + w.conf.Genesis = &genesis + w.conf.flush() +} + // manageGenesis permits the modification of chain configuration parameters in // a genesis config and the export of the entire genesis spec. func (w *wizard) manageGenesis() { // Figure out whether to modify or export the genesis fmt.Println() fmt.Println(" 1. Modify existing fork rules") - fmt.Println(" 2. Export genesis configuration") + fmt.Println(" 2. Export genesis configurations") fmt.Println(" 3. Remove genesis configuration") choice := w.read() - switch { - case choice == "1": + switch choice { + case "1": // Fork rule updating requested, iterate over each fork fmt.Println() fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock) w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock) fmt.Println() - fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block) + fmt.Printf("Which block should EIP150 (Tangerine Whistle) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block) w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block) fmt.Println() - fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block) + fmt.Printf("Which block should EIP155 (Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block) w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block) fmt.Println() - fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block) + fmt.Printf("Which block should EIP158/161 (also Spurious Dragon) come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block) w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block) fmt.Println() fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock) w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock) + fmt.Println() + fmt.Printf("Which block should Constantinople come into effect?
(default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock) + w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock) + out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ") fmt.Printf("Chain configuration updated:\n\n%s\n", out) - case choice == "2": + case "2": // Save whatever genesis configuration we currently have fmt.Println() - fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network) - out, _ := json.MarshalIndent(w.conf.Genesis, "", " ") - if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil { - log.Error("Failed to save genesis file", "err", err) - } - log.Info("Exported existing genesis block") + fmt.Printf("Which folder to save the genesis specs into? (default = current)\n") + fmt.Printf(" Will create %s.json, %s-aleth.json, %s-harmony.json, %s-parity.json\n", w.network, w.network, w.network, w.network) - case choice == "3": + folder := w.readDefaultString(".") + if err := os.MkdirAll(folder, 0755); err != nil { + log.Error("Failed to create spec folder", "folder", folder, "err", err) + return + } + out, _ := json.MarshalIndent(w.conf.Genesis, "", " ") + + // Export the native genesis spec used by puppeth and Geth + gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network)) + if err := ioutil.WriteFile(gethJson, out, 0644); err != nil { + log.Error("Failed to save genesis file", "err", err) + return + } + log.Info("Saved native genesis chain spec", "path", gethJson) + + // Export the genesis spec used by Aleth (formerly C++ Ethereum) + if spec, err := newAlethGenesisSpec(w.network, w.conf.Genesis); err != nil { + log.Error("Failed to create Aleth chain spec", "err", err) + } else { + saveGenesis(folder, w.network, "aleth", spec) + } + // Export the genesis spec used by Parity + if spec, err := newParityChainSpec(w.network, w.conf.Genesis, []string{}); err != nil { + log.Error("Failed to create Parity chain spec", "err", err) + } else { + saveGenesis(folder, w.network, "parity", spec) + } + // Export the genesis spec used by Harmony (formerly EthereumJ) + saveGenesis(folder, w.network, "harmony", w.conf.Genesis) + + case "3": // Make sure we don't have any services running if len(w.conf.servers()) > 0 { log.Error("Genesis reset requires all services and servers torn down") @@ -186,8 +272,20 @@ func (w *wizard) manageGenesis() { w.conf.Genesis = nil w.conf.flush() - default: log.Error("That's not something I can do") + return } } + +// saveGenesis JSON encodes an arbitrary genesis spec into a pre-defined file.
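// The export branch above writes one spec per client flavour; the saveGenesis
// helper that follows compacts the common encode-and-write step. A runnable
// sketch of that step (saveSpec and the map literal are stand-ins for the
// real helper and chain-spec structs):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// saveSpec JSON-encodes any spec flavour into <folder>/<network>-<client>.json.
func saveSpec(folder, network, client string, spec interface{}) error {
	out, err := json.Marshal(spec)
	if err != nil {
		return err
	}
	path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client))
	return ioutil.WriteFile(path, out, 0644)
}

func main() {
	spec := map[string]string{"name": "stureby"} // stand-in for a real chain spec
	if err := saveSpec(os.TempDir(), "stureby", "parity", spec); err != nil {
		fmt.Println("export failed:", err)
	}
}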
+func saveGenesis(folder, network, client string, spec interface{}) { + path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client)) + + out, _ := json.Marshal(spec) + if err := ioutil.WriteFile(path, out, 0644); err != nil { + log.Error("Failed to save genesis file", "client", client, "err", err) + return + } + log.Info("Saved genesis chain spec", "client", client, "path", path) +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go index 60aa0f7ff..75fb04b76 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_intro.go @@ -61,14 +61,14 @@ func (w *wizard) run() { // Make sure we have a good network name to work with fmt.Println() // Docker accepts hyphens in image names, but doesn't like it for container names if w.network == "" { - fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)") + fmt.Println("Please specify a network name to administer (no spaces, hyphens or capital letters please)") for { w.network = w.readString() - if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") { + if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") && strings.ToLower(w.network) == w.network { fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network) break } - log.Error("I also like to live dangerously, still no spaces or hyphens") + log.Error("I also like to live dangerously, still no spaces, hyphens or capital letters") } } log.Info("Administering Ethereum network", "name", w.network) @@ -131,7 +131,20 @@ func (w *wizard) run() { case choice == "2": if w.conf.Genesis == nil { - w.makeGenesis() + fmt.Println() + fmt.Println("What would you like to do? (default = create)") + fmt.Println(" 1. Create new genesis from scratch") + fmt.Println(" 2. Import already existing genesis") + + choice := w.read() + switch { + case choice == "" || choice == "1": + w.makeGenesis() + case choice == "2": + w.importGenesis() + default: + log.Error("That's not something I can do") + } } else { w.manageGenesis() } @@ -149,7 +162,6 @@ func (w *wizard) run() { } else { w.manageComponents() } - default: log.Error("That's not something I can do") } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go index 4eeae93a0..8397b7fd5 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_nginx.go @@ -41,12 +41,12 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str // Reverse proxy is not running, offer to deploy a new one fmt.Println() fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)") - if w.readDefaultString("y") == "y" { + if w.readDefaultYesNo(true) { nocache := false if proxy != nil { fmt.Println() fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? 
(default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployNginx(client, w.network, port, nocache); err != nil { log.Error("Failed to deploy reverse-proxy", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go index 49b10a023..e37297f6d 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_node.go @@ -126,7 +126,7 @@ func (w *wizard) deployNode(boot bool) { } else { fmt.Println() fmt.Printf("Reuse previous (%s) signing account (y/n)? (default = yes)\n", key.Address.Hex()) - if w.readDefaultString("y") != "y" { + if !w.readDefaultYesNo(true) { infos.keyJSON, infos.keyPass = "", "" } } @@ -165,7 +165,7 @@ func (w *wizard) deployNode(boot bool) { if existed { fmt.Println() fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil { log.Error("Failed to deploy Ethereum node container", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go index 7624d11e2..ca1ea5bd2 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_wallet.go @@ -96,7 +96,7 @@ func (w *wizard) deployWallet() { if existed { fmt.Println() fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n") - nocache = w.readDefaultString("n") != "n" + nocache = w.readDefaultYesNo(false) } if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil { log.Error("Failed to deploy wallet container", "err", err) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go index dd2d513c2..072541b65 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/access.go @@ -29,7 +29,65 @@ import ( "gopkg.in/urfave/cli.v1" ) -var salt = make([]byte, 32) +var ( + salt = make([]byte, 32) + accessCommand = cli.Command{ + CustomHelpTemplate: helpTemplate, + Name: "access", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root manifest", + Subcommands: []cli.Command{ + { + CustomHelpTemplate: helpTemplate, + Name: "new", + Usage: "encrypts a reference and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + Subcommands: []cli.Command{ + { + Action: accessNewPass, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + utils.PasswordFileFlag, + SwarmDryRunFlag, + }, + Name: "pass", + Usage: "encrypts a reference with a password and embeds it into a root manifest", + ArgsUsage: "", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewPK, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + utils.PasswordFileFlag, + SwarmDryRunFlag, + SwarmAccessGrantKeyFlag, + }, + Name: "pk", + Usage: "encrypts a reference with the node's 
private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "<ref>", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + { + Action: accessNewACT, + CustomHelpTemplate: helpTemplate, + Flags: []cli.Flag{ + SwarmAccessGrantKeysFlag, + SwarmDryRunFlag, + utils.PasswordFileFlag, + }, + Name: "act", + Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", + ArgsUsage: "<ref>", + Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", + }, + }, + }, + }, + } +) func init() { if _, err := io.ReadFull(rand.Reader, salt); err != nil { @@ -56,6 +114,9 @@ func accessNewPass(ctx *cli.Context) { utils.Fatalf("error getting session key: %v", err) } m, err := api.GenerateAccessControlManifest(ctx, ref, accessKey, ae) + if err != nil { + utils.Fatalf("had an error generating the manifest: %v", err) + } if dryRun { err = printManifests(m, nil) if err != nil { @@ -89,6 +150,9 @@ func accessNewPK(ctx *cli.Context) { utils.Fatalf("error getting session key: %v", err) } m, err := api.GenerateAccessControlManifest(ctx, ref, sessionKey, ae) + if err != nil { + utils.Fatalf("had an error generating the manifest: %v", err) + } if dryRun { err = printManifests(m, nil) if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config.go index 16001010d..3eea3057b 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/config.go @@ -80,6 +80,7 @@ const ( SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY" SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY" SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD" + SWARM_AUTO_DEFAULTPATH = "SWARM_AUTO_DEFAULTPATH" GETH_ENV_DATADIR = "GETH_DATADIR" ) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/db.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/db.go index 107fbf100..7916beffc 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/db.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/db.go @@ -29,6 +29,48 @@ import ( "gopkg.in/urfave/cli.v1" ) +var dbCommand = cli.Command{ + Name: "db", + CustomHelpTemplate: helpTemplate, + Usage: "manage the local chunk database", + ArgsUsage: "db COMMAND", + Description: "Manage the local chunk database", + Subcommands: []cli.Command{ + { + Action: dbExport, + CustomHelpTemplate: helpTemplate, + Name: "export", + Usage: "export a local chunk database as a tar archive (use - to send to stdout)", + ArgsUsage: "<chunkdb> <file>", + Description: ` +Export a local chunk database as a tar archive (use - to send to stdout). + + swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar + +The export may be quite large, consider piping the output through the Unix +pv(1) tool to get a progress bar: + + swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar +`, + }, + { + Action: dbImport, + CustomHelpTemplate: helpTemplate, + Name: "import", + Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", + ArgsUsage: "<chunkdb> <file>", + Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin). + + swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar + +The import may be quite large, consider piping the input through the Unix +pv(1) tool to get a progress bar: + + pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, + }, + }, +} + func dbExport(ctx *cli.Context) { args := ctx.Args() if len(args) != 3 { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/download.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/download.go index 91bc2c93a..fcbefa020 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/download.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/download.go @@ -28,6 +28,15 @@ import ( "gopkg.in/urfave/cli.v1" ) +var downloadCommand = cli.Command{ + Action: download, + Name: "down", + Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag}, + Usage: "downloads a swarm manifest or a file inside a manifest", + ArgsUsage: "<uri> [<dir>]", + Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, the working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.`, +} + func download(ctx *cli.Context) { log.Debug("downloading content using swarm down") args := ctx.Args() diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go index 6806c6cf4..6cd971a92 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/feeds.go @@ -31,6 +31,68 @@ import ( "gopkg.in/urfave/cli.v1" ) +var feedCommand = cli.Command{ + CustomHelpTemplate: helpTemplate, + Name: "feed", + Usage: "(Advanced) Create and update Swarm Feeds", + ArgsUsage: "<create|update|info>", + Description: "Works with Swarm Feeds", + Subcommands: []cli.Command{ + { + Action: feedCreateManifest, + CustomHelpTemplate: helpTemplate, + Name: "create", + Usage: "creates and publishes a new feed manifest", + Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic. + The feed topic can be built in the following ways: + * use --topic to set the topic to an arbitrary binary hex string. + * use --name to set the topic to a human-readable name. + For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture. + * use both --topic and --name to create named subtopics. + For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning + this feed tracks a discussion about that contract. + The --user flag allows this manifest to refer to a user other than yourself. If not specified, + it will then default to your local account (--bzzaccount)`, + Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, + }, + { + Action: feedUpdate, + CustomHelpTemplate: helpTemplate, + Name: "update", + Usage: "updates the content of an existing Swarm Feed", + ArgsUsage: "<0x Hex data>", + Description: `publishes a new update on the specified topic + The feed topic can be built in the following ways: + * use --topic to set the topic to an arbitrary binary hex string. + * use --name to set the topic to a human-readable name. + For example --name could be set to "profile-picture", meaning this feed allows getting this user's current profile picture. + * use both --topic and --name to create named subtopics. + For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning + this feed tracks a discussion about that contract. + + If you have a manifest, you can specify it with --manifest to refer to the feed, + instead of using --topic / --name + `, + Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag}, + }, + { + Action: feedInfo, + CustomHelpTemplate: helpTemplate, + Name: "info", + Usage: "obtains information about an existing Swarm feed", + Description: `obtains information about an existing Swarm feed + The topic can be specified directly with the --topic flag as a hex string + If no topic is specified, the default topic (zero) will be used + The --name flag can be used to specify subtopics with a specific name. + The --user flag allows referring to a user other than yourself. If not specified, + it will then default to your local account (--bzzaccount) + If you have a manifest, you can specify it with --manifest instead of --topic / --name / --user + to refer to the feed`, + Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, + }, + }, +} + func NewGenericSigner(ctx *cli.Context) feed.Signer { return feed.NewGenericSigner(getPrivKey(ctx)) } @@ -107,7 +169,6 @@ func feedUpdate(ctx *cli.Context) { query = new(feed.Query) query.User = signer.Address() query.Topic = getTopic(ctx) - } // Retrieve a feed update request @@ -116,6 +177,11 @@ func feedUpdate(ctx *cli.Context) { utils.Fatalf("Error retrieving feed status: %s", err.Error()) } + // Check that the provided signer matches the request to sign + if updateRequest.User != signer.Address() { + utils.Fatalf("Signer address does not match the update request") + } + // set the new data updateRequest.SetData(data) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go new file mode 100644 index 000000000..0dedca674 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go @@ -0,0 +1,179 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+ +// Command feed allows the user to create and update signed Swarm feeds +package main + +import cli "gopkg.in/urfave/cli.v1" + +var ( + ChequebookAddrFlag = cli.StringFlag{ + Name: "chequebook", + Usage: "chequebook contract address", + EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR, + } + SwarmAccountFlag = cli.StringFlag{ + Name: "bzzaccount", + Usage: "Swarm account key file", + EnvVar: SWARM_ENV_ACCOUNT, + } + SwarmListenAddrFlag = cli.StringFlag{ + Name: "httpaddr", + Usage: "Swarm HTTP API listening interface", + EnvVar: SWARM_ENV_LISTEN_ADDR, + } + SwarmPortFlag = cli.StringFlag{ + Name: "bzzport", + Usage: "Swarm local http api port", + EnvVar: SWARM_ENV_PORT, + } + SwarmNetworkIdFlag = cli.IntFlag{ + Name: "bzznetworkid", + Usage: "Network identifier (integer, default 3=swarm testnet)", + EnvVar: SWARM_ENV_NETWORK_ID, + } + SwarmSwapEnabledFlag = cli.BoolFlag{ + Name: "swap", + Usage: "Swarm SWAP enabled (default false)", + EnvVar: SWARM_ENV_SWAP_ENABLE, + } + SwarmSwapAPIFlag = cli.StringFlag{ + Name: "swap-api", + Usage: "URL of the Ethereum API provider to use to settle SWAP payments", + EnvVar: SWARM_ENV_SWAP_API, + } + SwarmSyncDisabledFlag = cli.BoolTFlag{ + Name: "nosync", + Usage: "Disable swarm syncing", + EnvVar: SWARM_ENV_SYNC_DISABLE, + } + SwarmSyncUpdateDelay = cli.DurationFlag{ + Name: "sync-update-delay", + Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", + EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, + } + SwarmMaxStreamPeerServersFlag = cli.IntFlag{ + Name: "max-stream-peer-servers", + Usage: "Limit of Stream peer servers, 0 denotes unlimited", + EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS, + Value: 10000, // A very large default value is possible as stream servers have very small memory footprint + } + SwarmLightNodeEnabled = cli.BoolFlag{ + Name: "lightnode", + Usage: "Enable Swarm LightNode (default false)", + EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE, + } + SwarmDeliverySkipCheckFlag = cli.BoolFlag{ + Name: "delivery-skip-check", + Usage: "Skip chunk delivery check (default false)", + EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK, + } + EnsAPIFlag = cli.StringSliceFlag{ + Name: "ens-api", + Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url", + EnvVar: SWARM_ENV_ENS_API, + } + SwarmApiFlag = cli.StringFlag{ + Name: "bzzapi", + Usage: "Specifies the Swarm HTTP endpoint to connect to", + Value: "http://127.0.0.1:8500", + } + SwarmRecursiveFlag = cli.BoolFlag{ + Name: "recursive", + Usage: "Upload directories recursively", + } + SwarmWantManifestFlag = cli.BoolTFlag{ + Name: "manifest", + Usage: "Automatic manifest upload (default true)", + } + SwarmUploadDefaultPath = cli.StringFlag{ + Name: "defaultpath", + Usage: "path to file served for empty url path (none)", + } + SwarmAccessGrantKeyFlag = cli.StringFlag{ + Name: "grant-key", + Usage: "grants a given public key access to an ACT", + } + SwarmAccessGrantKeysFlag = cli.StringFlag{ + Name: "grant-keys", + Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT", + } + SwarmUpFromStdinFlag = cli.BoolFlag{ + Name: "stdin", + Usage: "reads data to be uploaded from stdin", + } + SwarmUploadMimeType = cli.StringFlag{ + Name: "mime", + Usage: "Manually specify MIME type", + } + SwarmEncryptedFlag = cli.BoolFlag{ + Name: "encrypt", + Usage: "use encrypted upload", + } + SwarmAccessPasswordFlag = cli.StringFlag{ + Name: "password", + Usage: "Password", + EnvVar: SWARM_ACCESS_PASSWORD, + } + 
SwarmDryRunFlag = cli.BoolFlag{ + Name: "dry-run", + Usage: "dry-run", + } + CorsStringFlag = cli.StringFlag{ + Name: "corsdomain", + Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", + EnvVar: SWARM_ENV_CORS, + } + SwarmStorePath = cli.StringFlag{ + Name: "store.path", + Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)", + EnvVar: SWARM_ENV_STORE_PATH, + } + SwarmStoreCapacity = cli.Uint64Flag{ + Name: "store.size", + Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)", + EnvVar: SWARM_ENV_STORE_CAPACITY, + } + SwarmStoreCacheCapacity = cli.UintFlag{ + Name: "store.cache.size", + Usage: "Number of recent chunks cached in memory (default 5000)", + EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, + } + SwarmCompressedFlag = cli.BoolFlag{ + Name: "compressed", + Usage: "Prints encryption keys in compressed form", + } + SwarmFeedNameFlag = cli.StringFlag{ + Name: "name", + Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name", + } + SwarmFeedTopicFlag = cli.StringFlag{ + Name: "topic", + Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters", + } + SwarmFeedDataOnCreateFlag = cli.StringFlag{ + Name: "data", + Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x", + } + SwarmFeedManifestFlag = cli.StringFlag{ + Name: "manifest", + Usage: "Refers to the feed through a manifest", + } + SwarmFeedUserFlag = cli.StringFlag{ + Name: "user", + Usage: "Indicates the user who updates the feed", + } +) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go index 3dc38ca4d..edeeddff8 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/fs.go @@ -24,16 +24,50 @@ import ( "time" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/swarm/fuse" "gopkg.in/urfave/cli.v1" ) +var fsCommand = cli.Command{ + Name: "fs", + CustomHelpTemplate: helpTemplate, + Usage: "perform FUSE operations", + ArgsUsage: "fs COMMAND", + Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operations you must reference the correct path to bzzd.ipc in order to communicate with the node", + Subcommands: []cli.Command{ + { + Action: mount, + CustomHelpTemplate: helpTemplate, + Name: "mount", + Usage: "mount a swarm hash to a mount point", + ArgsUsage: "swarm fs mount <manifest hash> <mount point>", + Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: unmount, + CustomHelpTemplate: helpTemplate, + Name: "unmount", + Usage: "unmount a swarmfs mount", + ArgsUsage: "swarm fs unmount <mount point>", + Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: listMounts, + CustomHelpTemplate: helpTemplate, + Name: "list", + Usage: "list swarmfs mounts", + ArgsUsage: "swarm fs list", + Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + }, +} + func mount(cliContext *cli.Context) { args := cliContext.Args() if len(args) < 2 { - utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>") + utils.Fatalf("Usage: swarm fs mount <manifest hash> <mount point>") } client, err := dialRPC(cliContext) @@ -60,7 +94,7 @@ func unmount(cliContext *cli.Context) { args := cliContext.Args() if len(args) < 1 { - utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>") + utils.Fatalf("Usage: swarm fs unmount <mount point>") } client, err := dialRPC(cliContext) if err != nil { @@ -108,20 +142,21 @@ func listMounts(cliContext *cli.Context) { } func dialRPC(ctx *cli.Context) (*rpc.Client, error) { - var endpoint string + endpoint := getIPCEndpoint(ctx) + log.Info("IPC endpoint", "path", endpoint) + return rpc.Dial(endpoint) +} - if ctx.IsSet(utils.IPCPathFlag.Name) { - endpoint = ctx.String(utils.IPCPathFlag.Name) - } else { - utils.Fatalf("swarm ipc endpoint not specified") - } +func getIPCEndpoint(ctx *cli.Context) string { + cfg := defaultNodeConfig + utils.SetNodeConfig(ctx, &cfg) - if endpoint == "" { - endpoint = node.DefaultIPCEndpoint(clientIdentifier) - } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { + endpoint := cfg.IPCEndpoint() + + if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { // Backwards compatibility with geth < 1.5 which required // these prefixes. endpoint = endpoint[4:] } - return rpc.Dial(endpoint) + return endpoint } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go index d679806e3..471feb53d 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/hash.go @@ -27,6 +27,15 @@ import ( "gopkg.in/urfave/cli.v1" ) +var hashCommand = cli.Command{ + Action: hash, + CustomHelpTemplate: helpTemplate, + Name: "hash", + Usage: "print the swarm hash of a file or directory", + ArgsUsage: "<file>", + Description: "Prints the swarm hash of a file or directory", +} + func hash(ctx *cli.Context) { args := ctx.Args() if len(args) < 1 { diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/list.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/list.go index 01b3f4ab6..5d35154a5 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/list.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/list.go @@ -27,6 +27,15 @@ import ( "gopkg.in/urfave/cli.v1" ) +var listCommand = cli.Command{ + Action: list, + CustomHelpTemplate: helpTemplate, + Name: "ls", + Usage: "list files and directories contained in a manifest", + ArgsUsage: "<manifest> [<prefix>]", + Description: "Lists files and directories contained in a manifest", +} + func list(ctx *cli.Context) { args := ctx.Args() diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go index 88e6b0b0b..ccbb24eec 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go @@ -70,165 +70,6 @@ var ( gitCommit string // Git SHA1 commit hash of the release (set via linker flags) ) -var ( - ChequebookAddrFlag = cli.StringFlag{ - Name: "chequebook", - Usage: "chequebook contract address", - EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR, - } - SwarmAccountFlag = cli.StringFlag{ - Name: "bzzaccount", - Usage: "Swarm account key file", - EnvVar: SWARM_ENV_ACCOUNT, - } - SwarmListenAddrFlag =
cli.StringFlag{ - Name: "httpaddr", - Usage: "Swarm HTTP API listening interface", - EnvVar: SWARM_ENV_LISTEN_ADDR, - } - SwarmPortFlag = cli.StringFlag{ - Name: "bzzport", - Usage: "Swarm local http api port", - EnvVar: SWARM_ENV_PORT, - } - SwarmNetworkIdFlag = cli.IntFlag{ - Name: "bzznetworkid", - Usage: "Network identifier (integer, default 3=swarm testnet)", - EnvVar: SWARM_ENV_NETWORK_ID, - } - SwarmSwapEnabledFlag = cli.BoolFlag{ - Name: "swap", - Usage: "Swarm SWAP enabled (default false)", - EnvVar: SWARM_ENV_SWAP_ENABLE, - } - SwarmSwapAPIFlag = cli.StringFlag{ - Name: "swap-api", - Usage: "URL of the Ethereum API provider to use to settle SWAP payments", - EnvVar: SWARM_ENV_SWAP_API, - } - SwarmSyncDisabledFlag = cli.BoolTFlag{ - Name: "nosync", - Usage: "Disable swarm syncing", - EnvVar: SWARM_ENV_SYNC_DISABLE, - } - SwarmSyncUpdateDelay = cli.DurationFlag{ - Name: "sync-update-delay", - Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", - EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, - } - SwarmMaxStreamPeerServersFlag = cli.IntFlag{ - Name: "max-stream-peer-servers", - Usage: "Limit of Stream peer servers, 0 denotes unlimited", - EnvVar: SWARM_ENV_MAX_STREAM_PEER_SERVERS, - Value: 10000, // A very large default value is possible as stream servers have very small memory footprint - } - SwarmLightNodeEnabled = cli.BoolFlag{ - Name: "lightnode", - Usage: "Enable Swarm LightNode (default false)", - EnvVar: SWARM_ENV_LIGHT_NODE_ENABLE, - } - SwarmDeliverySkipCheckFlag = cli.BoolFlag{ - Name: "delivery-skip-check", - Usage: "Skip chunk delivery check (default false)", - EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK, - } - EnsAPIFlag = cli.StringSliceFlag{ - Name: "ens-api", - Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url", - EnvVar: SWARM_ENV_ENS_API, - } - SwarmApiFlag = cli.StringFlag{ - Name: "bzzapi", - Usage: "Swarm HTTP endpoint", - Value: "http://127.0.0.1:8500", - } - SwarmRecursiveFlag = cli.BoolFlag{ - Name: "recursive", - Usage: "Upload directories recursively", - } - SwarmWantManifestFlag = cli.BoolTFlag{ - Name: "manifest", - Usage: "Automatic manifest upload (default true)", - } - SwarmUploadDefaultPath = cli.StringFlag{ - Name: "defaultpath", - Usage: "path to file served for empty url path (none)", - } - SwarmAccessGrantKeyFlag = cli.StringFlag{ - Name: "grant-key", - Usage: "grants a given public key access to an ACT", - } - SwarmAccessGrantKeysFlag = cli.StringFlag{ - Name: "grant-keys", - Usage: "grants a given list of public keys in the following file (separated by line breaks) access to an ACT", - } - SwarmUpFromStdinFlag = cli.BoolFlag{ - Name: "stdin", - Usage: "reads data to be uploaded from stdin", - } - SwarmUploadMimeType = cli.StringFlag{ - Name: "mime", - Usage: "Manually specify MIME type", - } - SwarmEncryptedFlag = cli.BoolFlag{ - Name: "encrypt", - Usage: "use encrypted upload", - } - SwarmAccessPasswordFlag = cli.StringFlag{ - Name: "password", - Usage: "Password", - EnvVar: SWARM_ACCESS_PASSWORD, - } - SwarmDryRunFlag = cli.BoolFlag{ - Name: "dry-run", - Usage: "dry-run", - } - CorsStringFlag = cli.StringFlag{ - Name: "corsdomain", - Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", - EnvVar: SWARM_ENV_CORS, - } - SwarmStorePath = cli.StringFlag{ - Name: "store.path", - Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)", - EnvVar: 
SWARM_ENV_STORE_PATH, - } - SwarmStoreCapacity = cli.Uint64Flag{ - Name: "store.size", - Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)", - EnvVar: SWARM_ENV_STORE_CAPACITY, - } - SwarmStoreCacheCapacity = cli.UintFlag{ - Name: "store.cache.size", - Usage: "Number of recent chunks cached in memory (default 5000)", - EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, - } - SwarmCompressedFlag = cli.BoolFlag{ - Name: "compressed", - Usage: "Prints encryption keys in compressed form", - } - SwarmFeedNameFlag = cli.StringFlag{ - Name: "name", - Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name", - } - SwarmFeedTopicFlag = cli.StringFlag{ - Name: "topic", - Usage: "User-defined topic this feed is tracking, hex encoded. Limited to 64 hexadecimal characters", - } - SwarmFeedDataOnCreateFlag = cli.StringFlag{ - Name: "data", - Usage: "Initializes the feed with the given hex-encoded data. Data must be prefixed by 0x", - } - SwarmFeedManifestFlag = cli.StringFlag{ - Name: "manifest", - Usage: "Refers to the feed through a manifest", - } - SwarmFeedUserFlag = cli.StringFlag{ - Name: "user", - Usage: "Indicates the user who updates the feed", - } -) - //declare a few constant error messages, useful for later error check comparisons in test var ( SWARM_ERR_NO_BZZACCOUNT = "bzzaccount option is required but not set; check your config file, command line or environment variables" @@ -279,267 +120,24 @@ func init() { Usage: "Print public key information", Description: "The output of this command is supposed to be machine-readable", }, - { - Action: upload, - CustomHelpTemplate: helpTemplate, - Name: "up", - Usage: "uploads a file or directory to swarm using the HTTP API", - ArgsUsage: "", - Flags: []cli.Flag{SwarmEncryptedFlag}, - Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", - }, - { - CustomHelpTemplate: helpTemplate, - Name: "access", - Usage: "encrypts a reference and embeds it into a root manifest", - ArgsUsage: "", - Description: "encrypts a reference and embeds it into a root manifest", - Subcommands: []cli.Command{ - { - CustomHelpTemplate: helpTemplate, - Name: "new", - Usage: "encrypts a reference and embeds it into a root manifest", - ArgsUsage: "", - Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", - Subcommands: []cli.Command{ - { - Action: accessNewPass, - CustomHelpTemplate: helpTemplate, - Flags: []cli.Flag{ - utils.PasswordFileFlag, - SwarmDryRunFlag, - }, - Name: "pass", - Usage: "encrypts a reference with a password and embeds it into a root manifest", - ArgsUsage: "", - Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", - }, - { - Action: accessNewPK, - CustomHelpTemplate: helpTemplate, - Flags: []cli.Flag{ - utils.PasswordFileFlag, - SwarmDryRunFlag, - SwarmAccessGrantKeyFlag, - }, - Name: "pk", - Usage: "encrypts a reference with the node's private key and a given grantee's public key and embeds it into a root manifest", - ArgsUsage: "", - Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", - }, - { - Action: accessNewACT, - CustomHelpTemplate: helpTemplate, - Flags: []cli.Flag{ - SwarmAccessGrantKeysFlag, - SwarmDryRunFlag, - utils.PasswordFileFlag, - }, - Name: "act", - Usage: "encrypts a reference with the node's private key and a given grantee's public 
key and embeds it into a root manifest", - ArgsUsage: "", - Description: "encrypts a reference and embeds it into a root access manifest and prints the resulting manifest", - }, - }, - }, - }, - }, - { - CustomHelpTemplate: helpTemplate, - Name: "feed", - Usage: "(Advanced) Create and update Swarm Feeds", - ArgsUsage: "", - Description: "Works with Swarm Feeds", - Subcommands: []cli.Command{ - { - Action: feedCreateManifest, - CustomHelpTemplate: helpTemplate, - Name: "create", - Usage: "creates and publishes a new feed manifest", - Description: `creates and publishes a new feed manifest pointing to a specified user's updates about a particular topic. - The feed topic can be built in the following ways: - * use --topic to set the topic to an arbitrary binary hex string. - * use --name to set the topic to a human-readable name. - For example --name could be set to "profile-picture", meaning this feed allows to get this user's current profile picture. - * use both --topic and --name to create named subtopics. - For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning - this feed tracks a discussion about that contract. - The --user flag allows to have this manifest refer to a user other than yourself. If not specified, - it will then default to your local account (--bzzaccount)`, - Flags: []cli.Flag{SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, - }, - { - Action: feedUpdate, - CustomHelpTemplate: helpTemplate, - Name: "update", - Usage: "updates the content of an existing Swarm Feed", - ArgsUsage: "<0x Hex data>", - Description: `publishes a new update on the specified topic - The feed topic can be built in the following ways: - * use --topic to set the topic to an arbitrary binary hex string. - * use --name to set the topic to a human-readable name. - For example --name could be set to "profile-picture", meaning this feed allows to get this user's current profile picture. - * use both --topic and --name to create named subtopics. - For example, --topic could be set to an Ethereum contract address and --name could be set to "comments", meaning - this feed tracks a discussion about that contract. - - If you have a manifest, you can specify it with --manifest to refer to the feed, - instead of using --topic / --name - `, - Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag}, - }, - { - Action: feedInfo, - CustomHelpTemplate: helpTemplate, - Name: "info", - Usage: "obtains information about an existing Swarm feed", - Description: `obtains information about an existing Swarm feed - The topic can be specified directly with the --topic flag as an hex string - If no topic is specified, the default topic (zero) will be used - The --name flag can be used to specify subtopics with a specific name. - The --user flag allows to refer to a user other than yourself. 
If not specified, - it will then default to your local account (--bzzaccount) - If you have a manifest, you can specify it with --manifest instead of --topic / --name / ---user - to refer to the feed`, - Flags: []cli.Flag{SwarmFeedManifestFlag, SwarmFeedNameFlag, SwarmFeedTopicFlag, SwarmFeedUserFlag}, - }, - }, - }, - { - Action: list, - CustomHelpTemplate: helpTemplate, - Name: "ls", - Usage: "list files and directories contained in a manifest", - ArgsUsage: " []", - Description: "Lists files and directories contained in a manifest", - }, - { - Action: hash, - CustomHelpTemplate: helpTemplate, - Name: "hash", - Usage: "print the swarm hash of a file or directory", - ArgsUsage: "", - Description: "Prints the swarm hash of file or directory", - }, - { - Action: download, - Name: "down", - Flags: []cli.Flag{SwarmRecursiveFlag, SwarmAccessPasswordFlag}, - Usage: "downloads a swarm manifest or a file inside a manifest", - ArgsUsage: " []", - Description: `Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.`, - }, - { - Name: "manifest", - CustomHelpTemplate: helpTemplate, - Usage: "perform operations on swarm manifests", - ArgsUsage: "COMMAND", - Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", - Subcommands: []cli.Command{ - { - Action: manifestAdd, - CustomHelpTemplate: helpTemplate, - Name: "add", - Usage: "add a new path to the manifest", - ArgsUsage: " ", - Description: "Adds a new path to the manifest", - }, - { - Action: manifestUpdate, - CustomHelpTemplate: helpTemplate, - Name: "update", - Usage: "update the hash for an already existing path in the manifest", - ArgsUsage: " ", - Description: "Update the hash for an already existing path in the manifest", - }, - { - Action: manifestRemove, - CustomHelpTemplate: helpTemplate, - Name: "remove", - Usage: "removes a path from the manifest", - ArgsUsage: " ", - Description: "Removes a path from the manifest", - }, - }, - }, - { - Name: "fs", - CustomHelpTemplate: helpTemplate, - Usage: "perform FUSE operations", - ArgsUsage: "fs COMMAND", - Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node", - Subcommands: []cli.Command{ - { - Action: mount, - CustomHelpTemplate: helpTemplate, - Name: "mount", - Flags: []cli.Flag{utils.IPCPathFlag}, - Usage: "mount a swarm hash to a mount point", - ArgsUsage: "swarm fs mount --ipcpath ", - Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", - }, - { - Action: unmount, - CustomHelpTemplate: helpTemplate, - Name: "unmount", - Flags: []cli.Flag{utils.IPCPathFlag}, - Usage: "unmount a swarmfs mount", - ArgsUsage: "swarm fs unmount --ipcpath ", - Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", - }, - { - Action: listMounts, - CustomHelpTemplate: helpTemplate, - Name: "list", - Flags: []cli.Flag{utils.IPCPathFlag}, - Usage: "list swarmfs mounts", - ArgsUsage: "swarm fs list --ipcpath ", - Description: "Lists all mounted swarmfs volumes. 
This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", - }, - }, - }, - { - Name: "db", - CustomHelpTemplate: helpTemplate, - Usage: "manage the local chunk database", - ArgsUsage: "db COMMAND", - Description: "Manage the local chunk database", - Subcommands: []cli.Command{ - { - Action: dbExport, - CustomHelpTemplate: helpTemplate, - Name: "export", - Usage: "export a local chunk database as a tar archive (use - to send to stdout)", - ArgsUsage: "<chunkdb> <file>", - Description: ` -Export a local chunk database as a tar archive (use - to send to stdout). - - swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar - -The export may be quite large, consider piping the output through the Unix -pv(1) tool to get a progress bar: - - swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar -`, - }, - { - Action: dbImport, - CustomHelpTemplate: helpTemplate, - Name: "import", - Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", - ArgsUsage: "<chunkdb> <file>", - Description: `Import chunks from a tar archive into a local chunk database (use - to read from stdin). - - swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar - -The import may be quite large, consider piping the input through the Unix -pv(1) tool to get a progress bar: - - pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`, - }, - }, - }, - + // See upload.go + upCommand, + // See access.go + accessCommand, + // See feeds.go + feedCommand, + // See list.go + listCommand, + // See hash.go + hashCommand, + // See download.go + downloadCommand, + // See manifest.go + manifestCommand, + // See fs.go + fsCommand, + // See db.go + dbCommand, // See config.go DumpConfigCommand, } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/manifest.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/manifest.go index 0216ffc1d..312c72fa2 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/manifest.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/manifest.go @@ -28,6 +28,40 @@ import ( "gopkg.in/urfave/cli.v1" ) +var manifestCommand = cli.Command{ + Name: "manifest", + CustomHelpTemplate: helpTemplate, + Usage: "perform operations on swarm manifests", + ArgsUsage: "COMMAND", + Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", + Subcommands: []cli.Command{ + { + Action: manifestAdd, + CustomHelpTemplate: helpTemplate, + Name: "add", + Usage: "add a new path to the manifest", + ArgsUsage: "<MANIFEST> <path> <hash>", + Description: "Adds a new path to the manifest", + }, + { + Action: manifestUpdate, + CustomHelpTemplate: helpTemplate, + Name: "update", + Usage: "update the hash for an already existing path in the manifest", + ArgsUsage: "<MANIFEST> <path> <hash>", + Description: "Update the hash for an already existing path in the manifest", + }, + { + Action: manifestRemove, + CustomHelpTemplate: helpTemplate, + Name: "remove", + Usage: "removes a path from the manifest", + ArgsUsage: "<MANIFEST> <path>", + Description: "Removes a path from the manifest", + }, + }, +} + // manifestAdd adds a new entry to the manifest at the given path. // New entry hash, the last argument, must be the hash of a manifest // with only one entry, whose metadata will be added to the original manifest.
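The hunks above complete the main.go side of this cmd/swarm refactor: each subcommand definition now lives in the file that implements its action (upload.go, access.go, feeds.go, and so on), and main.go merely assembles the registry. A rough self-contained sketch of the same urfave/cli.v1 pattern follows; the demo names (versionCommand, printVersion, "demo") are illustrative, not code from this patch.

package main

import (
	"fmt"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

// versionCommand would sit in its own file (say version.go), next to
// printVersion, so the definition and its implementation read as a unit.
var versionCommand = cli.Command{
	Action:      printVersion,
	Name:        "version",
	Usage:       "print version numbers",
	ArgsUsage:   " ",
	Description: "The output of this command is supposed to be machine-readable",
}

func printVersion(ctx *cli.Context) error {
	fmt.Println("demo version 0.1.0")
	return nil
}

func main() {
	app := cli.NewApp()
	app.Name = "demo"
	// main.go only assembles the registry, one line per command file.
	app.Commands = []cli.Command{
		// See version.go
		versionCommand,
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Keeping each cli.Command beside its action lets a command file be read, tested, or dropped as a unit, which is what allows this patch to delete the ~160-line flag block and the monolithic command list from main.go wholesale.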
diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go new file mode 100644 index 000000000..2c5e3fd23 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go @@ -0,0 +1,366 @@ +package main + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptrace" + "os" + "os/exec" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/api/client" + "github.com/ethereum/go-ethereum/swarm/spancontext" + "github.com/ethereum/go-ethereum/swarm/storage/feed" + "github.com/ethereum/go-ethereum/swarm/testutil" + colorable "github.com/mattn/go-colorable" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pborman/uuid" + cli "gopkg.in/urfave/cli.v1" +) + +const ( + feedRandomDataLength = 8 +) + +func cliFeedUploadAndSync(c *cli.Context) error { + metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1) + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))) + + errc := make(chan error) + go func() { + errc <- feedUploadAndSync(c) + }() + + select { + case err := <-errc: + if err != nil { + metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1) + } + return err + case <-time.After(time.Duration(timeout) * time.Second): + metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1) + return fmt.Errorf("timeout after %v sec", timeout) + } +} + +// TODO: retrieve with manifest + extract repeating code +func feedUploadAndSync(c *cli.Context) error { + defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now()) + + generateEndpoints(scheme, cluster, appName, from, to) + + log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing") + + // create a random private key to sign updates with and derive the address + pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test") + if err != nil { + return err + } + defer pkFile.Close() + defer os.Remove(pkFile.Name()) + + privkeyHex := "0000000000000000000000000000000000000000000000000000000000001976" + privKey, err := crypto.HexToECDSA(privkeyHex) + if err != nil { + return err + } + user := crypto.PubkeyToAddress(privKey.PublicKey) + userHex := hexutil.Encode(user.Bytes()) + + // save the private key to a file + _, err = io.WriteString(pkFile, privkeyHex) + if err != nil { + return err + } + + // keep hex strings for topic and subtopic + var topicHex string + var subTopicHex string + + // and create combination hex topics for bzz-feed retrieval + // xor'ed with topic (zero-value topic if no topic) + var subTopicOnlyHex string + var mergedSubTopicHex string + + // generate random topic and subtopic and put a hex on them + topicBytes, err := generateRandomData(feed.TopicLength) + topicHex = hexutil.Encode(topicBytes) + subTopicBytes, err := generateRandomData(8) + subTopicHex = hexutil.Encode(subTopicBytes) + if err != nil { + return err + } + mergedSubTopic, err := feed.NewTopic(subTopicHex, topicBytes) + if err != nil { + return err + } + mergedSubTopicHex = hexutil.Encode(mergedSubTopic[:]) + subTopicOnlyBytes, err := 
feed.NewTopic(subTopicHex, nil) + if err != nil { + return err + } + subTopicOnlyHex = hexutil.Encode(subTopicOnlyBytes[:]) + + // create feed manifest, topic only + var out bytes.Buffer + cmd := exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--user", userHex) + cmd.Stdout = &out + log.Debug("create feed manifest topic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + return err + } + manifestWithTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) + if len(manifestWithTopic) != 64 { + return fmt.Errorf("unknown feed create manifest hash format (topic): (%d) %s", len(out.String()), manifestWithTopic) + } + log.Debug("create topic feed", "manifest", manifestWithTopic) + out.Reset() + + // create feed manifest, subtopic only + cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--name", subTopicHex, "--user", userHex) + cmd.Stdout = &out + log.Debug("create feed manifest subtopic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + return err + } + manifestWithSubTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) + if len(manifestWithSubTopic) != 64 { + return fmt.Errorf("unknown feed create manifest hash format (subtopic): (%d) %s", len(out.String()), manifestWithSubTopic) + } + log.Debug("create subtopic feed", "manifest", manifestWithTopic) + out.Reset() + + // create feed manifest, merged topic + cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex) + cmd.Stdout = &out + log.Debug("create feed manifest mergetopic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + log.Error(err.Error()) + return err + } + manifestWithMergedTopic := strings.TrimRight(out.String(), string([]byte{0x0a})) + if len(manifestWithMergedTopic) != 64 { + return fmt.Errorf("unknown feed create manifest hash format (mergedtopic): (%d) %s", len(out.String()), manifestWithMergedTopic) + } + log.Debug("create mergedtopic feed", "manifest", manifestWithMergedTopic) + out.Reset() + + // create test data + data, err := generateRandomData(feedRandomDataLength) + if err != nil { + return err + } + h := md5.New() + h.Write(data) + dataHash := h.Sum(nil) + dataHex := hexutil.Encode(data) + + // update with topic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, dataHex) + cmd.Stdout = &out + log.Debug("update feed manifest topic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update topic", "out", out) + out.Reset() + + // update with subtopic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, dataHex) + cmd.Stdout = &out + log.Debug("update feed manifest subtopic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update subtopic", "out", out) + out.Reset() + + // update with merged topic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex) + cmd.Stdout = &out + log.Debug("update feed manifest merged topic cmd", "cmd", cmd) + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update mergedtopic", "out", out) + out.Reset() + + time.Sleep(3 * time.Second) + + // retrieve the data + wg := sync.WaitGroup{} + for _, endpoint := range endpoints { + // raw retrieve, topic only + for _, hex := range 
[]string{topicHex, subTopicOnlyHex, mergedSubTopicHex} { + wg.Add(1) + ruid := uuid.New()[:8] + go func(hex string, endpoint string, ruid string) { + for { + err := fetchFeed(hex, userHex, endpoint, dataHash, ruid) + if err != nil { + continue + } + + wg.Done() + return + } + }(hex, endpoint, ruid) + + } + } + wg.Wait() + log.Info("all endpoints synced random data successfully") + + // upload test file + seed := int(time.Now().UnixNano() / 1e6) + log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed) + + randomBytes := testutil.RandomBytes(seed, filesize*1000) + + hash, err := upload(&randomBytes, endpoints[0]) + if err != nil { + return err + } + hashBytes, err := hexutil.Decode("0x" + hash) + if err != nil { + return err + } + multihashHex := hexutil.Encode(hashBytes) + fileHash, err := digest(bytes.NewReader(randomBytes)) + if err != nil { + return err + } + + log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fileHash)) + + // update file with topic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, multihashHex) + cmd.Stdout = &out + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update topic", "out", out) + out.Reset() + + // update file with subtopic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, multihashHex) + cmd.Stdout = &out + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update subtopic", "out", out) + out.Reset() + + // update file with merged topic + cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex) + cmd.Stdout = &out + err = cmd.Run() + if err != nil { + return err + } + log.Debug("feed update mergedtopic", "out", out) + out.Reset() + + time.Sleep(3 * time.Second) + + for _, endpoint := range endpoints { + + // manifest retrieve, topic only + for _, url := range []string{manifestWithTopic, manifestWithSubTopic, manifestWithMergedTopic} { + wg.Add(1) + ruid := uuid.New()[:8] + go func(url string, endpoint string, ruid string) { + for { + err := fetch(url, endpoint, fileHash, ruid) + if err != nil { + continue + } + + wg.Done() + return + } + }(url, endpoint, ruid) + } + + } + wg.Wait() + log.Info("all endpoints synced random file successfully") + + return nil +} + +func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error { + ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch") + defer sp.Finish() + + log.Trace("sleeping", "ruid", ruid) + time.Sleep(3 * time.Second) + + log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user) + + var tn time.Time + reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user + req, _ := http.NewRequest("GET", reqUri, nil) + + opentracing.GlobalTracer().Inject( + sp.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + + trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn) + + req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) + transport := http.DefaultTransport + + //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + + tn = time.Now() + res, err := transport.RoundTrip(req) + if err != nil { + log.Error(err.Error(), "ruid", ruid) + return err + } + + log.Trace("http get response (feed)", 
"ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength) + + if res.StatusCode != 200 { + return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid) + } + + defer res.Body.Close() + + rdigest, err := digest(res.Body) + if err != nil { + log.Warn(err.Error(), "ruid", ruid) + return err + } + + if !bytes.Equal(rdigest, original) { + err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) + log.Warn(err.Error(), "ruid", ruid) + return err + } + + log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) + + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go index 70aee1922..66cecdc5c 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go @@ -17,28 +17,41 @@ package main import ( + "fmt" "os" "sort" + "github.com/ethereum/go-ethereum/cmd/utils" + gethmetrics "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/metrics/influxdb" + swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" + "github.com/ethereum/go-ethereum/swarm/tracing" + "github.com/ethereum/go-ethereum/log" - colorable "github.com/mattn/go-colorable" cli "gopkg.in/urfave/cli.v1" ) +var ( + gitCommit string // Git SHA1 commit hash of the release (set via linker flags) +) + var ( endpoints []string includeLocalhost bool cluster string + appName string scheme string filesize int + syncDelay int from int to int + verbosity int + timeout int + single bool ) func main() { - log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) app := cli.NewApp() app.Name = "smoke-test" @@ -47,10 +60,16 @@ func main() { app.Flags = []cli.Flag{ cli.StringFlag{ Name: "cluster-endpoint", - Value: "testing", - Usage: "cluster to point to (local, open or testing)", + Value: "prod", + Usage: "cluster to point to (prod or a given namespace)", Destination: &cluster, }, + cli.StringFlag{ + Name: "app", + Value: "swarm", + Usage: "application to point to (swarm or swarm-private)", + Destination: &appName, + }, cli.IntFlag{ Name: "cluster-from", Value: 8501, @@ -80,8 +99,42 @@ func main() { Usage: "file size for generated random file in KB", Destination: &filesize, }, + cli.IntFlag{ + Name: "sync-delay", + Value: 5, + Usage: "duration of delay in seconds to wait for content to be synced", + Destination: &syncDelay, + }, + cli.IntFlag{ + Name: "verbosity", + Value: 1, + Usage: "verbosity", + Destination: &verbosity, + }, + cli.IntFlag{ + Name: "timeout", + Value: 120, + Usage: "timeout in seconds after which kill the process", + Destination: &timeout, + }, + cli.BoolFlag{ + Name: "single", + Usage: "whether to fetch content from a single node or from all nodes", + Destination: &single, + }, } + app.Flags = append(app.Flags, []cli.Flag{ + utils.MetricsEnabledFlag, + swarmmetrics.MetricsInfluxDBEndpointFlag, + swarmmetrics.MetricsInfluxDBDatabaseFlag, + swarmmetrics.MetricsInfluxDBUsernameFlag, + swarmmetrics.MetricsInfluxDBPasswordFlag, + swarmmetrics.MetricsInfluxDBHostTagFlag, + }...) + + app.Flags = append(app.Flags, tracing.Flags...) 
+ app.Commands = []cli.Command{ { Name: "upload_and_sync", @@ -89,13 +142,49 @@ func main() { Usage: "upload and sync", Action: cliUploadAndSync, }, + { + Name: "feed_sync", + Aliases: []string{"f"}, + Usage: "feed update generate, upload and sync", + Action: cliFeedUploadAndSync, + }, } sort.Sort(cli.FlagsByName(app.Flags)) sort.Sort(cli.CommandsByName(app.Commands)) + app.Before = func(ctx *cli.Context) error { + tracing.Setup(ctx) + return nil + } + + app.After = func(ctx *cli.Context) error { + return emitMetrics(ctx) + } + err := app.Run(os.Args) if err != nil { log.Error(err.Error()) + + os.Exit(1) } } + +func emitMetrics(ctx *cli.Context) error { + if gethmetrics.Enabled { + var ( + endpoint = ctx.GlobalString(swarmmetrics.MetricsInfluxDBEndpointFlag.Name) + database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name) + username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name) + password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name) + hosttag = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name) + ) + return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{ + "host": hosttag, + "version": gitCommit, + "filesize": fmt.Sprintf("%v", filesize), + }) + } + + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go index 5e0ff4b0f..d605f79a3 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -18,38 +18,41 @@ package main import ( "bytes" + "context" "crypto/md5" - "crypto/rand" + crand "crypto/rand" + "errors" "fmt" "io" "io/ioutil" + "math/rand" "net/http" + "net/http/httptrace" "os" - "os/exec" - "strings" "sync" "time" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/api/client" + "github.com/ethereum/go-ethereum/swarm/spancontext" + "github.com/ethereum/go-ethereum/swarm/testutil" + opentracing "github.com/opentracing/opentracing-go" "github.com/pborman/uuid" cli "gopkg.in/urfave/cli.v1" ) -func generateEndpoints(scheme string, cluster string, from int, to int) { +func generateEndpoints(scheme string, cluster string, app string, from int, to int) { if cluster == "prod" { - cluster = "" - } else if cluster == "local" { - for port := from; port <= to; port++ { - endpoints = append(endpoints, fmt.Sprintf("%s://localhost:%v", scheme, port)) + for port := from; port < to; port++ { + endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port)) } - return } else { - cluster = cluster + "." 
- } - - for port := from; port <= to; port++ { - endpoints = append(endpoints, fmt.Sprintf("%s://%v.%sswarm-gateways.net", scheme, port, cluster)) + for port := from; port < to; port++ { + endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster)) + } } if includeLocalhost { @@ -58,22 +61,51 @@ func generateEndpoints(scheme string, cluster string, from int, to int) { } func cliUploadAndSync(c *cli.Context) error { - defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now()) + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true)))) - generateEndpoints(scheme, cluster, from, to) + metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1) - log.Info("uploading to " + endpoints[0] + " and syncing") + errc := make(chan error) + go func() { + errc <- uploadAndSync(c) + }() - f, cleanup := generateRandomFile(filesize * 1000) - defer cleanup() + select { + case err := <-errc: + if err != nil { + metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1) + } + return err + case <-time.After(time.Duration(timeout) * time.Second): + metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1) + return fmt.Errorf("timeout after %v sec", timeout) + } +} - hash, err := upload(f, endpoints[0]) +func uploadAndSync(c *cli.Context) error { + defer func(now time.Time) { + totalTime := time.Since(now) + + log.Info("total time", "time", totalTime, "kb", filesize) + metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime)) + }(time.Now()) + + generateEndpoints(scheme, cluster, appName, from, to) + seed := int(time.Now().UnixNano() / 1e6) + log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed) + + randomBytes := testutil.RandomBytes(seed, filesize*1000) + + t1 := time.Now() + hash, err := upload(&randomBytes, endpoints[0]) if err != nil { log.Error(err.Error()) return err } + metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1))) - fhash, err := digest(f) + fhash, err := digest(bytes.NewReader(randomBytes)) if err != nil { log.Error(err.Error()) return err @@ -81,24 +113,47 @@ func cliUploadAndSync(c *cli.Context) error { log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) - time.Sleep(3 * time.Second) + time.Sleep(time.Duration(syncDelay) * time.Second) wg := sync.WaitGroup{} - for _, endpoint := range endpoints { - endpoint := endpoint + if single { + rand.Seed(time.Now().UTC().UnixNano()) + randIndex := 1 + rand.Intn(len(endpoints)-1) ruid := uuid.New()[:8] wg.Add(1) go func(endpoint string, ruid string) { for { + start := time.Now() err := fetch(hash, endpoint, fhash, ruid) + fetchTime := time.Since(start) if err != nil { continue } + metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime)) wg.Done() return } - }(endpoint, ruid) + }(endpoints[randIndex], ruid) + } else { + for _, endpoint := range endpoints { + ruid := uuid.New()[:8] + wg.Add(1) + go func(endpoint string, ruid string) { + for { + start := time.Now() + err := fetch(hash, endpoint, fhash, ruid) + fetchTime := time.Since(start) + if err != nil { + continue + } + + metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime)) + wg.Done() + return + } + }(endpoint, ruid) + } } wg.Wait() log.Info("all endpoints synced random file successfully") @@ -108,13 +163,33 @@ func 
cliUploadAndSync(c *cli.Context) error { // fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file func fetch(hash string, endpoint string, original []byte, ruid string) error { + ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch") + defer sp.Finish() + log.Trace("sleeping", "ruid", ruid) time.Sleep(3 * time.Second) - log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) - res, err := http.Get(endpoint + "/bzz:/" + hash + "/") + + var tn time.Time + reqUri := endpoint + "/bzz:/" + hash + "/" + req, _ := http.NewRequest("GET", reqUri, nil) + + opentracing.GlobalTracer().Inject( + sp.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + + trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn) + + req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) + transport := http.DefaultTransport + + //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + + tn = time.Now() + res, err := transport.RoundTrip(req) if err != nil { - log.Warn(err.Error(), "ruid", ruid) + log.Error(err.Error(), "ruid", ruid) return err } log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength) @@ -145,16 +220,19 @@ func fetch(hash string, endpoint string, original []byte, ruid string) error { } // upload is uploading a file `f` to `endpoint` via the `swarm up` cmd -func upload(f *os.File, endpoint string) (string, error) { - var out bytes.Buffer - cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name()) - cmd.Stdout = &out - err := cmd.Run() - if err != nil { - return "", err +func upload(dataBytes *[]byte, endpoint string) (string, error) { + swarm := client.NewClient(endpoint) + f := &client.File{ + ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)), + ManifestEntry: api.ManifestEntry{ + ContentType: "text/plain", + Mode: 0660, + Size: int64(len(*dataBytes)), + }, } - hash := strings.TrimRight(out.String(), "\r\n") - return hash, nil + + // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded. 
+ return swarm.Upload(f, "", false) } func digest(r io.Reader) ([]byte, error) { @@ -166,26 +244,14 @@ func digest(r io.Reader) ([]byte, error) { return h.Sum(nil), nil } -// generateRandomFile is creating a temporary file with the requested byte size -func generateRandomFile(size int) (f *os.File, teardown func()) { - // create a tmp file - tmp, err := ioutil.TempFile("", "swarm-test") +// generates random data in heap buffer +func generateRandomData(datasize int) ([]byte, error) { + b := make([]byte, datasize) + c, err := crand.Read(b) if err != nil { - panic(err) + return nil, err + } else if c != datasize { + return nil, errors.New("short read") } - - // callback for tmp file cleanup - teardown = func() { - tmp.Close() - os.Remove(tmp.Name()) - } - - buf := make([]byte, size) - _, err = rand.Read(buf) - if err != nil { - panic(err) - } - ioutil.WriteFile(tmp.Name(), buf, 0755) - - return tmp, teardown + return b, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload.go b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload.go index 2225127cf..992f2d6e9 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/swarm/upload.go @@ -26,29 +26,47 @@ import ( "os/user" "path" "path/filepath" + "strconv" "strings" + "github.com/ethereum/go-ethereum/log" swarm "github.com/ethereum/go-ethereum/swarm/api/client" "github.com/ethereum/go-ethereum/cmd/utils" "gopkg.in/urfave/cli.v1" ) -func upload(ctx *cli.Context) { +var upCommand = cli.Command{ + Action: upload, + CustomHelpTemplate: helpTemplate, + Name: "up", + Usage: "uploads a file or directory to swarm using the HTTP API", + ArgsUsage: "<file>", + Flags: []cli.Flag{SwarmEncryptedFlag}, + Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", +} +func upload(ctx *cli.Context) { args := ctx.Args() var ( - bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name) - wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) - defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) - fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) - mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) - client = swarm.NewClient(bzzapi) - toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name) - file string + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name) + wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) + defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) + fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) + mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) + client = swarm.NewClient(bzzapi) + toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name) + autoDefaultPath = false + file string ) - + if autoDefaultPathString := os.Getenv(SWARM_AUTO_DEFAULTPATH); autoDefaultPathString != "" { + b, err := strconv.ParseBool(autoDefaultPathString) + if err != nil { + utils.Fatalf("invalid environment variable %s: %v", SWARM_AUTO_DEFAULTPATH, err) + } + autoDefaultPath = b + } if len(args) != 1 { if fromStdin { tmp, err := ioutil.TempFile("", "swarm-stdin") @@ -97,6 +115,15 @@ func upload(ctx *cli.Context) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } + if autoDefaultPath && defaultPath == "" { + defaultEntryCandidate := path.Join(file, "index.html") + log.Debug("trying to find default path", "path", defaultEntryCandidate) + defaultEntryStat,
err := os.Stat(defaultEntryCandidate) + if err == nil && !defaultEntryStat.IsDir() { + log.Debug("setting auto detected default path", "path", defaultEntryCandidate) + defaultPath = defaultEntryCandidate + } + } if defaultPath != "" { // construct absolute default path absDefaultPath, _ := filepath.Abs(defaultPath) diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/utils/cmd.go b/vendor/github.com/ethereum/go-ethereum/cmd/utils/cmd.go index 58d72f32b..f23aa5775 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/utils/cmd.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/utils/cmd.go @@ -272,13 +272,13 @@ func ImportPreimages(db *ethdb.LDBDatabase, fn string) error { // Accumulate the preimages and flush when enough was gathered preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob) if len(preimages) > 1024 { - rawdb.WritePreimages(db, 0, preimages) + rawdb.WritePreimages(db, preimages) preimages = make(map[common.Hash][]byte) } } // Flush the last batch preimage data if len(preimages) > 0 { - rawdb.WritePreimages(db, 0, preimages) + rawdb.WritePreimages(db, preimages) } return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go index 878847ca0..d609843b9 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go +++ b/vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go @@ -141,6 +141,10 @@ var ( Name: "rinkeby", Usage: "Rinkeby network: pre-configured proof-of-authority test network", } + ConstantinopleOverrideFlag = cli.Uint64Flag{ + Name: "override.constantinople", + Usage: "Manually specify constantinople fork-block, overriding the bundled setting", + } DeveloperFlag = cli.BoolFlag{ Name: "dev", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", @@ -200,6 +204,10 @@ var ( Name: "lightkdf", Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", } + WhitelistFlag = cli.StringFlag{ + Name: "whitelist", + Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)", + } // Dashboard settings DashboardEnabledFlag = cli.BoolFlag{ Name: metrics.DashboardEnabledFlag, @@ -313,7 +321,12 @@ var ( CacheDatabaseFlag = cli.IntFlag{ Name: "cache.database", Usage: "Percentage of cache memory allowance to use for database io", - Value: 75, + Value: 50, + } + CacheTrieFlag = cli.IntFlag{ + Name: "cache.trie", + Usage: "Percentage of cache memory allowance to use for trie caching", + Value: 25, } CacheGCFlag = cli.IntFlag{ Name: "cache.gc", @@ -871,17 +884,12 @@ func SetULC(ctx *cli.Context, cfg *eth.Config) { // makeDatabaseHandles raises the number of allowed file handles per process // for Geth and returns half of the allowance to assign to the database.
func makeDatabaseHandles() int { - limit, err := fdlimit.Current() + limit, err := fdlimit.Maximum() if err != nil { Fatalf("Failed to retrieve file descriptor allowance: %v", err) } - if limit < 2048 { - if err := fdlimit.Raise(2048); err != nil { - Fatalf("Failed to raise file descriptor allowance: %v", err) - } - } - if limit > 2048 { // cap database file descriptors even if more is available - limit = 2048 + if err := fdlimit.Raise(uint64(limit)); err != nil { + Fatalf("Failed to raise file descriptor allowance: %v", err) } return limit / 2 // Leave half for networking and other stuff } @@ -1025,16 +1033,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { setWS(ctx, cfg) setNodeUserIdent(ctx, cfg) - switch { - case ctx.GlobalIsSet(DataDirFlag.Name): - cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) - case ctx.GlobalBool(DeveloperFlag.Name): - cfg.DataDir = "" // unless explicitly requested, use memory databases - case ctx.GlobalBool(TestnetFlag.Name): - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet") - case ctx.GlobalBool(RinkebyFlag.Name): - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") - } + setDataDir(ctx, cfg) if ctx.GlobalIsSet(KeyStoreDirFlag.Name) { cfg.KeyStoreDir = ctx.GlobalString(KeyStoreDirFlag.Name) @@ -1047,6 +1046,19 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { } } +func setDataDir(ctx *cli.Context, cfg *node.Config) { + switch { + case ctx.GlobalIsSet(DataDirFlag.Name): + cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) + case ctx.GlobalBool(DeveloperFlag.Name): + cfg.DataDir = "" // unless explicitly requested, use memory databases + case ctx.GlobalBool(TestnetFlag.Name): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet") + case ctx.GlobalBool(RinkebyFlag.Name): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") + } +} + func setGPO(ctx *cli.Context, cfg *gasprice.Config) { if ctx.GlobalIsSet(GpoBlocksFlag.Name) { cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name) @@ -1120,6 +1132,29 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) { } } +func setWhitelist(ctx *cli.Context, cfg *eth.Config) { + whitelist := ctx.GlobalString(WhitelistFlag.Name) + if whitelist == "" { + return + } + cfg.Whitelist = make(map[uint64]common.Hash) + for _, entry := range strings.Split(whitelist, ",") { + parts := strings.Split(entry, "=") + if len(parts) != 2 { + Fatalf("Invalid whitelist entry: %s", entry) + } + number, err := strconv.ParseUint(parts[0], 0, 64) + if err != nil { + Fatalf("Invalid whitelist block number %s: %v", parts[0], err) + } + var hash common.Hash + if err = hash.UnmarshalText([]byte(parts[1])); err != nil { + Fatalf("Invalid whitelist hash %s: %v", parts[1], err) + } + cfg.Whitelist[number] = hash + } +} + // checkExclusive verifies that only a single instance of the provided flags was // set by the user. Each flag might optionally be followed by a string type to // specialize it further. 
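For reference, the setWhitelist helper above accepts comma-separated <number>=<hash> pairs from the new --whitelist flag. Below is a minimal standalone sketch of the same parsing, with go-ethereum's common.Hash stood in by a plain [32]byte so it runs without the library; the sample values in main are illustrative only, not part of the patch:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)

// parseWhitelist mirrors the setWhitelist logic above: entries are
// comma-separated <number>=<hash> pairs.
func parseWhitelist(s string) (map[uint64][32]byte, error) {
	whitelist := make(map[uint64][32]byte)
	for _, entry := range strings.Split(s, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid whitelist entry: %s", entry)
		}
		// Base 0 accepts both decimal and 0x-prefixed block numbers,
		// matching strconv.ParseUint(parts[0], 0, 64) in setWhitelist.
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid whitelist block number %s: %v", parts[0], err)
		}
		raw, err := hex.DecodeString(strings.TrimPrefix(parts[1], "0x"))
		if err != nil || len(raw) != 32 {
			return nil, fmt.Errorf("invalid whitelist hash %s", parts[1])
		}
		var hash [32]byte
		copy(hash[:], raw)
		whitelist[number] = hash
	}
	return whitelist, nil
}

func main() {
	// Illustrative values only: pin block 4370000 to a made-up hash.
	wl, err := parseWhitelist("4370000=0x" + strings.Repeat("ab", 32))
	if err != nil {
		panic(err)
	}
	fmt.Printf("enforcing %d whitelist entries\n", len(wl))
}
```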
@@ -1185,6 +1220,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { setGPO(ctx, &cfg.GPO) setTxPool(ctx, &cfg.TxPool) setEthash(ctx, cfg) + setWhitelist(ctx, cfg) if ctx.GlobalIsSet(SyncModeFlag.Name) { cfg.SyncMode = *GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) @@ -1201,7 +1237,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name) } - if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) { cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 } @@ -1212,8 +1247,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { } cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive" + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { + cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100 + } if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { - cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 + cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } if ctx.GlobalIsSet(MinerNotifyFlag.Name) { cfg.MinerNotify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") } @@ -1423,7 +1461,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) { var err error chainDb = MakeChainDatabase(ctx, stack) - config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx)) if err != nil { Fatalf("%v", err) } @@ -1448,12 +1485,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } cache := &core.CacheConfig{ - Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive", - TrieNodeLimit: eth.DefaultConfig.TrieCache, - TrieTimeLimit: eth.DefaultConfig.TrieTimeout, + Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive", + TrieCleanLimit: eth.DefaultConfig.TrieCleanCache, + TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache, + TrieTimeLimit: eth.DefaultConfig.TrieTimeout, + } + if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { + cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100 } if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) { - cache.TrieNodeLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 + cache.TrieDirtyLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 } vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)} chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil) diff --git a/vendor/github.com/ethereum/go-ethereum/common/bytes.go b/vendor/github.com/ethereum/go-ethereum/common/bytes.go index 0c257a1ee..c82e61624 100644 --- a/vendor/github.com/ethereum/go-ethereum/common/bytes.go +++ b/vendor/github.com/ethereum/go-ethereum/common/bytes.go @@ -31,6 +31,15 @@ func ToHex(b []byte) string { return "0x" + hex } +// ToHexArray creates an array of hex strings based on [][]byte +func ToHexArray(b [][]byte) []string { + r := make([]string, len(b)) + for i := range b { + r[i] = ToHex(b[i]) + } + return r +} + // FromHex returns the bytes represented by the hexadecimal string s. // s may be prefixed with "0x".
func FromHex(s string) []byte { diff --git a/vendor/github.com/ethereum/go-ethereum/common/compiler/solidity.go b/vendor/github.com/ethereum/go-ethereum/common/compiler/solidity.go index f6e8d2e42..b7c8ec563 100644 --- a/vendor/github.com/ethereum/go-ethereum/common/compiler/solidity.go +++ b/vendor/github.com/ethereum/go-ethereum/common/compiler/solidity.go @@ -31,14 +31,15 @@ import ( var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`) -// Contract contains information about a compiled contract, alongside its code. +// Contract contains information about a compiled contract, alongside its code and runtime code. type Contract struct { - Code string `json:"code"` - Info ContractInfo `json:"info"` + Code string `json:"code"` + RuntimeCode string `json:"runtime-code"` + Info ContractInfo `json:"info"` } // ContractInfo contains information about a compiled contract, including access -// to the ABI definition, user and developer docs, and metadata. +// to the ABI definition, source mapping, user and developer docs, and metadata. // // Depending on the source, language version, compiler version, and compiler // options will provide information about how the contract was compiled. @@ -48,6 +49,8 @@ type ContractInfo struct { LanguageVersion string `json:"languageVersion"` CompilerVersion string `json:"compilerVersion"` CompilerOptions string `json:"compilerOptions"` + SrcMap string `json:"srcMap"` + SrcMapRuntime string `json:"srcMapRuntime"` AbiDefinition interface{} `json:"abiDefinition"` UserDoc interface{} `json:"userDoc"` DeveloperDoc interface{} `json:"developerDoc"` @@ -63,14 +66,16 @@ type Solidity struct { // --combined-output format type solcOutput struct { Contracts map[string]struct { - Bin, Abi, Devdoc, Userdoc, Metadata string + BinRuntime string `json:"bin-runtime"` + SrcMapRuntime string `json:"srcmap-runtime"` + Bin, SrcMap, Abi, Devdoc, Userdoc, Metadata string } Version string } func (s *Solidity) makeArgs() []string { p := []string{ - "--combined-json", "bin,abi,userdoc,devdoc", + "--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc", "--optimize", // code optimizer switched on } if s.Major > 0 || s.Minor > 4 || s.Patch > 6 { @@ -157,7 +162,7 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro // provided source, language and compiler version, and compiler options are all // passed through into the Contract structs. // -// The solc output is expected to contain ABI, user docs, and dev docs. +// The solc output is expected to contain ABI, source mapping, user docs, and dev docs. // // Returns an error if the JSON is malformed or missing data, or if the JSON // embedded within the JSON is malformed. 
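The makeArgs change above asks solc for runtime bytecode and source maps in addition to the previously requested fields. A rough sketch of the resulting invocation and of decoding its combined JSON output, mirroring the solcOutput struct in this patch; the contract path is a placeholder, and solc >= 0.4.7 would additionally be passed --allow-paths:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

// solcCombined mirrors the solcOutput struct above; only the fields
// needed for this demo are decoded.
type solcCombined struct {
	Contracts map[string]struct {
		Bin           string `json:"bin"`
		BinRuntime    string `json:"bin-runtime"`
		SrcMap        string `json:"srcmap"`
		SrcMapRuntime string `json:"srcmap-runtime"`
	} `json:"contracts"`
	Version string `json:"version"`
}

func main() {
	// The selector list matches the new makeArgs; "contract.sol" is a
	// placeholder source path.
	cmd := exec.Command("solc",
		"--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc",
		"--optimize", "contract.sol")
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		fmt.Println("solc failed:", err)
		return
	}
	var parsed solcCombined
	if err := json.Unmarshal(out.Bytes(), &parsed); err != nil {
		fmt.Println("malformed solc output:", err)
		return
	}
	for name, c := range parsed.Contracts {
		fmt.Printf("%s: %d bytes of runtime code, runtime srcmap present: %t\n",
			name, len(c.BinRuntime)/2, c.SrcMapRuntime != "")
	}
}
```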
@@ -184,13 +189,16 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin return nil, fmt.Errorf("solc: error reading dev doc: %v", err) } contracts[name] = &Contract{ - Code: "0x" + info.Bin, + Code: "0x" + info.Bin, + RuntimeCode: "0x" + info.BinRuntime, Info: ContractInfo{ Source: source, Language: "Solidity", LanguageVersion: languageVersion, CompilerVersion: compilerVersion, CompilerOptions: compilerOptions, + SrcMap: info.SrcMap, + SrcMapRuntime: info.SrcMapRuntime, AbiDefinition: abi, UserDoc: userdoc, DeveloperDoc: devdoc, diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go index eae09f91d..0cb72c35c 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/clique/clique.go @@ -696,7 +696,7 @@ func (c *Clique) SealHash(header *types.Header) common.Hash { return sigHash(header) } -// Close implements consensus.Engine. It's a noop for clique as there is are no background threads. +// Close implements consensus.Engine. It's a noop for clique as there are no background threads. func (c *Clique) Close() error { return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/api.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/api.go index a04ea235d..4d8eed416 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/api.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/api.go @@ -37,27 +37,28 @@ type API struct { // result[0] - 32 bytes hex encoded current block header pow-hash // result[1] - 32 bytes hex encoded seed hash used for DAG // result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -func (api *API) GetWork() ([3]string, error) { +// result[3] - hex encoded block number +func (api *API) GetWork() ([4]string, error) { if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest { - return [3]string{}, errors.New("not supported") + return [4]string{}, errors.New("not supported") } var ( - workCh = make(chan [3]string, 1) + workCh = make(chan [4]string, 1) errc = make(chan error, 1) ) select { case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: case <-api.ethash.exitCh: - return [3]string{}, errEthashStopped + return [4]string{}, errEthashStopped } select { case work := <-workCh: return work, nil case err := <-errc: - return [3]string{}, err + return [4]string{}, err } } diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go index d124cb1e2..78892e1da 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/ethash.go @@ -432,7 +432,7 @@ type hashrate struct { // sealWork wraps a seal work package for remote sealer. 
type sealWork struct { errc chan error - res chan [3]string + res chan [4]string } // Ethash is a consensus engine based on proof-of-work implementing the ethash diff --git a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/sealer.go b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/sealer.go index 06c98a781..3a0919ca9 100644 --- a/vendor/github.com/ethereum/go-ethereum/consensus/ethash/sealer.go +++ b/vendor/github.com/ethereum/go-ethereum/consensus/ethash/sealer.go @@ -30,6 +30,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -193,7 +194,7 @@ func (ethash *Ethash) remote(notify []string, noverify bool) { results chan<- *types.Block currentBlock *types.Block - currentWork [3]string + currentWork [4]string notifyTransport = &http.Transport{} notifyClient = &http.Client{ @@ -234,12 +235,14 @@ func (ethash *Ethash) remote(notify []string, noverify bool) { // result[0], 32 bytes hex encoded current block header pow-hash // result[1], 32 bytes hex encoded seed hash used for DAG // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty + // result[3], hex encoded block number makeWork := func(block *types.Block) { hash := ethash.SealHash(block.Header()) currentWork[0] = hash.Hex() currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() + currentWork[3] = hexutil.EncodeBig(block.Number()) // Trace the seal work fetched by remote sealer. currentBlock = block diff --git a/vendor/github.com/ethereum/go-ethereum/core/block_validator.go b/vendor/github.com/ethereum/go-ethereum/core/block_validator.go index 1329f6242..3b9496fec 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/block_validator.go +++ b/vendor/github.com/ethereum/go-ethereum/core/block_validator.go @@ -53,12 +53,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) { return ErrKnownBlock } - if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { - if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { - return consensus.ErrUnknownAncestor - } - return consensus.ErrPrunedAncestor - } // Header validity is known at this point, check the uncles and transactions header := block.Header() if err := v.engine.VerifyUncles(v.bc, block); err != nil { @@ -70,6 +64,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash { return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) } + if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { + if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { + return consensus.ErrUnknownAncestor + } + return consensus.ErrPrunedAncestor + } return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go index f4a818f4c..c29063a73 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/blockchain.go +++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain.go @@ -47,7 +47,10 @@ import ( ) var ( - blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) + blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil) + blockValidationTimer = 
metrics.NewRegisteredTimer("chain/validation", nil) + blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil) + blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil) ErrNoGenesis = errors.New("Genesis not found in chain") ) @@ -68,9 +71,10 @@ const ( // CacheConfig contains the configuration values for the trie caching/pruning // that's resident in a blockchain. type CacheConfig struct { - Disabled bool // Whether to disable trie write caching (archive node) - TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk - TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk + Disabled bool // Whether to disable trie write caching (archive node) + TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory + TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk + TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk } // BlockChain represents the canonical chain given a database with a genesis @@ -140,8 +144,9 @@ type BlockChain struct { func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) { if cacheConfig == nil { cacheConfig = &CacheConfig{ - TrieNodeLimit: 256 * 1024 * 1024, - TrieTimeLimit: 5 * time.Minute, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, } } bodyCache, _ := lru.New(bodyCacheLimit) @@ -156,7 +161,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par cacheConfig: cacheConfig, db: db, triegc: prque.New(nil), - stateCache: state.NewDatabase(db), + stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit), quit: make(chan struct{}), shouldPreserve: shouldPreserve, bodyCache: bodyCache, @@ -205,6 +210,11 @@ func (bc *BlockChain) getProcInterrupt() bool { return atomic.LoadInt32(&bc.procInterrupt) == 1 } +// GetVMConfig returns the block chain VM config. +func (bc *BlockChain) GetVMConfig() *vm.Config { + return &bc.vmConfig +} + // loadLastState loads the last known chain state from the database. This method // assumes that the chain manager mutex is held. func (bc *BlockChain) loadLastState() error { @@ -393,6 +403,11 @@ func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) { return state.New(root, bc.stateCache) } +// StateCache returns the caching database underpinning the blockchain instance. +func (bc *BlockChain) StateCache() state.Database { + return bc.stateCache +} + // Reset purges the entire blockchain, restoring it to its genesis state. func (bc *BlockChain) Reset() error { return bc.ResetWithGenesisBlock(bc.genesisBlock) @@ -438,7 +453,11 @@ func (bc *BlockChain) repair(head **types.Block) error { return nil } // Otherwise rewind one block and recheck state availability there - (*head) = bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) + block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1) + if block == nil { + return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash()) + } + (*head) = block } } @@ -554,6 +573,17 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { return rawdb.HasBody(bc.db, hash, number) } +// HasFastBlock checks if a fast block is fully present in the database or not. 
+func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool { + if !bc.HasBlock(hash, number) { + return false + } + if bc.receiptsCache.Contains(hash) { + return true + } + return rawdb.HasReceipts(bc.db, hash, number) +} + // HasState checks if state trie is fully present in the database or not. func (bc *BlockChain) HasState(hash common.Hash) bool { _, err := bc.stateCache.OpenTrie(hash) @@ -611,12 +641,10 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts { if receipts, ok := bc.receiptsCache.Get(hash); ok { return receipts.(types.Receipts) } - number := rawdb.ReadHeaderNumber(bc.db, hash) if number == nil { return nil } - receipts := rawdb.ReadReceipts(bc.db, hash, *number) bc.receiptsCache.Add(hash, receipts) return receipts @@ -938,7 +966,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. // If we exceeded our memory allowance, flush matured singleton nodes to disk var ( nodes, imgs = triedb.Size() - limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 + limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 ) if nodes > limit || imgs > 4*1024*1024 { triedb.Cap(limit - ethdb.IdealBatchSize) @@ -1002,7 +1030,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. } // Write the positional metadata for transaction/receipt lookups and preimages rawdb.WriteTxLookupEntries(batch, block) - rawdb.WritePreimages(batch, block.NumberU64(), state.Preimages()) + rawdb.WritePreimages(batch, state.Preimages()) status = CanonStatTy } else { @@ -1020,6 +1048,18 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. return status, nil } +// addFutureBlock checks if the block is within the max allowed window to get +// accepted for future processing, and returns an error if the block is too far +// ahead and was not added. +func (bc *BlockChain) addFutureBlock(block *types.Block) error { + max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) + if block.Time().Cmp(max) > 0 { + return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max) + } + bc.futureBlocks.Add(block.Hash(), block) + return nil +} + // InsertChain attempts to insert the given batch of blocks in to the canonical // chain or, otherwise, create a fork. If an error is returned it will return // the index number of the failing block as well an error describing what went @@ -1027,18 +1067,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. // // After insertion is done, all accumulated events will be fired. func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { - n, events, logs, err := bc.insertChain(chain) - bc.PostChainEvents(events, logs) - return n, err -} - -// insertChain will execute the actual chain insertion and event aggregation. The -// only reason this method exists as a separate one is to make locking cleaner -// with deferred statements. 
-func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) { // Sanity check that we have something meaningful to import if len(chain) == 0 { - return 0, nil, nil, nil + return 0, nil } // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { @@ -1047,16 +1078,36 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) - return 0, nil, nil, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) } } // Pre-checks passed, start the full block imports bc.wg.Add(1) - defer bc.wg.Done() - bc.chainmu.Lock() - defer bc.chainmu.Unlock() + n, events, logs, err := bc.insertChain(chain, true) + bc.chainmu.Unlock() + bc.wg.Done() + + bc.PostChainEvents(events, logs) + return n, err +} + +// insertChain is the internal implementation of InsertChain, which assumes that +// 1) chains are contiguous, and 2) The chain mutex is held. +// +// This method is split out so that import batches that require re-injecting +// historical blocks can do so without releasing the lock, which could lead to +// racey behaviour. If a sidechain import is in progress, and the historic state +// is imported, but then new canon-head is added before the actual sidechain +// completes, then the historic state could be pruned again. +func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) { + // If the chain is terminating, don't even bother starting up + if atomic.LoadInt32(&bc.procInterrupt) == 1 { + return 0, nil, nil, nil + } + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) + senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) // A queued approach to delivering events.
This is generally // faster than direct delivery and requires much less mutex @@ -1073,16 +1124,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty for i, block := range chain { headers[i] = block.Header() - seals[i] = true + seals[i] = verifySeals } abort, results := bc.engine.VerifyHeaders(bc, headers, seals) defer close(abort) - // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) - senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) + // Peek the error for the first block to decide the directing import logic + it := newInsertIterator(chain, results, bc.Validator()) - // Iterate over the blocks and insert when the verifier permits - for i, block := range chain { + block, err := it.next() + switch { + // First block is pruned, insert as sidechain and reorg only if TD grows enough + case err == consensus.ErrPrunedAncestor: + return bc.insertSidechain(it) + + // First block is future, shove it (and all children) to the future queue (unknown ancestor) + case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())): + for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) { + if err := bc.addFutureBlock(block); err != nil { + return it.index, events, coalescedLogs, err + } + block, err = it.next() + } + stats.queued += it.processed() + stats.ignored += it.remaining() + + // If there are any still remaining, mark as ignored + return it.index, events, coalescedLogs, err + + // First block (and state) is known + // 1. We did a roll-back, and should now do a re-import + // 2. The block is stored as a sidechain, and is lying about its stateroot, and passes a stateroot + // from the canonical chain, which has not been verified. + case err == ErrKnownBlock: + // Skip all known blocks that are behind us + current := bc.CurrentBlock().NumberU64() + + for block != nil && err == ErrKnownBlock && current >= block.NumberU64() { + stats.ignored++ + block, err = it.next() + } + // Falls through to the block import + + // Some other error occurred, abort + case err != nil: + stats.ignored += len(it.chain) + bc.reportBlock(block, nil, err) + return it.index, events, coalescedLogs, err + } + // No validation errors for the first block (or chain prefix skipped) + for ; block != nil && err == nil; block, err = it.next() { // If the chain is terminating, stop processing blocks if atomic.LoadInt32(&bc.procInterrupt) == 1 { log.Debug("Premature abort during blocks processing") @@ -1091,115 +1182,53 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty // If the header is a banned one, straight out abort if BadHashes[block.Hash()] { bc.reportBlock(block, nil, ErrBlacklistedHash) - return i, events, coalescedLogs, ErrBlacklistedHash + return it.index, events, coalescedLogs, ErrBlacklistedHash } - // Wait for the block's verification to complete - bstart := time.Now() + // Retrieve the parent block and its state to execute on top + start := time.Now() - err := <-results - if err == nil { - err = bc.Validator().ValidateBody(block) - } - switch { - case err == ErrKnownBlock: - // Block and state both already known. However if the current block is below - // this number we did a rollback and we should reimport it nonetheless.
- if bc.CurrentBlock().NumberU64() >= block.NumberU64() { - stats.ignored++ - continue - } - - case err == consensus.ErrFutureBlock: - // Allow up to MaxFuture second in the future blocks. If this limit is exceeded - // the chain is discarded and processed at a later time if given. - max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) > 0 { - return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max) - } - bc.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - - case err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()): - bc.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - - case err == consensus.ErrPrunedAncestor: - // Block competing with the canonical chain, store in the db, but don't process - // until the competitor TD goes above the canonical TD - currentBlock := bc.CurrentBlock() - localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) - externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty()) - if localTd.Cmp(externTd) > 0 { - if err = bc.WriteBlockWithoutState(block, externTd); err != nil { - return i, events, coalescedLogs, err - } - continue - } - // Competitor chain beat canonical, gather all blocks from the common ancestor - var winner []*types.Block - - parent := bc.GetBlock(block.ParentHash(), block.NumberU64()-1) - for !bc.HasState(parent.Root()) { - winner = append(winner, parent) - parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) - } - for j := 0; j < len(winner)/2; j++ { - winner[j], winner[len(winner)-1-j] = winner[len(winner)-1-j], winner[j] - } - // Import all the pruned blocks to make the state available - bc.chainmu.Unlock() - _, evs, logs, err := bc.insertChain(winner) - bc.chainmu.Lock() - events, coalescedLogs = evs, logs - - if err != nil { - return i, events, coalescedLogs, err - } - - case err != nil: - bc.reportBlock(block, nil, err) - return i, events, coalescedLogs, err - } - // Create a new statedb using the parent block and report an - // error if it fails. - var parent *types.Block - if i == 0 { + parent := it.previous() + if parent == nil { parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1) - } else { - parent = chain[i-1] } state, err := state.New(parent.Root(), bc.stateCache) if err != nil { - return i, events, coalescedLogs, err + return it.index, events, coalescedLogs, err } // Process block using the parent state as reference point. + t0 := time.Now() receipts, logs, usedGas, err := bc.processor.Process(block, state, bc.vmConfig) + t1 := time.Now() if err != nil { bc.reportBlock(block, receipts, err) - return i, events, coalescedLogs, err + return it.index, events, coalescedLogs, err } // Validate the state using the default validator - err = bc.Validator().ValidateState(block, parent, state, receipts, usedGas) - if err != nil { + if err := bc.Validator().ValidateState(block, parent, state, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) - return i, events, coalescedLogs, err + return it.index, events, coalescedLogs, err } - proctime := time.Since(bstart) + t2 := time.Now() + proctime := time.Since(start) // Write the block to the chain and get the status. 
status, err := bc.WriteBlockWithState(block, receipts, state) + t3 := time.Now() if err != nil { - return i, events, coalescedLogs, err + return it.index, events, coalescedLogs, err } + blockInsertTimer.UpdateSince(start) + blockExecutionTimer.Update(t1.Sub(t0)) + blockValidationTimer.Update(t2.Sub(t1)) + blockWriteTimer.Update(t3.Sub(t2)) switch status { case CanonStatTy: - log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), - "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) + log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), + "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), + "elapsed", common.PrettyDuration(time.Since(start)), + "root", block.Root()) coalescedLogs = append(coalescedLogs, logs...) - blockInsertTimer.UpdateSince(bstart) events = append(events, ChainEvent{block, block.Hash(), logs}) lastCanon = block @@ -1207,78 +1236,153 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty bc.gcproc += proctime case SideStatTy: - log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed", - common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) - - blockInsertTimer.UpdateSince(bstart) + log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), + "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), + "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), + "root", block.Root()) events = append(events, ChainSideEvent{block}) } + blockInsertTimer.UpdateSince(start) stats.processed++ stats.usedGas += usedGas cache, _ := bc.stateCache.TrieDB().Size() - stats.report(chain, i, cache) + stats.report(chain, it.index, cache) } + // Any blocks remaining here? The only ones we care about are the future ones + if block != nil && err == consensus.ErrFutureBlock { + if err := bc.addFutureBlock(block); err != nil { + return it.index, events, coalescedLogs, err + } + block, err = it.next() + + for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() { + if err := bc.addFutureBlock(block); err != nil { + return it.index, events, coalescedLogs, err + } + stats.queued++ + } + } + stats.ignored += it.remaining() + // Append a single chain head event if we've progressed the chain if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { events = append(events, ChainHeadEvent{lastCanon}) } - return 0, events, coalescedLogs, nil + return it.index, events, coalescedLogs, err } -// insertStats tracks and reports on block insertion. -type insertStats struct { - queued, processed, ignored int - usedGas uint64 - lastIndex int - startTime mclock.AbsTime -} - -// statsReportLimit is the time limit during import and export after which we -// always print out progress. This avoids the user wondering what's going on. -const statsReportLimit = 8 * time.Second - -// report prints statistics if some number of blocks have been processed -// or more than a few seconds have passed since the last message. 
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { - // Fetch the timings for the batch +// insertSidechain is called when an import batch hits upon a pruned ancestor +// error, which happens when a sidechain with a sufficiently old fork-block is +// found. +// +// The method writes all (header-and-body-valid) blocks to disk, then tries to +// switch over to the new chain if the TD exceeded the current chain. +func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) { var ( - now = mclock.Now() - elapsed = time.Duration(now) - time.Duration(st.startTime) + externTd *big.Int + current = bc.CurrentBlock().NumberU64() ) - // If we're at the last block of the batch or report period reached, log - if index == len(chain)-1 || elapsed >= statsReportLimit { - var ( - end = chain[index] - txs = countTransactions(chain[st.lastIndex : index+1]) - ) - context := []interface{}{ - "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, - "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), - "number", end.Number(), "hash", end.Hash(), - } - if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { - context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) - } - context = append(context, []interface{}{"cache", cache}...) + // The first sidechain block error is already verified to be ErrPrunedAncestor. + // Since we don't import them here, we expect ErrUnknownAncestor for the remaining + // ones. Any other error means that the block is invalid, and should not be written + // to disk. + block, err := it.current(), consensus.ErrPrunedAncestor + for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() { + // Check the canonical state root for that number + if number := block.NumberU64(); current >= number { + if canonical := bc.GetBlockByNumber(number); canonical != nil && canonical.Root() == block.Root() { + // This is most likely a shadow-state attack. When a fork is imported into the + // database, and it eventually reaches a block height which is not pruned, we + // just found that the state already exists! This means that the sidechain block + // refers to a state which already exists in our canon chain. + // + // If left unchecked, we would now proceed importing the blocks, without actually + // having verified the state of the previous blocks. + log.Warn("Sidechain ghost-state attack detected", "number", block.NumberU64(), "sideroot", block.Root(), "canonroot", canonical.Root()) - if st.queued > 0 { - context = append(context, []interface{}{"queued", st.queued}...) + // If someone legitimately side-mines blocks, they would still be imported as usual. However, + // we cannot risk writing unverified blocks to disk when they obviously target the pruning + // mechanism. + return it.index, nil, nil, errors.New("sidechain ghost-state attack") + } } - if st.ignored > 0 { - context = append(context, []interface{}{"ignored", st.ignored}...) + if externTd == nil { + externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) } - log.Info("Imported new chain segment", context...)
+ externTd = new(big.Int).Add(externTd, block.Difficulty()) - *st = insertStats{startTime: now, lastIndex: index + 1} + if !bc.HasBlock(block.Hash(), block.NumberU64()) { + start := time.Now() + if err := bc.WriteBlockWithoutState(block, externTd); err != nil { + return it.index, nil, nil, err + } + log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(), + "diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)), + "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), + "root", block.Root()) + } } -} - -func countTransactions(chain []*types.Block) (c int) { - for _, b := range chain { - c += len(b.Transactions()) + // At this point, we've written all sidechain blocks to database. Loop ended + // either on some other error or all were processed. If there was some other + // error, we can ignore the rest of those blocks. + // + // If the externTd was larger than our local TD, we now need to reimport the previous + // blocks to regenerate the required state + localTd := bc.GetTd(bc.CurrentBlock().Hash(), current) + if localTd.Cmp(externTd) > 0 { + log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().NumberU64(), "sidetd", externTd, "localtd", localTd) + return it.index, nil, nil, err } - return c + // Gather all the sidechain hashes (full blocks may be memory heavy) + var ( + hashes []common.Hash + numbers []uint64 + ) + parent := bc.GetHeader(it.previous().Hash(), it.previous().NumberU64()) + for parent != nil && !bc.HasState(parent.Root) { + hashes = append(hashes, parent.Hash()) + numbers = append(numbers, parent.Number.Uint64()) + + parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1) + } + if parent == nil { + return it.index, nil, nil, errors.New("missing parent") + } + // Import all the pruned blocks to make the state available + var ( + blocks []*types.Block + memory common.StorageSize + ) + for i := len(hashes) - 1; i >= 0; i-- { + // Append the next block to our batch + block := bc.GetBlock(hashes[i], numbers[i]) + + blocks = append(blocks, block) + memory += block.Size() + + // If memory use grew too large, import and continue. Sadly we need to discard + // all raised events and logs from notifications since we're too heavy on the + // memory here. 
+ if len(blocks) >= 2048 || memory > 64*1024*1024 { + log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64()) + if _, _, _, err := bc.insertChain(blocks, false); err != nil { + return 0, nil, nil, err + } + blocks, memory = blocks[:0], 0 + + // If the chain is terminating, stop processing blocks + if atomic.LoadInt32(&bc.procInterrupt) == 1 { + log.Debug("Premature abort during blocks processing") + return 0, nil, nil, nil + } + } + } + if len(blocks) > 0 { + log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64()) + return bc.insertChain(blocks, false) + } + return 0, nil, nil, nil } // reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them @@ -1453,8 +1557,10 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e bc.addBadBlock(block) var receiptString string - for _, receipt := range receipts { - receiptString += fmt.Sprintf("\t%v\n", receipt) + for i, receipt := range receipts { + receiptString += fmt.Sprintf("\t %d: cumulative: %v gas: %v contract: %v status: %v tx: %v logs: %v bloom: %x state: %x\n", + i, receipt.CumulativeGasUsed, receipt.GasUsed, receipt.ContractAddress.Hex(), + receipt.Status, receipt.TxHash.Hex(), receipt.Logs, receipt.Bloom, receipt.PostState) } log.Error(fmt.Sprintf(` ########## BAD BLOCK ######### diff --git a/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go b/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go new file mode 100644 index 000000000..70bea3544 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go @@ -0,0 +1,143 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// insertStats tracks and reports on block insertion. +type insertStats struct { + queued, processed, ignored int + usedGas uint64 + lastIndex int + startTime mclock.AbsTime +} + +// statsReportLimit is the time limit during import and export after which we +// always print out progress. This avoids the user wondering what's going on. +const statsReportLimit = 8 * time.Second + +// report prints statistics if some number of blocks have been processed +// or more than a few seconds have passed since the last message.
+func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) { + // Fetch the timings for the batch + var ( + now = mclock.Now() + elapsed = time.Duration(now) - time.Duration(st.startTime) + ) + // If we're at the last block of the batch or report period reached, log + if index == len(chain)-1 || elapsed >= statsReportLimit { + // Count the number of transactions in this segment + var txs int + for _, block := range chain[st.lastIndex : index+1] { + txs += len(block.Transactions()) + } + end := chain[index] + + // Assemble the log context and send it to the logger + context := []interface{}{ + "blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000, + "elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), + "number", end.Number(), "hash", end.Hash(), + } + if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute { + context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) + } + context = append(context, []interface{}{"cache", cache}...) + + if st.queued > 0 { + context = append(context, []interface{}{"queued", st.queued}...) + } + if st.ignored > 0 { + context = append(context, []interface{}{"ignored", st.ignored}...) + } + log.Info("Imported new chain segment", context...) + + // Bump the stats reported to the next section + *st = insertStats{startTime: now, lastIndex: index + 1} + } +} + +// insertIterator is a helper to assist during chain import. +type insertIterator struct { + chain types.Blocks + results <-chan error + index int + validator Validator +} + +// newInsertIterator creates a new iterator based on the given blocks, which are +// assumed to be a contiguous chain. +func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator { + return &insertIterator{ + chain: chain, + results: results, + index: -1, + validator: validator, + } +} + +// next returns the next block in the iterator, along with any potential validation +// error for that block. When the end is reached, it will return (nil, nil). +func (it *insertIterator) next() (*types.Block, error) { + if it.index+1 >= len(it.chain) { + it.index = len(it.chain) + return nil, nil + } + it.index++ + if err := <-it.results; err != nil { + return it.chain[it.index], err + } + return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index]) +} + +// current returns the current block that's being processed. +func (it *insertIterator) current() *types.Block { + if it.index < 0 || it.index+1 >= len(it.chain) { + return nil + } + return it.chain[it.index] +} + +// previous returns the previous block that was being processed, or nil. +func (it *insertIterator) previous() *types.Block { + if it.index < 1 { + return nil + } + return it.chain[it.index-1] +} + +// first returns the first block in the iterator. +func (it *insertIterator) first() *types.Block { + return it.chain[0] +} + +// remaining returns the number of remaining blocks. +func (it *insertIterator) remaining() int { + return len(it.chain) - it.index +} + +// processed returns the number of processed blocks.
+func (it *insertIterator) processed() int { + return it.index + 1 +} diff --git a/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go b/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go index 0bc453fdf..e3a5537a4 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go +++ b/vendor/github.com/ethereum/go-ethereum/core/chain_makers.go @@ -33,12 +33,11 @@ import ( // BlockGen creates blocks for testing. // See GenerateChain for a detailed explanation. type BlockGen struct { - i int - parent *types.Block - chain []*types.Block - chainReader consensus.ChainReader - header *types.Header - statedb *state.StateDB + i int + parent *types.Block + chain []*types.Block + header *types.Header + statedb *state.StateDB gasPool *GasPool txs []*types.Transaction @@ -138,7 +137,7 @@ func (b *BlockGen) AddUncle(h *types.Header) { // For index -1, PrevBlock returns the parent block given to GenerateChain. func (b *BlockGen) PrevBlock(index int) *types.Block { if index >= b.i { - panic("block index out of range") + panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i)) } if index == -1 { return b.parent @@ -154,7 +153,8 @@ func (b *BlockGen) OffsetTime(seconds int64) { if b.header.Time.Cmp(b.parent.Header().Time) <= 0 { panic("block time out of range") } - b.header.Difficulty = b.engine.CalcDifficulty(b.chainReader, b.header.Time.Uint64(), b.parent.Header()) + chainreader := &fakeChainReader{config: b.config} + b.header.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time.Uint64(), b.parent.Header()) } // GenerateChain creates a chain of n blocks. The first block's @@ -174,14 +174,10 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse config = params.TestChainConfig } blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) + chainreader := &fakeChainReader{config: config} genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { - // TODO(karalabe): This is needed for clique, which depends on multiple blocks. - // It's nonetheless ugly to spin up a blockchain here. Get rid of this somehow. - blockchain, _ := NewBlockChain(db, nil, config, engine, vm.Config{}, nil) - defer blockchain.Stop() - - b := &BlockGen{i: i, parent: parent, chain: blocks, chainReader: blockchain, statedb: statedb, config: config, engine: engine} - b.header = makeHeader(b.chainReader, parent, statedb, b.engine) + b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} + b.header = makeHeader(chainreader, parent, statedb, b.engine) // Mutate the state and block according to any hard-fork specs if daoBlock := config.DAOForkBlock; daoBlock != nil { @@ -201,7 +197,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } if b.engine != nil { // Finalize and seal the block - block, _ := b.engine.Finalize(b.chainReader, b.header, statedb, b.txs, b.uncles, b.receipts) + block, _ := b.engine.Finalize(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts) // Write state changes to db root, err := statedb.Commit(config.IsEIP158(b.header.Number)) @@ -269,3 +265,19 @@ func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethd }) return blocks } + +type fakeChainReader struct { + config *params.ChainConfig + genesis *types.Block +} + +// Config returns the chain configuration. 
+func (cr *fakeChainReader) Config() *params.ChainConfig { + return cr.config +} + +func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil } +func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil } +func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } +func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } +func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/core/genesis.go b/vendor/github.com/ethereum/go-ethereum/core/genesis.go index 6e71afd61..c96cb17a3 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/genesis.go +++ b/vendor/github.com/ethereum/go-ethereum/core/genesis.go @@ -151,6 +151,9 @@ func (e *GenesisMismatchError) Error() string { // // The returned chain configuration is never nil. func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) { + return SetupGenesisBlockWithOverride(db, genesis, nil) +} +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -178,6 +181,9 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig // Get the existing chain configuration. newcfg := genesis.configOrDefault(stored) + if constantinopleOverride != nil { + newcfg.ConstantinopleBlock = constantinopleOverride + } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go index da5432832..491a125c6 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go +++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_chain.go @@ -271,6 +271,15 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { } } +// HasReceipts verifies the existence of all the transaction receipts belonging +// to a block. +func HasReceipts(db DatabaseReader, hash common.Hash, number uint64) bool { + if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil { + return false + } + return true +} + // ReadReceipts retrieves all the transaction receipts belonging to a block. 
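//
// (Editor's sketch, illustrative: HasReceipts makes existence checks cheap by
// probing the key without decoding any RLP, e.g.
//
//	if rawdb.HasReceipts(db, hash, number) {
//		receipts := rawdb.ReadReceipts(db, hash, number)
//		_ = receipts
//	}
//
// and the BlockChain.HasFastBlock method introduced elsewhere in this patch
// performs a check of this kind during fast sync.)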
 func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
 	// Retrieve the flattened receipt slice
@@ -278,7 +287,7 @@ func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Rece
 	if len(data) == 0 {
 		return nil
 	}
-	// Convert the revceipts from their storage form to their internal representation
+	// Convert the receipts from their storage form to their internal representation
 	storageReceipts := []*types.ReceiptForStorage{}
 	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
 		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
index 514328e87..3b6e6548d 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/accessors_metadata.go
@@ -77,9 +77,8 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte {
 	return data
 }
 
-// WritePreimages writes the provided set of preimages to the database. `number` is the
-// current block number, and is used for debug messages only.
-func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) {
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db DatabaseWriter, preimages map[common.Hash][]byte) {
 	for hash, preimage := range preimages {
 		if err := db.Put(preimageKey(hash), preimage); err != nil {
 			log.Crit("Failed to store trie preimage", "err", err)
diff --git a/vendor/github.com/ethereum/go-ethereum/core/rawdb/schema.go b/vendor/github.com/ethereum/go-ethereum/core/rawdb/schema.go
index ef597ef30..8a9921ef4 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/rawdb/schema.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/rawdb/schema.go
@@ -35,7 +35,7 @@ var (
 	// headBlockKey tracks the latest know full block's hash.
 	headBlockKey = []byte("LastBlock")
 
-	// headFastBlockKey tracks the latest known incomplete block's hash duirng fast sync.
+	// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
 	headFastBlockKey = []byte("LastFast")
 
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/database.go b/vendor/github.com/ethereum/go-ethereum/core/state/database.go
index c1b630991..f6ea144b9 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/state/database.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/state/database.go
@@ -72,13 +72,19 @@ type Trie interface {
 }
 
 // NewDatabase creates a backing store for state. The returned database is safe for
-// concurrent use and retains cached trie nodes in memory. The pool is an optional
-// intermediate trie-node memory pool between the low level storage layer and the
-// high level trie abstraction.
+// concurrent use and retains a few recent expanded trie nodes in memory. To keep
+// more historical state in memory, use the NewDatabaseWithCache constructor.
 func NewDatabase(db ethdb.Database) Database {
+	return NewDatabaseWithCache(db, 0)
+}
+
+// NewDatabaseWithCache creates a backing store for state. The returned database
+// is safe for concurrent use and retains both a few recent expanded trie nodes in
+// memory, as well as a lot of collapsed RLP trie nodes in a large memory cache.
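+//
+// (Editor's sketch, illustrative: the cache size is given in megabytes, so a
+// caller wanting roughly 256MB of collapsed trie nodes held in memory would use
+//
+//	sdb := state.NewDatabaseWithCache(diskdb, 256)
+//
+// while state.NewDatabase(diskdb) keeps the previous uncached behaviour.)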
+func NewDatabaseWithCache(db ethdb.Database, cache int) Database {
 	csc, _ := lru.New(codeSizeCacheSize)
 	return &cachingDB{
-		db:            trie.NewDatabase(db),
+		db:            trie.NewDatabaseWithCache(db, cache),
 		codeSizeCache: csc,
 	}
 }
diff --git a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
index 216667ce9..76e67d839 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/state/statedb.go
@@ -18,10 +18,10 @@
 package state
 
 import (
+	"errors"
 	"fmt"
 	"math/big"
 	"sort"
-	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -44,6 +44,13 @@ var (
 	emptyCode = crypto.Keccak256Hash(nil)
 )
 
+type proofList [][]byte
+
+func (n *proofList) Put(key []byte, value []byte) error {
+	*n = append(*n, value)
+	return nil
+}
+
 // StateDBs within the ethereum protocol are used to store anything
 // within the merkle trie. StateDBs take care of caching and storing
 // nested states. It's the general query interface to retrieve:
@@ -79,8 +86,6 @@ type StateDB struct {
 	journal        *journal
 	validRevisions []revision
 	nextRevisionId int
-
-	lock sync.Mutex
 }
 
 // Create a new state from a given trie.
@@ -256,6 +261,24 @@ func (self *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash
 	return common.Hash{}
 }
 
+// GetProof returns the MerkleProof for a given Account
+func (self *StateDB) GetProof(a common.Address) ([][]byte, error) {
+	var proof proofList
+	err := self.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof)
+	return [][]byte(proof), err
+}
+
+// GetStorageProof returns the StorageProof for a given key
+func (self *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) {
+	var proof proofList
+	trie := self.StorageTrie(a)
+	if trie == nil {
+		return proof, errors.New("storage trie for requested address does not exist")
+	}
+	err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof)
+	return [][]byte(proof), err
+}
+
 // GetCommittedState retrieves a value from the given account's committed storage trie.
 func (self *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
 	stateObject := self.getStateObject(addr)
@@ -470,9 +493,6 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
 // Copy creates a deep, independent copy of the state.
 // Snapshots of the copied state cannot be applied to the copy.
 func (self *StateDB) Copy() *StateDB {
-	self.lock.Lock()
-	defer self.lock.Unlock()
-
 	// Copy all the basic fields, initialize the memory ones
 	state := &StateDB{
 		db: self.db,
diff --git a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
index f6da5da2a..fc35d1f24 100644
--- a/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
+++ b/vendor/github.com/ethereum/go-ethereum/core/tx_pool.go
@@ -825,7 +825,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local bool) []error {
 
 // addTxsLocked attempts to queue a batch of transactions if they are valid,
 // whilst assuming the transaction pool lock is already held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error { - // Add the batch of transaction, tracking the accepted ones + // Add the batch of transactions, tracking the accepted ones dirty := make(map[common.Address]struct{}) errs := make([]error, len(txs)) diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/block.go b/vendor/github.com/ethereum/go-ethereum/core/types/block.go index 8a21bba1e..9d11f60d8 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/types/block.go +++ b/vendor/github.com/ethereum/go-ethereum/core/types/block.go @@ -81,8 +81,8 @@ type Header struct { GasUsed uint64 `json:"gasUsed" gencodec:"required"` Time *big.Int `json:"timestamp" gencodec:"required"` Extra []byte `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash" gencodec:"required"` - Nonce BlockNonce `json:"nonce" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` } // field type overrides for gencodec diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go b/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go index 1b92cd9cf..59a1c9c43 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go +++ b/vendor/github.com/ethereum/go-ethereum/core/types/gen_header_json.go @@ -13,6 +13,7 @@ import ( var _ = (*headerMarshaling)(nil) +// MarshalJSON marshals as JSON. func (h Header) MarshalJSON() ([]byte, error) { type Header struct { ParentHash common.Hash `json:"parentHash" gencodec:"required"` @@ -28,8 +29,8 @@ func (h Header) MarshalJSON() ([]byte, error) { GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` Time *hexutil.Big `json:"timestamp" gencodec:"required"` Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash" gencodec:"required"` - Nonce BlockNonce `json:"nonce" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` Hash common.Hash `json:"hash"` } var enc Header @@ -52,6 +53,7 @@ func (h Header) MarshalJSON() ([]byte, error) { return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. 
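+//
+// (Editor's note, illustrative: with mixHash and nonce demoted from required
+// to optional, headers from chains that do not carry an ethash seal, e.g.
+// Clique networks, now decode cleanly via
+//
+//	var h Header
+//	err := json.Unmarshal(input, &h)
+//
+// with both fields simply left zero when absent from the JSON.)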
func (h *Header) UnmarshalJSON(input []byte) error { type Header struct { ParentHash *common.Hash `json:"parentHash" gencodec:"required"` @@ -67,8 +69,8 @@ func (h *Header) UnmarshalJSON(input []byte) error { GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` Time *hexutil.Big `json:"timestamp" gencodec:"required"` Extra *hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest *common.Hash `json:"mixHash" gencodec:"required"` - Nonce *BlockNonce `json:"nonce" gencodec:"required"` + MixDigest *common.Hash `json:"mixHash"` + Nonce *BlockNonce `json:"nonce"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -126,13 +128,11 @@ func (h *Header) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'extraData' for Header") } h.Extra = *dec.Extra - if dec.MixDigest == nil { - return errors.New("missing required field 'mixHash' for Header") + if dec.MixDigest != nil { + h.MixDigest = *dec.MixDigest } - h.MixDigest = *dec.MixDigest - if dec.Nonce == nil { - return errors.New("missing required field 'nonce' for Header") + if dec.Nonce != nil { + h.Nonce = *dec.Nonce } - h.Nonce = *dec.Nonce return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go b/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go index 1997755dc..63132048e 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go +++ b/vendor/github.com/ethereum/go-ethereum/core/types/transaction_signing.go @@ -136,7 +136,7 @@ func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) { return recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true) } -// WithSignature returns a new transaction with the given signature. This signature +// SignatureValues returns signature values. This signature // needs to be in the [R || S || V] format where V is 0 or 1. func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig) diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go index 968d2219e..ba4d1e9eb 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/evm.go @@ -339,6 +339,12 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte contract := NewContract(caller, to, new(big.Int), gas) contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr)) + // We do an AddBalance of zero here, just in order to trigger a touch. + // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium, + // but is the correct thing to do and matters on other networks, in tests, and potential + // future scenarios + evm.StateDB.AddBalance(addr, bigZero) + // When an error was returned by the EVM or when setting the creation code // above we revert to the snapshot and consume any gas remaining. Additionally // when we're in Homestead this also counts for code storage gas errors. diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/gen_structlog.go b/vendor/github.com/ethereum/go-ethereum/core/vm/gen_structlog.go index ade3ca631..726012e59 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/gen_structlog.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/gen_structlog.go @@ -13,20 +13,22 @@ import ( var _ = (*structLogMarshaling)(nil) +// MarshalJSON marshals as JSON. 
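+//
+// (Editor's note: the RefundCounter field added below surfaces the EVM's
+// accumulated SSTORE refund as a "refund" key at every step; both the struct
+// logger and the relocated JSON logger populate it from StateDB.GetRefund.)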
func (s StructLog) MarshalJSON() ([]byte, error) { type StructLog struct { - Pc uint64 `json:"pc"` - Op OpCode `json:"op"` - Gas math.HexOrDecimal64 `json:"gas"` - GasCost math.HexOrDecimal64 `json:"gasCost"` - Memory hexutil.Bytes `json:"memory"` - MemorySize int `json:"memSize"` - Stack []*math.HexOrDecimal256 `json:"stack"` - Storage map[common.Hash]common.Hash `json:"-"` - Depth int `json:"depth"` - Err error `json:"-"` - OpName string `json:"opName"` - ErrorString string `json:"error"` + Pc uint64 `json:"pc"` + Op OpCode `json:"op"` + Gas math.HexOrDecimal64 `json:"gas"` + GasCost math.HexOrDecimal64 `json:"gasCost"` + Memory hexutil.Bytes `json:"memory"` + MemorySize int `json:"memSize"` + Stack []*math.HexOrDecimal256 `json:"stack"` + Storage map[common.Hash]common.Hash `json:"-"` + Depth int `json:"depth"` + RefundCounter uint64 `json:"refund"` + Err error `json:"-"` + OpName string `json:"opName"` + ErrorString string `json:"error"` } var enc StructLog enc.Pc = s.Pc @@ -43,24 +45,27 @@ func (s StructLog) MarshalJSON() ([]byte, error) { } enc.Storage = s.Storage enc.Depth = s.Depth + enc.RefundCounter = s.RefundCounter enc.Err = s.Err enc.OpName = s.OpName() enc.ErrorString = s.ErrorString() return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. func (s *StructLog) UnmarshalJSON(input []byte) error { type StructLog struct { - Pc *uint64 `json:"pc"` - Op *OpCode `json:"op"` - Gas *math.HexOrDecimal64 `json:"gas"` - GasCost *math.HexOrDecimal64 `json:"gasCost"` - Memory *hexutil.Bytes `json:"memory"` - MemorySize *int `json:"memSize"` - Stack []*math.HexOrDecimal256 `json:"stack"` - Storage map[common.Hash]common.Hash `json:"-"` - Depth *int `json:"depth"` - Err error `json:"-"` + Pc *uint64 `json:"pc"` + Op *OpCode `json:"op"` + Gas *math.HexOrDecimal64 `json:"gas"` + GasCost *math.HexOrDecimal64 `json:"gasCost"` + Memory *hexutil.Bytes `json:"memory"` + MemorySize *int `json:"memSize"` + Stack []*math.HexOrDecimal256 `json:"stack"` + Storage map[common.Hash]common.Hash `json:"-"` + Depth *int `json:"depth"` + RefundCounter *uint64 `json:"refund"` + Err error `json:"-"` } var dec StructLog if err := json.Unmarshal(input, &dec); err != nil { @@ -96,6 +101,9 @@ func (s *StructLog) UnmarshalJSON(input []byte) error { if dec.Depth != nil { s.Depth = *dec.Depth } + if dec.RefundCounter != nil { + s.RefundCounter = *dec.RefundCounter + } if dec.Err != nil { s.Err = dec.Err } diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go index e94a2777b..6696c6e3d 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/instructions.go @@ -124,10 +124,22 @@ func opSmod(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory func opExp(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { base, exponent := stack.pop(), stack.pop() - stack.push(math.Exp(base, exponent)) - - interpreter.intPool.put(base, exponent) - + // some shortcuts + cmpToOne := exponent.Cmp(big1) + if cmpToOne < 0 { // Exponent is zero + // x ^ 0 == 1 + stack.push(base.SetUint64(1)) + } else if base.Sign() == 0 { + // 0 ^ y, if y != 0, == 0 + stack.push(base.SetUint64(0)) + } else if cmpToOne == 0 { // Exponent is one + // x ^ 1 == x + stack.push(base) + } else { + stack.push(math.Exp(base, exponent)) + interpreter.intPool.put(base) + } + interpreter.intPool.put(exponent) return nil, nil } @@ 
-532,7 +544,12 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, contract *Contract, // this account should be regarded as a non-existent account and zero should be returned. func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { slot := stack.peek() - slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(common.BigToAddress(slot)).Bytes()) + address := common.BigToAddress(slot) + if interpreter.evm.StateDB.Empty(address) { + slot.SetUint64(0) + } else { + slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(address).Bytes()) + } return nil, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/core/vm/logger.go b/vendor/github.com/ethereum/go-ethereum/core/vm/logger.go index 85acb8d6d..1733bf270 100644 --- a/vendor/github.com/ethereum/go-ethereum/core/vm/logger.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/logger.go @@ -56,16 +56,17 @@ type LogConfig struct { // StructLog is emitted to the EVM each cycle and lists information about the current internal state // prior to the execution of the statement. type StructLog struct { - Pc uint64 `json:"pc"` - Op OpCode `json:"op"` - Gas uint64 `json:"gas"` - GasCost uint64 `json:"gasCost"` - Memory []byte `json:"memory"` - MemorySize int `json:"memSize"` - Stack []*big.Int `json:"stack"` - Storage map[common.Hash]common.Hash `json:"-"` - Depth int `json:"depth"` - Err error `json:"-"` + Pc uint64 `json:"pc"` + Op OpCode `json:"op"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Memory []byte `json:"memory"` + MemorySize int `json:"memSize"` + Stack []*big.Int `json:"stack"` + Storage map[common.Hash]common.Hash `json:"-"` + Depth int `json:"depth"` + RefundCounter uint64 `json:"refund"` + Err error `json:"-"` } // overrides for gencodec @@ -177,7 +178,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui storage = l.changedValues[contract.Address()].Copy() } // create a new snaptshot of the EVM. - log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, err} + log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, env.StateDB.GetRefund(), err} l.logs = append(l.logs, log) return nil diff --git a/vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go b/vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go similarity index 75% rename from vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go rename to vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go index f16424fbe..ac3c40759 100644 --- a/vendor/github.com/ethereum/go-ethereum/cmd/evm/json_logger.go +++ b/vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . -package main +package vm import ( "encoding/json" @@ -24,17 +24,16 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/core/vm" ) type JSONLogger struct { encoder *json.Encoder - cfg *vm.LogConfig + cfg *LogConfig } // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects // into the provided stream. 
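//
// (Editor's sketch, illustrative: now that the tracer lives in core/vm rather
// than being private to cmd/evm, any caller can wire it into an interpreter via
//
//	cfg := vm.Config{Debug: true, Tracer: vm.NewJSONLogger(&vm.LogConfig{}, os.Stdout)}
//
// and pass cfg to vm.NewEVM.)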
-func NewJSONLogger(cfg *vm.LogConfig, writer io.Writer) *JSONLogger { +func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger { return &JSONLogger{json.NewEncoder(writer), cfg} } @@ -43,16 +42,17 @@ func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create } // CaptureState outputs state information on the logger. -func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error { - log := vm.StructLog{ - Pc: pc, - Op: op, - Gas: gas, - GasCost: cost, - MemorySize: memory.Len(), - Storage: nil, - Depth: depth, - Err: err, +func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { + log := StructLog{ + Pc: pc, + Op: op, + Gas: gas, + GasCost: cost, + MemorySize: memory.Len(), + Storage: nil, + Depth: depth, + RefundCounter: env.StateDB.GetRefund(), + Err: err, } if !l.cfg.DisableMemory { log.Memory = memory.Data() @@ -64,7 +64,7 @@ func (l *JSONLogger) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cos } // CaptureFault outputs state information on the logger. -func (l *JSONLogger) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error { +func (l *JSONLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error { return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api.go b/vendor/github.com/ethereum/go-ethereum/eth/api.go index 3ec3afb81..816b9cd33 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/api.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/api.go @@ -444,16 +444,16 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) } + triedb := api.eth.BlockChain().StateCache().TrieDB() - oldTrie, err := trie.NewSecure(startBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0) + oldTrie, err := trie.NewSecure(startBlock.Root(), triedb, 0) if err != nil { return nil, err } - newTrie, err := trie.NewSecure(endBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0) + newTrie, err := trie.NewSecure(endBlock.Root(), triedb, 0) if err != nil { return nil, err } - diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{})) iter := trie.NewIterator(diff) diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go b/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go index 8748d444f..a48815e0d 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/api_backend.go @@ -125,12 +125,12 @@ func (b *EthAPIBackend) GetTd(blockHash common.Hash) *big.Int { return b.eth.blockchain.GetTdByHash(blockHash) } -func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) { +func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { state.SetBalance(msg.From(), math.MaxBig256) vmError := func() error { return nil 
 	}
 	context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil)
-	return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), vmError, nil
+	return vm.NewEVM(context, state, b.eth.chainConfig, *b.eth.blockchain.GetVMConfig()), vmError, nil
 }
 
 func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
diff --git a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
index 80552ada8..0b8f8aa00 100644
--- a/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
+++ b/vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
@@ -17,11 +17,13 @@ package eth
 
 import (
+	"bufio"
 	"bytes"
 	"context"
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"os"
 	"runtime"
 	"sync"
 	"time"
@@ -60,6 +62,13 @@ type TraceConfig struct {
 	Reexec *uint64
 }
 
+// StdTraceConfig holds extra parameters to standard-json trace functions.
+type StdTraceConfig struct {
+	*vm.LogConfig
+	Reexec *uint64
+	TxHash common.Hash
+}
+
 // txTraceResult is the result of a single transaction trace.
 type txTraceResult struct {
 	Result interface{} `json:"result,omitempty"` // Trace results produced by the tracer
@@ -138,7 +147,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
 	// Ensure we have a valid starting state before doing any work
 	origin := start.NumberU64()
-	database := state.NewDatabase(api.eth.ChainDb())
+	database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16)
 	// Chain tracing will probably start at genesis
 	if number := start.NumberU64(); number > 0 {
 		start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1)
@@ -366,7 +375,7 @@ func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.B
 func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
 	block := api.eth.blockchain.GetBlockByHash(hash)
 	if block == nil {
-		return nil, fmt.Errorf("block #%x not found", hash)
+		return nil, fmt.Errorf("block %#x not found", hash)
 	}
 	return api.traceBlock(ctx, block, config)
 }
@@ -391,13 +400,41 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string,
 	return api.TraceBlock(ctx, blob, config)
 }
 
-// TraceBadBlock returns the structured logs created during the execution of a block
-// within the blockchain 'badblocks' cache
-func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, index int, config *TraceConfig) ([]*txTraceResult, error) {
-	if blocks := api.eth.blockchain.BadBlocks(); index < len(blocks) {
-		return api.traceBlock(ctx, blocks[index], config)
+// TraceBadBlock returns the structured logs created during the execution of
+// EVM against a block pulled from the pool of bad ones and returns them as a JSON
+// object.
+func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
+	blocks := api.eth.blockchain.BadBlocks()
+	for _, block := range blocks {
+		if block.Hash() == hash {
+			return api.traceBlock(ctx, block, config)
+		}
 	}
-	return nil, fmt.Errorf("index out of range")
+	return nil, fmt.Errorf("bad block %#x not found", hash)
+}
+
+// StandardTraceBlockToFile dumps the structured logs created during the
+// execution of EVM to the local file system and returns a list of files
+// to the caller.
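+//
+// (Editor's sketch, assuming the matching debug_standardTraceBlockToFile RPC
+// registration added elsewhere in this patch: from the console one would call
+//
+//	debug.standardTraceBlockToFile(blockHash, {txHash: someTxHash})
+//
+// and receive the names of the temporary files, one standard-JSON trace per
+// traced transaction.)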
+func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+	block := api.eth.blockchain.GetBlockByHash(hash)
+	if block == nil {
+		return nil, fmt.Errorf("block %#x not found", hash)
+	}
+	return api.standardTraceBlockToFile(ctx, block, config)
+}
+
+// StandardTraceBadBlockToFile dumps the structured logs created during the
+// execution of EVM against a block pulled from the pool of bad ones to the
+// local file system and returns a list of files to the caller.
+func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
+	blocks := api.eth.blockchain.BadBlocks()
+	for _, block := range blocks {
+		if block.Hash() == hash {
+			return api.standardTraceBlockToFile(ctx, block, config)
+		}
+	}
+	return nil, fmt.Errorf("bad block %#x not found", hash)
 }
 
 // traceBlock configures a new tracer according to the provided configuration, and
@@ -410,7 +447,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
 	}
 	parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
 	if parent == nil {
-		return nil, fmt.Errorf("parent %x not found", block.ParentHash())
+		return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
 	}
 	reexec := defaultTraceReexec
 	if config != nil && config.Reexec != nil {
@@ -481,6 +518,106 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
 	return results, nil
 }
 
+// standardTraceBlockToFile configures a new tracer which uses standard JSON output,
+// and traces either a full block or an individual transaction. The return value will
+// be one filename per transaction traced.
+func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
+	// If we're tracing a single transaction, make sure it's present
+	if config != nil && config.TxHash != (common.Hash{}) {
+		var exists bool
+		for _, tx := range block.Transactions() {
+			if exists = (tx.Hash() == config.TxHash); exists {
+				break
+			}
+		}
+		if !exists {
+			return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
+		}
+	}
+	// Create the parent state database
+	if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil {
+		return nil, err
+	}
+	parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
+	if parent == nil {
+		return nil, fmt.Errorf("parent %#x not found", block.ParentHash())
+	}
+	reexec := defaultTraceReexec
+	if config != nil && config.Reexec != nil {
+		reexec = *config.Reexec
+	}
+	statedb, err := api.computeStateDB(parent, reexec)
+	if err != nil {
+		return nil, err
+	}
+	// Retrieve the tracing configurations, or use default values
+	var (
+		logConfig vm.LogConfig
+		txHash    common.Hash
+	)
+	if config != nil {
+		if config.LogConfig != nil {
+			logConfig = *config.LogConfig
+		}
+		txHash = config.TxHash
+	}
+	logConfig.Debug = true
+
+	// Execute transaction, either tracing all or just the requested one
+	var (
+		signer = types.MakeSigner(api.config, block.Number())
+		dumps  []string
+	)
+	for i, tx := range block.Transactions() {
+		// Prepare the transaction for un-traced execution
+		var (
+			msg, _ = tx.AsMessage(signer)
+			vmctx  = core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
+
+			vmConf vm.Config
+			dump   *os.File
+			err    error
+		)
+		// If the transaction needs tracing, swap out the configs
+		if tx.Hash() == txHash ||
txHash == (common.Hash{}) { + // Generate a unique temporary file to dump it into + prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4]) + + dump, err = ioutil.TempFile(os.TempDir(), prefix) + if err != nil { + return nil, err + } + dumps = append(dumps, dump.Name()) + + // Swap out the noop logger to the standard tracer + vmConf = vm.Config{ + Debug: true, + Tracer: vm.NewJSONLogger(&logConfig, bufio.NewWriter(dump)), + EnablePreimageRecording: true, + } + } + // Execute the transaction and flush any traces to disk + vmenv := vm.NewEVM(vmctx, statedb, api.config, vmConf) + _, _, _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) + + if dump != nil { + dump.Close() + log.Info("Wrote standard trace", "file", dump.Name()) + } + if err != nil { + return dumps, err + } + // Finalize the state so any modifications are written to the trie + statedb.Finalise(true) + + // If we've traced the transaction we were looking for, abort + if tx.Hash() == txHash { + break + } + } + return dumps, nil +} + // computeStateDB retrieves the state database associated with a certain block. // If no state is locally available for the given block, a number of blocks are // attempted to be reexecuted to generate the desired state. @@ -492,7 +629,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* } // Otherwise try to reexec blocks until we find a state or reach our limit origin := block.NumberU64() - database := state.NewDatabase(api.eth.ChainDb()) + database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16) for i := uint64(0); i < reexec; i++ { block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) @@ -506,7 +643,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* if err != nil { switch err.(type) { case *trie.MissingNodeError: - return nil, errors.New("required historical state unavailable") + return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) default: return nil, err } @@ -520,7 +657,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* for block.NumberU64() < origin { // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second { - log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "elapsed", time.Since(start)) + log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start)) logged = time.Now() } // Retrieve the next block to regenerate and process it @@ -529,15 +666,15 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* } _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{}) if err != nil { - return nil, err + return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) } // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(true) + root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number())) if err != nil { return nil, err } if err := statedb.Reset(root); err != nil { - return nil, err + return nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) } database.TrieDB().Reference(root, common.Hash{}) if proot != (common.Hash{}) { @@ -556,7 +693,7 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha // Retrieve the transaction 
and assemble its EVM context tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash) if tx == nil { - return nil, fmt.Errorf("transaction %x not found", hash) + return nil, fmt.Errorf("transaction %#x not found", hash) } reexec := defaultTraceReexec if config != nil && config.Reexec != nil { @@ -636,11 +773,11 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree // Create the parent state database block := api.eth.blockchain.GetBlockByHash(blockHash) if block == nil { - return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash) + return nil, vm.Context{}, nil, fmt.Errorf("block %#x not found", blockHash) } parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash()) + return nil, vm.Context{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } statedb, err := api.computeStateDB(parent, reexec) if err != nil { @@ -659,10 +796,10 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{}) if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err) + return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state statedb.Finalise(true) } - return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash) + return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash) } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/backend.go b/vendor/github.com/ethereum/go-ethereum/eth/backend.go index b555b064a..354fc17d4 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/backend.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/backend.go @@ -118,7 +118,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } @@ -154,7 +154,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { EWASMInterpreter: config.EWASMInterpreter, EVMInterpreter: config.EVMInterpreter, } - cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieNodeLimit: config.TrieCache, TrieTimeLimit: config.TrieTimeout} + cacheConfig = &core.CacheConfig{Disabled: config.NoPruning, TrieCleanLimit: config.TrieCleanCache, TrieDirtyLimit: config.TrieDirtyCache, TrieTimeLimit: config.TrieTimeout} ) eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, eth.chainConfig, eth.engine, vmConfig, eth.shouldPreserve) if err != nil { @@ -173,7 +173,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { } eth.txPool = core.NewTxPool(config.TxPool, eth.chainConfig, eth.blockchain) - if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil { + if 
eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb, config.Whitelist); err != nil { return nil, err } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/config.go b/vendor/github.com/ethereum/go-ethereum/eth/config.go index 7d1db9f82..f71b8dfee 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/config.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/config.go @@ -43,15 +43,16 @@ var DefaultConfig = Config{ DatasetsInMem: 1, DatasetsOnDisk: 2, }, - NetworkId: 1, - LightPeers: 100, - DatabaseCache: 768, - TrieCache: 256, - TrieTimeout: 60 * time.Minute, - MinerGasFloor: 8000000, - MinerGasCeil: 8000000, - MinerGasPrice: big.NewInt(params.GWei), - MinerRecommit: 3 * time.Second, + NetworkId: 1, + LightPeers: 100, + DatabaseCache: 512, + TrieCleanCache: 256, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + MinerGasFloor: 8000000, + MinerGasCeil: 8000000, + MinerGasPrice: big.NewInt(params.GWei), + MinerRecommit: 3 * time.Second, TxPool: core.DefaultTxPoolConfig, GPO: gasprice.Config{ @@ -86,6 +87,9 @@ type Config struct { SyncMode downloader.SyncMode NoPruning bool + // Whitelist of required block number -> hash values to accept + Whitelist map[uint64]common.Hash `toml:"-"` + // Light client options LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests LightPeers int `toml:",omitempty"` // Maximum number of LES client peers @@ -98,7 +102,8 @@ type Config struct { SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int - TrieCache int + TrieCleanCache int + TrieDirtyCache int TrieTimeout time.Duration // Mining-related options @@ -126,10 +131,14 @@ type Config struct { // Miscellaneous options DocRoot string `toml:"-"` - // Type of the EWASM interpreter ("" for detault) + // Type of the EWASM interpreter ("" for default) EWASMInterpreter string + // Type of the EVM interpreter ("" for default) EVMInterpreter string + + // Constantinople block override (TODO: remove after the fork) + ConstantinopleOverride *big.Int } type configMarshaling struct { diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go index e8fe2ef17..2c60e858c 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/downloader.go @@ -99,6 +99,7 @@ type Downloader struct { mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) mux *event.TypeMux // Event multiplexer to announce sync operation events + genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed stateDB ethdb.Database @@ -183,6 +184,9 @@ type BlockChain interface { // HasBlock verifies a block's presence in the local chain. HasBlock(common.Hash, uint64) bool + // HasFastBlock verifies a fast block's presence in the local chain. + HasFastBlock(common.Hash, uint64) bool + // GetBlockByHash retrieves a block from the local chain. 
GetBlockByHash(common.Hash) *types.Block @@ -434,7 +438,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I } height := latest.Number.Uint64() - origin, err := d.findAncestor(p, height) + origin, err := d.findAncestor(p, latest) if err != nil { return err } @@ -603,41 +607,107 @@ func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) { } } +// calculateRequestSpan calculates what headers to request from a peer when trying to determine the +// common ancestor. +// It returns parameters to be used for peer.RequestHeadersByNumber: +// from - starting block number +// count - number of headers to request +// skip - number of headers to skip +// and also returns 'max', the last block which is expected to be returned by the remote peers, +// given the (from,count,skip) +func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { + var ( + from int + count int + MaxCount = MaxHeaderFetch / 16 + ) + // requestHead is the highest block that we will ask for. If requestHead is not offset, + // the highest block that we will get is 16 blocks back from head, which means we + // will fetch 14 or 15 blocks unnecessarily in the case the height difference + // between us and the peer is 1-2 blocks, which is most common + requestHead := int(remoteHeight) - 1 + if requestHead < 0 { + requestHead = 0 + } + // requestBottom is the lowest block we want included in the query + // Ideally, we want to include just below own head + requestBottom := int(localHeight - 1) + if requestBottom < 0 { + requestBottom = 0 + } + totalSpan := requestHead - requestBottom + span := 1 + totalSpan/MaxCount + if span < 2 { + span = 2 + } + if span > 16 { + span = 16 + } + + count = 1 + totalSpan/span + if count > MaxCount { + count = MaxCount + } + if count < 2 { + count = 2 + } + from = requestHead - (count-1)*span + if from < 0 { + from = 0 + } + max := from + (count-1)*span + return int64(from), count, span - 1, uint64(max) +} + // findAncestor tries to locate the common ancestor link of the local chain and // a remote peers blockchain. In the general case when our node was in sync and // on the correct chain, checking the top N links should already get us a match. // In the rare scenario when we ended up on a long reorganisation (i.e. none of // the head links match), we do a binary search to find the common ancestor. 
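//
// (Editor's worked example for calculateRequestSpan above, assuming
// MaxHeaderFetch is 192 so MaxCount is 12: for remoteHeight=1500 and
// localHeight=1000, requestHead=1499, requestBottom=999 and totalSpan=500,
// so span clamps to 16 and count to 12, giving from=1323, skip=15, max=1499,
// i.e. twelve headers at 1323, 1339, ..., 1499, the last lining up with the
// remote head.)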
-func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
 	// Figure out the valid ancestor range to prevent rewrite attacks
-	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
+	var (
+		floor        = int64(-1)
+		localHeight  uint64
+		remoteHeight = remoteHeader.Number.Uint64()
+	)
+	switch d.mode {
+	case FullSync:
+		localHeight = d.blockchain.CurrentBlock().NumberU64()
+	case FastSync:
+		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+	default:
+		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+	}
+	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+	if localHeight >= MaxForkAncestry {
+		// We're above the max reorg threshold, find the earliest fork point
+		floor = int64(localHeight - MaxForkAncestry)
 
-	if d.mode == FullSync {
-		ceil = d.blockchain.CurrentBlock().NumberU64()
-	} else if d.mode == FastSync {
-		ceil = d.blockchain.CurrentFastBlock().NumberU64()
+		// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+		// all headers before that point will be missing.
+		if d.mode == LightSync {
+			// If we don't know the current CHT position, find it
+			if d.genesis == 0 {
+				header := d.lightchain.CurrentHeader()
+				for header != nil {
+					d.genesis = header.Number.Uint64()
+					if floor >= int64(d.genesis)-1 {
+						break
+					}
+					header = d.lightchain.GetHeaderByHash(header.ParentHash)
+				}
+			}
+			// We already know the "genesis" block number, cap floor to that
+			if floor < int64(d.genesis)-1 {
+				floor = int64(d.genesis) - 1
+			}
+		}
 	}
-	if ceil >= MaxForkAncestry {
-		floor = int64(ceil - MaxForkAncestry)
-	}
-	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
+	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
 
-	// Request the topmost blocks to short circuit binary ancestor lookup
-	head := ceil
-	if head > height {
-		head = height
-	}
-	from := int64(head) - int64(MaxHeaderFetch)
-	if from < 0 {
-		from = 0
-	}
-	// Span out with 15 block gaps into the future to catch bad head reports
-	limit := 2 * MaxHeaderFetch / 16
-	count := 1 + int((int64(ceil)-from)/16)
-	if count > limit {
-		count = limit
-	}
-	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)
+	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
+	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
 
 	// Wait for the remote response to the head fetch
 	number, hash := uint64(0), common.Hash{}
@@ -663,9 +733,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 		return 0, errEmptyHeaderSet
 	}
 	// Make sure the peer's reply conforms to the request
-	for i := 0; i < len(headers); i++ {
-		if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-			p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
+	for i, header := range headers {
+		expectNumber := from + int64(i)*int64(skip+1)
+		if number := header.Number.Int64(); number != expectNumber {
+			p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
 			return 0, errInvalidChain
 		}
 	}
@@ -673,20 +744,24 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 		finished = true
 		for i := len(headers) - 1; i >= 0; i-- {
 			// Skip any headers that underflow/overflow our requested set
-			if
headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { + if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { continue } // Otherwise check if we already know the header or not h := headers[i].Hash() n := headers[i].Number.Uint64() - if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) { - number, hash = n, h - // If every header is known, even future ones, the peer straight out lied about its head - if number > height && i == limit-1 { - p.log.Warn("Lied about chain head", "reported", height, "found", number) - return 0, errStallingPeer - } + var known bool + switch d.mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case FastSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if known { + number, hash = n, h break } } @@ -710,10 +785,12 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err return number, nil } // Ancestor not found, we need to binary search over our chain - start, end := uint64(0), head + start, end := uint64(0), remoteHeight if floor > 0 { start = uint64(floor) } + p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) + for start+1 < end { // Split our chain interval in two, and request the hash to cross check check := (start + end) / 2 @@ -746,7 +823,17 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err // Modify the search interval based on the response h := headers[0].Hash() n := headers[0].Number.Uint64() - if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) { + + var known bool + switch d.mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case FastSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if !known { end = check break } @@ -756,6 +843,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err return 0, errBadPeer } start = check + hash = h case <-timeout: p.log.Debug("Waiting for search header timed out", "elapsed", ttl) @@ -1262,7 +1350,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er } // If no headers were retrieved at all, the peer violated its TD promise that it had a // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. fecher): + // already imported by other means (e.g. fetcher): // // R , L : Both at block 10 // R: Mine block 11, and propagate it to L @@ -1431,7 +1519,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { defer stateSync.Cancel() go func() { if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { - d.queue.Close() // wake up WaitResults + d.queue.Close() // wake up Results } }() // Figure out the ideal pivot block. 
Note, that this goalpost may move if the @@ -1489,7 +1577,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { defer stateSync.Cancel() go func() { if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { - d.queue.Close() // wake up WaitResults + d.queue.Close() // wake up Results } }() oldPivot = P diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/peer.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/peer.go index 428a60f8a..60f86d0e1 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/peer.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/peer.go @@ -229,13 +229,6 @@ func (p *peerConnection) SetHeadersIdle(delivered int) { p.setIdle(p.headerStarted, delivered, &p.headerThroughput, &p.headerIdle) } -// SetBlocksIdle sets the peer to idle, allowing it to execute new block retrieval -// requests. Its estimated block retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetBlocksIdle(delivered int) { - p.setIdle(p.blockStarted, delivered, &p.blockThroughput, &p.blockIdle) -} - // SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval // requests. Its estimated body retrieval throughput is updated with that measured // just now. diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go index a0e8a6d48..7c3395381 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/queue.go @@ -143,7 +143,7 @@ func (q *queue) Reset() { q.resultOffset = 0 } -// Close marks the end of the sync, unblocking WaitResults. +// Close marks the end of the sync, unblocking Results. // It may be called even if the queue is already closed. 
func (q *queue) Close() { q.lock.Lock() @@ -325,7 +325,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { } // Make sure no duplicate requests are executed if _, ok := q.blockTaskPool[hash]; ok { - log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash) + log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash) continue } if _, ok := q.receiptTaskPool[hash]; ok { @@ -545,7 +545,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common taskQueue.Push(header, -int64(header.Number.Uint64())) } if progress { - // Wake WaitResults, resultCache was modified + // Wake Results, resultCache was modified q.active.Signal() } // Assemble and return the block download request @@ -664,12 +664,11 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, } // Add the peer to the expiry report along the number of failed requests expiries[id] = len(request.Headers) + + // Remove the expired requests from the pending pool directly + delete(pendPool, id) } } - // Remove the expired requests from the pending pool - for id := range expiries { - delete(pendPool, id) - } return expiries } @@ -857,7 +856,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ taskQueue.Push(header, -int64(header.Number.Uint64())) } } - // Wake up WaitResults + // Wake up Results if accepted > 0 { q.active.Signal() } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go b/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go index 8d33dfec7..29d5ee4dd 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/downloader/statesync.go @@ -313,11 +313,12 @@ func (s *stateSync) loop() (err error) { s.d.dropPeer(req.peer.id) } // Process all the received blobs and check for stale delivery - if err = s.process(req); err != nil { + delivered, err := s.process(req) + if err != nil { log.Warn("Node data write error", "err", err) return err } - req.peer.SetNodeDataIdle(len(req.response)) + req.peer.SetNodeDataIdle(delivered) } } return nil @@ -398,9 +399,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) { // process iterates over a batch of delivered state data, injecting each item // into a running state sync, re-queuing any items that were requested but not // delivered. 
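//
// (Editor's note: with this change the caller credits peers via
// SetNodeDataIdle(delivered) using only the count of blobs that actually
// advanced the sync; duplicate and unexpected deliveries no longer count.)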
-func (s *stateSync) process(req *stateReq) error { +// Returns whether the peer actually managed to deliver anything of value, +// and any error that occurred +func (s *stateSync) process(req *stateReq) (int, error) { // Collect processing stats and update progress if valid data was received - duplicate, unexpected := 0, 0 + duplicate, unexpected, successful := 0, 0, 0 defer func(start time.Time) { if duplicate > 0 || unexpected > 0 { @@ -410,7 +413,6 @@ func (s *stateSync) process(req *stateReq) error { // Iterate over all the delivered data and inject one-by-one into the trie progress := false - for _, blob := range req.response { prog, hash, err := s.processNodeData(blob) switch err { @@ -418,12 +420,13 @@ func (s *stateSync) process(req *stateReq) error { s.numUncommitted++ s.bytesUncommitted += len(blob) progress = progress || prog + successful++ case trie.ErrNotRequested: unexpected++ case trie.ErrAlreadyProcessed: duplicate++ default: - return fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) + return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) } if _, ok := req.tasks[hash]; ok { delete(req.tasks, hash) @@ -441,12 +444,12 @@ func (s *stateSync) process(req *stateReq) error { // If we've requested the node too many times already, it may be a malicious // sync where nobody has the right data. Abort. if len(task.attempts) >= npeers { - return fmt.Errorf("state node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) + return successful, fmt.Errorf("state node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) } // Missing item, place into the retry queue. s.tasks[hash] = task } - return nil + return successful, nil } // processNodeData tries to inject a trie node data blob delivered from a remote diff --git a/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go b/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go index 382d4d948..6662f820f 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/gen_config.go @@ -30,7 +30,8 @@ func (c Config) MarshalTOML() (interface{}, error) { SkipBcVersionCheck bool `toml:"-"` DatabaseHandles int `toml:"-"` DatabaseCache int - TrieCache int + TrieCleanCache int + TrieDirtyCache int TrieTimeout time.Duration Etherbase common.Address `toml:",omitempty"` MinerNotify []string `toml:",omitempty"` @@ -45,6 +46,8 @@ func (c Config) MarshalTOML() (interface{}, error) { GPO gasprice.Config EnablePreimageRecording bool DocRoot string `toml:"-"` + EWASMInterpreter string + EVMInterpreter string } var enc Config enc.Genesis = c.Genesis @@ -58,7 +61,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.SkipBcVersionCheck = c.SkipBcVersionCheck enc.DatabaseHandles = c.DatabaseHandles enc.DatabaseCache = c.DatabaseCache - enc.TrieCache = c.TrieCache + enc.TrieCleanCache = c.TrieCleanCache + enc.TrieDirtyCache = c.TrieDirtyCache enc.TrieTimeout = c.TrieTimeout enc.Etherbase = c.Etherbase enc.MinerNotify = c.MinerNotify @@ -73,6 +77,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.GPO = c.GPO enc.EnablePreimageRecording = c.EnablePreimageRecording enc.DocRoot = c.DocRoot + enc.EWASMInterpreter = c.EWASMInterpreter + enc.EVMInterpreter = c.EVMInterpreter return &enc, nil } @@ -90,7 +96,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { SkipBcVersionCheck *bool `toml:"-"` DatabaseHandles *int 
`toml:"-"` DatabaseCache *int - TrieCache *int + TrieCleanCache *int + TrieDirtyCache *int TrieTimeout *time.Duration Etherbase *common.Address `toml:",omitempty"` MinerNotify []string `toml:",omitempty"` @@ -105,6 +112,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { GPO *gasprice.Config EnablePreimageRecording *bool DocRoot *string `toml:"-"` + EWASMInterpreter *string + EVMInterpreter *string } var dec Config if err := unmarshal(&dec); err != nil { @@ -143,8 +152,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.DatabaseCache != nil { c.DatabaseCache = *dec.DatabaseCache } - if dec.TrieCache != nil { - c.TrieCache = *dec.TrieCache + if dec.TrieCleanCache != nil { + c.TrieCleanCache = *dec.TrieCleanCache + } + if dec.TrieDirtyCache != nil { + c.TrieDirtyCache = *dec.TrieDirtyCache } if dec.TrieTimeout != nil { c.TrieTimeout = *dec.TrieTimeout @@ -188,5 +200,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.DocRoot != nil { c.DocRoot = *dec.DocRoot } + if dec.EWASMInterpreter != nil { + c.EWASMInterpreter = *dec.EWASMInterpreter + } + if dec.EVMInterpreter != nil { + c.EVMInterpreter = *dec.EVMInterpreter + } return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/handler.go b/vendor/github.com/ethereum/go-ethereum/eth/handler.go index aad8a48a7..140437f53 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/handler.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/handler.go @@ -88,6 +88,8 @@ type ProtocolManager struct { txsSub event.Subscription minedBlockSub *event.TypeMuxSubscription + whitelist map[uint64]common.Hash + // channels for fetcher, syncer, txsyncLoop newPeerCh chan *peer txsyncCh chan *txsync @@ -101,7 +103,7 @@ type ProtocolManager struct { // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // with the Ethereum network. -func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { +func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, whitelist map[uint64]common.Hash) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ networkID: networkID, @@ -110,6 +112,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne blockchain: blockchain, chainconfig: config, peers: newPeerSet(), + whitelist: whitelist, newPeerCh: make(chan *peer), noMorePeers: make(chan struct{}), txsyncCh: make(chan *txsync), @@ -310,7 +313,13 @@ func (pm *ProtocolManager) handle(p *peer) error { } }() } - // main loop. handle incoming messages. 
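[Note on the gen_config.go hunk above: it extends the generated TOML codec for the split TrieCleanCache/TrieDirtyCache fields and the new interpreter overrides. The generated decoder mirrors Config with pointer fields so an absent key can be distinguished from an explicit zero. A hand-written sketch of that convention; the field names come from the hunk, everything else is simplified:

    package main

    import "fmt"

    type Config struct {
        TrieCleanCache int
        TrieDirtyCache int
    }

    // The decode mirror uses pointers: nil means "key absent, keep the default".
    type decoded struct {
        TrieCleanCache *int
        TrieDirtyCache *int
    }

    func apply(c *Config, d decoded) {
        if d.TrieCleanCache != nil {
            c.TrieCleanCache = *d.TrieCleanCache
        }
        if d.TrieDirtyCache != nil {
            c.TrieDirtyCache = *d.TrieDirtyCache
        }
    }

    func main() {
        c := Config{TrieCleanCache: 256, TrieDirtyCache: 256} // defaults
        clean := 512
        apply(&c, decoded{TrieCleanCache: &clean}) // only one key set in the TOML
        fmt.Println(c)                             // {512 256}
    }
]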
+ // If we have any explicit whitelist block hashes, request them + for number := range pm.whitelist { + if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil { + return err + } + } + // Handle incoming messages until the connection is torn down for { if err := pm.handleMsg(p); err != nil { p.Log().Debug("Ethereum message handling failed", "err", err) @@ -469,6 +478,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { p.Log().Debug("Verified to be on the same side of the DAO fork") return nil } + // Otherwise if it's a whitelisted block, validate against the set + if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok { + if hash := headers[0].Hash(); want != hash { + p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want) + return errors.New("whitelist block mismatch") + } + p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want) + } // Irrelevant of the fork checks, send the header to the fetcher just in case headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now()) } @@ -656,12 +673,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { trueHead = request.Block.ParentHash() trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty()) ) - // Update the peers total difficulty if better than the previous + // Update the peer's total difficulty if better than the previous if _, td := p.Head(); trueTD.Cmp(td) > 0 { p.SetHead(trueHead, trueTD) // Schedule a sync if above ours. Note, this will not fire a sync for a gap of - // a singe block (as the true TD is below the propagated block), however this + // a single block (as the true TD is below the propagated block), however this // scenario should easily be covered by the fetcher. currentBlock := pm.blockchain.CurrentBlock() if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 { diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go index 04dd6fe89..addd32882 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/assets.go @@ -1,14 +1,14 @@ // Code generated by go-bindata. DO NOT EDIT. 
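[Note on the handler.go changes above, before the regenerated tracer assets that follow: the new whitelist does two things — on connect, a header is requested for every whitelisted block number, and when headers arrive the delivered hash is checked against the configured one, dropping mismatching peers. A standalone sketch of just the validation step; hash is a toy stand-in for common.Hash, not the vendored type:

    package main

    import (
        "errors"
        "fmt"
    )

    type hash [4]byte

    // checkWhitelist mirrors the shape of the new check: a whitelisted block
    // number must arrive with the expected hash, otherwise the peer is dropped.
    func checkWhitelist(whitelist map[uint64]hash, number uint64, got hash) error {
        want, ok := whitelist[number]
        if !ok {
            return nil // not a whitelisted block, nothing to verify
        }
        if got != want {
            return errors.New("whitelist block mismatch")
        }
        return nil
    }

    func main() {
        wl := map[uint64]hash{100: {0xde, 0xad, 0xbe, 0xef}}
        fmt.Println(checkWhitelist(wl, 100, hash{0xde, 0xad, 0xbe, 0xef})) // <nil>
        fmt.Println(checkWhitelist(wl, 100, hash{1, 2, 3, 4}))             // mismatch
    }
]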
// sources: -// 4byte_tracer.js -// bigram_tracer.js -// call_tracer.js -// evmdis_tracer.js -// noop_tracer.js -// opcount_tracer.js -// prestate_tracer.js -// trigram_tracer.js -// unigram_tracer.js +// 4byte_tracer.js (2.933kB) +// bigram_tracer.js (1.712kB) +// call_tracer.js (8.596kB) +// evmdis_tracer.js (4.194kB) +// noop_tracer.js (1.271kB) +// opcount_tracer.js (1.372kB) +// prestate_tracer.js (3.892kB) +// trigram_tracer.js (1.788kB) +// unigram_tracer.js (1.51kB) package tracers @@ -28,7 +28,7 @@ import ( func bindataRead(data []byte, name string) ([]byte, error) { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %v", name, err) } var buf bytes.Buffer @@ -36,7 +36,7 @@ func bindataRead(data []byte, name string) ([]byte, error) { clErr := gz.Close() if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %v", name, err) } if clErr != nil { return nil, err @@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) { return a, nil } -var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x57\xdd\x6f\x1b\xb9\x11\x7f\xde\xfd\x2b\xa6\x7e\x91\x84\x53\x56\xce\x15\xb8\x02\x72\x5d\x60\xa3\x28\x89\x00\x9d\x6d\x48\x4a\x5d\xf7\x70\x0f\x5c\x72\x76\xc5\x13\x45\x2e\x48\xae\x3e\x10\xf8\x7f\x2f\x86\xfb\x21\xcb\x67\x27\x6e\xeb\x27\x2f\x39\xfc\xcd\xf7\x6f\x46\xa3\x11\x4c\x4c\x79\xb4\xb2\x58\x7b\xf8\xf9\xf2\xfd\xdf\x60\xb5\x46\x28\xcc\x3b\xf4\x6b\xb4\x58\x6d\x21\xad\xfc\xda\x58\x17\x8f\x46\xb0\x5a\x4b\x07\xb9\x54\x08\xd2\x41\xc9\xac\x07\x93\x83\x7f\x26\xaf\x64\x66\x99\x3d\x26\xf1\x68\x54\xbf\x79\xf1\x9a\x10\x72\x8b\x08\xce\xe4\x7e\xcf\x2c\x8e\xe1\x68\x2a\xe0\x4c\x83\x45\x21\x9d\xb7\x32\xab\x3c\x82\xf4\xc0\xb4\x18\x19\x0b\x5b\x23\x64\x7e\x24\x48\xe9\xa1\xd2\x02\x6d\x50\xed\xd1\x6e\x5d\x6b\xc7\xe7\x9b\xaf\x30\x47\xe7\xd0\xc2\x67\xd4\x68\x99\x82\xbb\x2a\x53\x92\xc3\x5c\x72\xd4\x0e\x81\x39\x28\xe9\xc4\xad\x51\x40\x16\xe0\xe8\xe1\x27\x32\x65\xd9\x98\x02\x9f\x4c\xa5\x05\xf3\xd2\xe8\x21\xa0\x24\xcb\x61\x87\xd6\x49\xa3\xe1\xaf\xad\xaa\x06\x70\x08\xc6\x12\x48\x9f\x79\x72\xc0\x82\x29\xe9\xdd\x00\x98\x3e\x82\x62\xfe\xf4\xf4\x0d\x01\x39\xf9\x2d\x40\xea\xa0\x66\x6d\x4a\x04\xbf\x66\x9e\xbc\xde\x4b\xa5\x20\x43\xa8\x1c\xe6\x95\x1a\x12\x5a\x56\x79\xb8\x9f\xad\xbe\xdc\x7e\x5d\x41\x7a\xf3\x00\xf7\xe9\x62\x91\xde\xac\x1e\xae\x60\x2f\xfd\xda\x54\x1e\x70\x87\x35\x94\xdc\x96\x4a\xa2\x80\x3d\xb3\x96\x69\x7f\x04\x93\x13\xc2\xaf\xd3\xc5\xe4\x4b\x7a\xb3\x4a\x3f\xcc\xe6\xb3\xd5\x03\x18\x0b\x9f\x66\xab\x9b\xe9\x72\x09\x9f\x6e\x17\x90\xc2\x5d\xba\x58\xcd\x26\x5f\xe7\xe9\x02\xee\xbe\x2e\xee\x6e\x97\xd3\x04\x96\x48\x56\x21\xbd\xff\x71\xcc\xf3\x90\x3d\x8b\x20\xd0\x33\xa9\x5c\x1b\x89\x07\x53\x81\x5b\x9b\x4a\x09\x58\xb3\x1d\x82\x45\x8e\x72\x87\x02\x18\x70\x53\x1e\xdf\x9c\x54\xc2\x62\xca\xe8\x22\xf8\xfc\x6a\x41\xc2\x2c\x07\x6d\xfc\x10\x1c\x22\xfc\x7d\xed\x7d\x39\x1e\x8d\xf6\xfb\x7d\x52\xe8\x2a\x31\xb6\x18\xa9\x1a\xce\x8d\xfe\x91\xc4\x84\x59\x5a\x74\x9e\x79\x5c\x59\xc6\xd1\x82\xa9\x7c\x59\x79\x07\xae\xca\x73\xc9\x25\x6a\x0f\x52\xe7\xc6\x6e\x43\xa5\x80\x37\xc0\x2d\x32\x8f\xc0\x40\x19\xce\x14\xe0\x01\x79\x15\xee\xea\x48\x87\x72\xb5\x4c\x3b\xc6\xc3\x69\x6e\xcd\x96\x7c\xad\x9c\xa7\x7f\x9c\xc3\x6d\xa6\x50\x40\x81\x1a\x9d\x74\x90\x29\xc3\x37\x49\xfc\x2d\x8e\x9e\x18\x43\x75\x12\x3c\x6c\x84\x42\x6d\xec\xb1\x67\x11\xb2\x4a\x2a\x21\x75\x91\xc4\x51\x2b\x3d\x06\x5d\x29\x35\x8c\x03\x84\x32\x66\x53\x95\x29\xe7\xa6\x0a\xb6\xff\x81\xdc\xd7\x60\xae\x44\x2e\x73\x2a\x0e\xd6\xdd\x7a\x13\
xae\x3a\xbd\x26\x23\xf9\x24\x8e\xce\x60\xc6\x90\x57\x3a\xb8\xd3\x67\x42\xd8\x21\x88\x6c\xf0\x2d\x8e\xa2\x1d\xb3\x84\x05\xd7\xe0\xcd\x17\x3c\x84\xcb\xc1\x55\x1c\x45\x32\x87\xbe\x5f\x4b\x97\xb4\xc0\xbf\x31\xce\x7f\x87\xeb\xeb\xeb\xd0\xd4\xb9\xd4\x28\x06\x40\x10\xd1\x4b\x62\xf5\x4d\x94\x31\xc5\x34\xc7\x31\xf4\x2e\x0f\x3d\xf8\x09\x44\x96\x14\xe8\x3f\xd4\xa7\xb5\xb2\xc4\x9b\xa5\xb7\x52\x17\xfd\xf7\xbf\x0c\x86\xe1\x95\x36\xe1\x0d\x34\xe2\x37\xa6\x13\xae\xef\xb9\x11\xe1\xba\xb1\xb9\x96\x9a\x18\xd1\x08\x35\x52\xce\x1b\xcb\x0a\x1c\xc3\xb7\x47\xfa\x7e\x24\xaf\x1e\xe3\xe8\xf1\x2c\xca\xcb\x5a\xe8\x95\x28\x37\x10\x80\xda\xdb\xae\xce\x0b\x49\x9d\xfa\x34\x01\x01\xef\x7b\x49\x58\xb6\xa6\x3c\x4b\xc2\x06\x8f\x3f\xce\x04\x5d\x48\x71\xe8\x2e\x36\x78\x1c\x5c\xc5\xaf\xa6\x28\x69\x8c\xfe\x4d\x8a\xc3\xcb\xf9\x22\xc0\x1d\x53\x1d\x60\x1d\xbf\x25\x21\x9c\xec\x1a\x04\xdd\x41\x07\xc9\xfe\xe5\x1a\x2e\x2e\x0f\x97\xff\xe7\xdf\x45\x63\xc1\x0b\x25\xf3\xcc\xec\x37\x98\xf6\x78\x9e\x4f\x8b\xae\x52\x9e\xda\x4e\xea\x9d\xd9\x10\x81\xae\x29\x4f\x4a\x85\xd4\x98\x92\xaa\xc6\xd5\x0c\x96\x21\x6a\x90\x1e\x2d\x23\x0a\x37\x3b\xb4\x34\xbd\xc0\xa2\xaf\xac\x76\x5d\x3a\x73\xa9\x99\x6a\x81\x9b\xec\x7b\xcb\x78\xdd\xbb\xf5\xf9\x93\x9c\x72\x7f\x08\xd9\x0c\x3e\x8e\x46\x90\x7a\x20\x3f\xa1\x34\x52\xfb\x21\xec\x11\x34\xa2\x20\x02\x12\x28\x2a\xee\x03\x5e\x6f\xc7\x54\x85\xbd\x9a\x64\x88\xaa\xc3\x53\x53\xd1\x44\x7a\x42\x42\xc3\x60\xe0\xd6\xec\xc2\xa8\xcd\x18\xdf\x40\xd3\xf8\xc6\xca\x42\xea\xb8\x89\xe9\x59\xd3\x93\x45\x09\x01\x07\xb3\x42\xcd\x50\xee\xe9\xe4\x43\xc8\x7f\x26\x8b\x99\xf6\xcf\x8a\xa8\x8e\x7c\xfb\x74\xf0\x7b\xd2\x34\x71\xe2\x88\x78\xfb\x3f\x0f\x86\xf0\xfe\x97\xae\x32\xbd\x21\x28\xf8\x31\x98\x37\xaf\x43\xc5\xcf\x2b\xe2\xe5\x67\x41\x0d\x31\xc9\x4f\x41\x6b\xe2\xaa\x8c\xd2\x51\xfb\x19\xe2\x78\xce\x26\x57\xdf\xc1\x3d\xf7\xad\xc5\x6d\x42\x93\x30\x21\x5e\x07\xad\x53\xf4\x11\xb9\xc5\x2d\x4d\x17\xca\x02\x67\x4a\xa1\xed\x39\x08\xdc\x35\x6c\xca\x29\xe4\x0b\xb7\xa5\x3f\xb6\x33\xc7\x33\x5b\xa0\x77\x3f\x36\x2c\xe0\xbc\x7b\xd7\x52\x71\x08\xc5\xb1\x44\xb8\xbe\x86\xde\x64\x31\x4d\x57\xd3\x5e\xd3\x4c\xa3\x11\xdc\x63\xd8\xc8\x32\x25\x33\xa1\x8e\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x88\x3a\x6a\x1a\xd2\x6a\x45\x4b\x0f\x1e\xa4\xf3\x52\x17\x50\x33\xd6\x9e\xe6\x7b\x03\x17\x7a\x84\xb3\xca\x51\xb5\x3e\x1b\x86\xde\xd0\x66\x63\x91\xf8\x8d\xe6\x50\x68\x37\xa6\x64\xb7\x09\xe5\xd2\x3a\x0f\xa5\x62\x1c\x13\xc2\xeb\x8c\x79\x3d\xbf\x0d\x33\x93\xea\x45\x68\xc1\x00\x74\x1a\xb4\x4c\xd1\xa0\x26\xf5\x0e\xfa\x2d\xc6\x20\x8e\x22\xdb\x4a\x3f\xc1\xbe\x3a\x51\x82\xf3\x58\x3e\x25\x04\x5a\x70\x70\x87\x44\xe5\x81\x0d\xea\xa1\x4c\xba\xfe\xf9\x6b\xb3\x05\xa0\x4b\xe2\x88\xde\x3d\xe9\x6b\x65\x8a\xf3\xbe\x16\x75\x58\x78\x65\x2d\xe5\xbf\x1b\x05\x39\xf5\xf8\x1f\x95\xf3\x14\x53\x4b\xe1\x69\xd8\xe2\x25\xb2\x0e\xd4\x4c\x53\x7f\xf0\xe7\x21\x4a\xf3\x33\xcc\x2b\x52\xd7\x4c\xcb\x7a\xab\x2c\x8d\x47\xed\x25\x53\xea\x48\x79\xd8\x5b\x5a\xa7\x68\x81\x1a\x82\x93\x24\x15\x18\x27\x88\x4a\xcd\x55\x25\xea\x32\x08\x75\xdc\xe0\xb9\x60\xf3\xf9\x1e\xb6\x45\xe7\x58\x81\x09\x55\x52\x2e\x0f\xcd\x26\xab\xa1\x57\x93\x5c\x7f\xd0\x4b\x3a\x23\xcf\x29\x46\x99\x22\x69\x8b\x8c\xb8\x3a\x15\xc2\xa2\x73\xfd\x41\xc3\x39\x5d\x66\xef\xd7\xa8\x29\xf8\xa0\x71\x0f\xdd\x8a\xc4\x38\xa7\x95\x51\x0c\x81\x09\x41\xd4\xf6\x6c\x9d\x89\xa3\xc8\xed\xa5\xe7\x6b\x08\x9a\x4c\x79\xea\xc5\x41\x53\xff\x9c\x39\x84\x8b\xe9\xbf\x56\x93\xdb\x8f\xd3\xc9\xed\xdd\xc3\xc5\x18\xce\xce\x96\xb3\x7f\x4f\xbb\xb3\x0f\xe9\x3c\xbd\x99\x4c\x2f\xc6\xa7\x39\x74\xee\x90\x37\xad\x0b\xa4\xd0\x79\xc6\x37\x49\x89\xb8\xe9\x5f\x9e\xf3\xc0\xc9\xc1\x28\xca\x2c\xb2\xcd\xd5\xc9\x98\xba
\x41\x1b\x1d\x2d\xe5\xc2\x35\xbc\x1a\xac\xab\xd7\xad\x99\x34\xf2\xfd\x96\xc8\x4f\x2b\x51\xa0\x8a\xef\xda\x91\xce\xe7\x9d\xe7\xf4\x41\xe1\xe8\x0e\x3e\x4e\xe7\xd3\xcf\xe9\x6a\x7a\x26\xb5\x5c\xa5\xab\xd9\xa4\x3e\xfa\xaf\x43\xf4\xfe\xcd\x21\xea\x2d\x97\xab\xdb\xc5\xb4\x37\x6e\xbe\xe6\xb7\xe9\xc7\xde\x9f\x14\x36\x7b\xd3\xf7\x8a\xcc\x9b\x7b\x63\xc5\xff\x92\xab\x27\xbb\x43\xce\x5e\x5a\x1d\x02\x09\x71\x5f\x3d\xfb\x89\x00\x4c\xb7\xfc\x91\xd7\x3f\x93\xa2\xf0\xfe\x45\xc6\x78\x8c\x1f\xe3\xff\x04\x00\x00\xff\xff\xb5\x44\x89\xaf\xbc\x0f\x00\x00") +var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdf\x6f\x1a\x49\x12\x7e\x9e\xf9\x2b\x4a\xfb\x02\x68\xc9\x90\xec\x49\x7b\x12\x3e\x9f\x34\x21\x24\x41\x62\x6d\x0b\xc8\xf9\x7c\xab\x7d\xe8\xe9\xae\x19\x7a\x69\xba\x47\xfd\x03\x8c\x22\xff\xef\xa7\xea\x99\x01\xc3\xda\x49\xee\xde\xcc\x74\xf5\x57\x55\x5f\x55\x7d\x5d\x1e\x8d\x60\x62\xea\x83\x95\xd5\xda\xc3\x2f\x6f\xdf\xfd\x1d\x56\x6b\x84\xca\xbc\x41\xbf\x46\x8b\x61\x0b\x79\xf0\x6b\x63\x5d\x3a\x1a\xc1\x6a\x2d\x1d\x94\x52\x21\x48\x07\x35\xb3\x1e\x4c\x09\xfe\xc2\x5e\xc9\xc2\x32\x7b\xc8\xd2\xd1\xa8\xb9\xf3\xe2\x31\x21\x94\x16\x11\x9c\x29\xfd\x9e\x59\x1c\xc3\xc1\x04\xe0\x4c\x83\x45\x21\x9d\xb7\xb2\x08\x1e\x41\x7a\x60\x5a\x8c\x8c\x85\xad\x11\xb2\x3c\x10\xa4\xf4\x10\xb4\x40\x1b\x5d\x7b\xb4\x5b\xd7\xc5\xf1\xe9\xe6\x0b\xcc\xd1\x39\xb4\xf0\x09\x35\x5a\xa6\xe0\x2e\x14\x4a\x72\x98\x4b\x8e\xda\x21\x30\x07\x35\x7d\x71\x6b\x14\x50\x44\x38\xba\xf8\x91\x42\x59\xb6\xa1\xc0\x47\x13\xb4\x60\x5e\x1a\x3d\x04\x94\x14\x39\xec\xd0\x3a\x69\x34\xfc\xad\x73\xd5\x02\x0e\xc1\x58\x02\xe9\x33\x4f\x09\x58\x30\x35\xdd\x1b\x00\xd3\x07\x50\xcc\x9f\xae\xfe\x00\x21\xa7\xbc\x05\x48\x1d\xdd\xac\x4d\x8d\xe0\xd7\xcc\x53\xd6\x7b\xa9\x14\x14\x08\xc1\x61\x19\xd4\x90\xd0\x8a\xe0\xe1\x7e\xb6\xfa\x7c\xfb\x65\x05\xf9\xcd\x03\xdc\xe7\x8b\x45\x7e\xb3\x7a\xb8\x82\xbd\xf4\x6b\x13\x3c\xe0\x0e\x1b\x28\xb9\xad\x95\x44\x01\x7b\x66\x2d\xd3\xfe\x00\xa6\x24\x84\xdf\xa6\x8b\xc9\xe7\xfc\x66\x95\xbf\x9f\xcd\x67\xab\x07\x30\x16\x3e\xce\x56\x37\xd3\xe5\x12\x3e\xde\x2e\x20\x87\xbb\x7c\xb1\x9a\x4d\xbe\xcc\xf3\x05\xdc\x7d\x59\xdc\xdd\x2e\xa7\x19\x2c\x91\xa2\x42\xba\xff\x7d\xce\xcb\x58\x3d\x8b\x20\xd0\x33\xa9\x5c\xc7\xc4\x83\x09\xe0\xd6\x26\x28\x01\x6b\xb6\x43\xb0\xc8\x51\xee\x50\x00\x03\x6e\xea\xc3\x0f\x17\x95\xb0\x98\x32\xba\x8a\x39\xbf\xda\x90\x30\x2b\x41\x1b\x3f\x04\x87\x08\xff\x58\x7b\x5f\x8f\x47\xa3\xfd\x7e\x9f\x55\x3a\x64\xc6\x56\x23\xd5\xc0\xb9\xd1\x3f\xb3\x94\x30\x6b\x8b\xce\x33\x8f\x2b\xcb\x38\x5a\x30\xc1\xd7\xc1\x3b\x70\xa1\x2c\x25\x97\xa8\x3d\x48\x5d\x1a\xbb\x8d\x9d\x02\xde\x00\xb7\xc8\x3c\x02\x03\x65\x38\x53\x80\x8f\xc8\x43\x3c\x6b\x98\x8e\xed\x6a\x99\x76\x8c\xc7\xaf\xa5\x35\x5b\xca\x35\x38\x4f\x7f\x38\x87\xdb\x42\xa1\x80\x0a\x35\x3a\xe9\xa0\x50\x86\x6f\xb2\xf4\x6b\x9a\x3c\x0b\x86\xfa\x24\x66\xd8\x1a\xc5\xde\xd8\x63\xcf\x22\x14\x41\x2a\x21\x75\x95\xa5\x49\x67\x3d\x06\x1d\x94\x1a\xa6\x11\x42\x19\xb3\x09\x75\xce\xb9\x09\x31\xf6\x3f\x91\xfb\x06\xcc\xd5\xc8\x65\x49\xcd\xc1\x8e\xa7\xde\xc4\xa3\xa3\x5f\x53\x90\x7d\x96\x26\x67\x30\x63\x28\x83\x8e\xe9\xf4\x99\x10\x76\x08\xa2\x18\x7c\x4d\x93\x64\xc7\x2c\x61\xc1\x35\x78\xf3\x19\x1f\xe3\xe1\xe0\x2a\x4d\x12\x59\x42\xdf\xaf\xa5\xcb\x3a\xe0\xdf\x19\xe7\x7f\xc0\xf5\xf5\x75\x1c\xea\x52\x6a\x14\x03\x20\x88\xe4\x25\xb3\xe6\x24\x29\x98\x62\x9a\xe3\x18\x7a\x6f\x1f\x7b\xf0\x33\x88\x22\xab\xd0\xbf\x6f\xbe\x36\xce\x32\x6f\x96\xde\x4a\x5d\xf5\xdf\xfd\x3a\x18\xc6\x5b\xda\xc4\x3b\xd0\x9a\xdf\x98\xa3\x71\x73\xce\x8d\x88\xc7\x6d\xcc\x8d\xd5\xc4\x88\xd6\xa8\xb5\x72\xde\x58\x56\xe1\x18\xbe\x3e\xd1\xef\x27\xca\xea\x29\x4d\x
9e\xce\x58\x5e\x36\x46\xaf\xb0\xdc\x42\x00\x6a\x6f\x8f\x7d\x5e\x49\x9a\xd4\xe7\x05\x88\x78\xdf\x2a\xc2\xb2\x0b\xe5\xa2\x08\x1b\x3c\x7c\xbf\x12\x74\x20\xc5\xe3\xf1\x60\x83\x87\xc1\x55\xfa\x6a\x89\xb2\x36\xe8\xdf\xa5\x78\xfc\xd1\x7a\x5d\xdc\x39\xe3\x75\x49\x56\xa7\x78\x07\x83\x0b\x1e\x2d\xba\xa0\x3c\xb5\xbb\xd4\x3b\xb3\x21\xe1\x5a\x13\x3f\x4a\x45\x4a\x4c\x4d\xd5\x72\x8d\x72\x14\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x0e\x2d\xbd\x1a\x60\xd1\x07\xab\xdd\x91\xc6\x52\x6a\xa6\x3a\xe0\x96\x75\x6f\x19\x6f\x66\xa6\xf9\xfe\x8c\x4b\xee\x1f\x23\x8b\x31\xbb\xd1\x08\x72\x0f\x94\x22\xd4\x46\x6a\x3f\x84\x3d\x82\x46\x14\x34\xf8\x02\x45\xe0\x3e\xe2\xf5\x76\x4c\x05\xec\x35\xc3\x4d\x12\x19\xaf\x9a\x40\x2f\xc1\xb3\xe1\x1f\xc6\x00\xb7\x66\x17\x9f\xb8\x82\xf1\x0d\xb4\x03\x67\xac\xac\xa4\x4e\x5b\x3a\xcf\x86\x8d\x22\xca\x08\x38\x86\x15\x6b\x45\x45\xa4\x2f\xef\x99\x82\x6b\x28\x64\x35\xd3\xfe\xa2\x78\x0d\xe9\xdd\xd5\xc1\x1f\x59\x3b\x3c\x99\x23\xc1\xeb\xff\x32\x18\xc2\xbb\x5f\x8f\x1d\xe1\x0d\x41\xc1\xf7\xc1\xbc\x79\x1d\x2a\xbd\x6c\x86\x97\xaf\x45\x37\x34\xc1\x3f\x47\xaf\x99\x0b\x05\x95\xa3\xc9\x33\xf2\x78\x3e\xc5\x57\xdf\xc0\x3d\xcf\xad\xc3\x6d\xa9\xc9\x98\x10\xaf\x83\x36\x25\xfa\x80\xdc\xe2\x96\x54\x9d\xaa\xc0\x99\x52\x68\x7b\x0e\xa2\x66\x0c\xdb\x76\x8a\xf5\xc2\x6d\xed\x0f\x9d\xd6\x7b\x66\x2b\xf4\xee\xfb\x81\x45\x9c\x37\x6f\x3a\x09\x8c\x54\x1c\x6a\x84\xeb\x6b\xe8\x4d\x16\xd3\x7c\x35\xed\xb5\x63\x34\x1a\xc1\x3d\xc6\x4d\xa8\x50\xb2\x10\xea\x00\x02\x15\x7a\x6c\xe2\x32\x3a\x52\x74\x94\x84\x21\xad\x34\xb4\x6c\xe0\xa3\x74\x5e\xea\x0a\x1a\xa5\xd8\xd3\xbb\xda\xc2\xc5\x19\xe1\x2c\x38\xea\xd6\x8b\x47\xc8\x1b\xda\x28\x2c\x92\xae\x90\xfe\xc7\x71\x63\x4a\x1e\x37\x90\x52\x5a\xe7\xa1\x56\x8c\x63\x46\x78\xc7\x60\x5e\xaf\x6f\x3b\xc9\xe4\x7a\x11\x47\x30\x02\x9d\x1e\x38\xa6\xe8\x81\x24\xf7\x0e\xfa\x1d\xc6\x20\x4d\x12\xdb\x59\x3f\xc3\xbe\x3a\x49\x82\xf3\x58\x3f\x17\x04\x5a\x2c\x70\x87\x24\xa1\x51\x0d\x9a\xc7\x90\x7c\xfd\xeb\xb7\xf6\xf5\x45\x97\xa5\x09\xdd\x7b\x36\xd7\xca\x54\xe7\x73\x2d\x1a\x5a\x78\xb0\x96\xea\x7f\x94\xe0\x92\x66\xfc\xcf\xe0\x3c\x71\x6a\x89\x9e\x56\x2d\x5e\x12\xc9\x28\x89\xf4\xda\x0e\xfe\x2a\x86\xf4\x6e\xc5\x77\x82\xdc\xb5\xaf\x54\xb3\xcd\xd5\xc6\xa3\xf6\x92\x29\x75\xa0\x3a\xec\x2d\xad\x31\xb4\xb8\x0c\xc1\x49\xb2\x8a\x8a\x13\x4d\xa5\xe6\x2a\x88\xa6\x0d\x62\x1f\xb7\x78\x2e\xc6\x7c\xbe\xff\x6c\xd1\x39\x56\x61\x46\x9d\x54\xca\xc7\x76\x83\xd4\xd0\x6b\x44\xae\x3f\xe8\x65\xc7\x20\xcf\x25\x46\x99\x2a\xeb\x9a\x8c\x64\x3a\x17\xc2\xa2\x73\xfd\x41\xab\x39\xc7\xca\xde\xaf\x51\x13\xf9\xa0\x71\x0f\xc7\xd5\x84\x71\x4e\xab\x9a\x18\x02\x13\x82\xa4\xed\x62\x8d\x48\x93\xc4\xed\xa5\xe7\x6b\x88\x9e\x4c\x7d\x9a\xc5\x41\xdb\xff\x9c\x39\x84\x9f\xa6\xff\x5e\x4d\x6e\x3f\x4c\x27\xb7\x77\x0f\x3f\x8d\xe1\xec\xdb\x72\xf6\x9f\xe9\xf1\xdb\xfb\x7c\x9e\xdf\x4c\xa6\x3f\x8d\xe3\xdb\xfc\x42\x42\xde\x74\x29\x90\x43\xe7\x19\xdf\x64\x35\xe2\xa6\xff\xf6\x5c\x07\x4e\x09\x26\x49\x61\x91\x6d\xae\x4e\xc1\x34\x03\xda\xfa\xe8\x24\x17\xae\xe1\x55\xb2\xae\x5e\x8f\x66\xd2\xda\xf7\x3b\x21\x3f\xad\x22\x51\x2a\xbe\x19\x47\x3e\x9f\x1f\x33\xa7\x1f\x44\xc7\xf1\xc3\x87\xe9\x7c\xfa\x29\x5f\x4d\xcf\xac\x96\xab\x7c\x35\x9b\x34\x9f\xfe\x67\x8a\xde\xfd\x30\x45\xbd\xe5\x72\x75\xbb\x98\xf6\xc6\xed\xaf\xf9\x6d\xfe\xa1\xf7\x17\x87\xed\xbe\xf2\xad\x26\xf3\xe6\xde\x58\xf1\xff\xd4\xea\xd9\xee\x50\xb2\x97\x56\x87\x28\x42\xdc\x87\x8b\xd5\x1c\x98\xee\xf4\xa3\x6c\xfe\x3d\x49\xe2\xfd\x17\x15\xe3\x29\x7d\x4a\xff\x1b\x00\x00\xff\xff\x7c\xdb\x3f\x79\x34\x0f\x00\x00") func prestate_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) { } info 
:= bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xd5, 0x5, 0x92, 0xed, 0xf4, 0x69, 0x2e, 0x14, 0x48, 0x35, 0x67, 0xcc, 0xf2, 0x3e, 0xc7, 0xf, 0x18, 0x22, 0x7a, 0x4d, 0x6f, 0x31, 0xad, 0x3c, 0x92, 0x77, 0xb4, 0x1, 0x2a, 0xd3, 0x7c}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0xb0, 0x72, 0x28, 0xc7, 0x27, 0x97, 0x4d, 0xe, 0xbf, 0x29, 0xe1, 0xa8, 0xd7, 0x52, 0x13, 0xa1, 0x19, 0xc3, 0xfb, 0x8d, 0x5b, 0xcb, 0xdd, 0xa5, 0xd7, 0x98, 0x34, 0x6a, 0xbf, 0x33, 0x6c}} return a, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js index 99f71d2c3..56aa2b210 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js +++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/internal/tracers/prestate_tracer.js @@ -40,10 +40,7 @@ var idx = toHex(key); if (this.prestate[acc].storage[idx] === undefined) { - var val = toHex(db.getState(addr, key)); - if (val != "0x0000000000000000000000000000000000000000000000000000000000000000") { - this.prestate[acc].storage[idx] = toHex(db.getState(addr, key)); - } + this.prestate[acc].storage[idx] = toHex(db.getState(addr, key)); } }, diff --git a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go index b519236f2..3533a831f 100644 --- a/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go +++ b/vendor/github.com/ethereum/go-ethereum/eth/tracers/tracer.go @@ -290,11 +290,12 @@ type Tracer struct { contractWrapper *contractWrapper // Wrapper around the contract object dbWrapper *dbWrapper // Wrapper around the VM environment - pcValue *uint // Swappable pc value wrapped by a log accessor - gasValue *uint // Swappable gas value wrapped by a log accessor - costValue *uint // Swappable cost value wrapped by a log accessor - depthValue *uint // Swappable depth value wrapped by a log accessor - errorValue *string // Swappable error value wrapped by a log accessor + pcValue *uint // Swappable pc value wrapped by a log accessor + gasValue *uint // Swappable gas value wrapped by a log accessor + costValue *uint // Swappable cost value wrapped by a log accessor + depthValue *uint // Swappable depth value wrapped by a log accessor + errorValue *string // Swappable error value wrapped by a log accessor + refundValue *uint // Swappable refund value wrapped by a log accessor ctx map[string]interface{} // Transaction context gathered throughout execution err error // Error, if one has occurred @@ -323,6 +324,7 @@ func New(code string) (*Tracer, error) { gasValue: new(uint), costValue: new(uint), depthValue: new(uint), + refundValue: new(uint), } // Set up builtins for this environment tracer.vm.PushGlobalGoFunction("toHex", func(ctx *duktape.Context) int { @@ -442,6 +444,9 @@ func New(code string) (*Tracer, error) { tracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.depthValue); return 1 }) tracer.vm.PutPropString(logObject, "getDepth") + tracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.refundValue); return 1 }) + tracer.vm.PutPropString(logObject, "getRefund") + tracer.vm.PushGoFunction(func(ctx *duktape.Context) int { if tracer.errorValue != nil { ctx.PushString(*tracer.errorValue) @@ -527,6 +532,7 @@ func (jst *Tracer) 
CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost *jst.gasValue = uint(gas) *jst.costValue = uint(cost) *jst.depthValue = uint(depth) + *jst.refundValue = uint(env.StateDB.GetRefund()) jst.errorValue = nil if err != nil { diff --git a/vendor/github.com/ethereum/go-ethereum/ethclient/ethclient.go b/vendor/github.com/ethereum/go-ethereum/ethclient/ethclient.go index b40837c8c..f3163e19b 100644 --- a/vendor/github.com/ethereum/go-ethereum/ethclient/ethclient.go +++ b/vendor/github.com/ethereum/go-ethereum/ethclient/ethclient.go @@ -365,26 +365,42 @@ func (ec *Client) NonceAt(ctx context.Context, account common.Address, blockNumb // FilterLogs executes a filter query. func (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { var result []types.Log - err := ec.c.CallContext(ctx, &result, "eth_getLogs", toFilterArg(q)) + arg, err := toFilterArg(q) + if err != nil { + return nil, err + } + err = ec.c.CallContext(ctx, &result, "eth_getLogs", arg) return result, err } // SubscribeFilterLogs subscribes to the results of a streaming filter query. func (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - return ec.c.EthSubscribe(ctx, ch, "logs", toFilterArg(q)) + arg, err := toFilterArg(q) + if err != nil { + return nil, err + } + return ec.c.EthSubscribe(ctx, ch, "logs", arg) } -func toFilterArg(q ethereum.FilterQuery) interface{} { +func toFilterArg(q ethereum.FilterQuery) (interface{}, error) { arg := map[string]interface{}{ - "fromBlock": toBlockNumArg(q.FromBlock), - "toBlock": toBlockNumArg(q.ToBlock), - "address": q.Addresses, - "topics": q.Topics, + "address": q.Addresses, + "topics": q.Topics, } - if q.FromBlock == nil { - arg["fromBlock"] = "0x0" + if q.BlockHash != nil { + arg["blockHash"] = *q.BlockHash + if q.FromBlock != nil || q.ToBlock != nil { + return nil, fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock") + } + } else { + if q.FromBlock == nil { + arg["fromBlock"] = "0x0" + } else { + arg["fromBlock"] = toBlockNumArg(q.FromBlock) + } + arg["toBlock"] = toBlockNumArg(q.ToBlock) } - return arg + return arg, nil } // Pending State diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go b/vendor/github.com/ethereum/go-ethereum/ethdb/database.go index 99abd09b9..6c62d6a38 100644 --- a/vendor/github.com/ethereum/go-ethereum/ethdb/database.go +++ b/vendor/github.com/ethereum/go-ethereum/ethdb/database.go @@ -14,6 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . +// +build !js + package ethdb import ( @@ -380,71 +382,3 @@ func (b *ldbBatch) Reset() { b.b.Reset() b.size = 0 } - -type table struct { - db Database - prefix string -} - -// NewTable returns a Database object that prefixes all keys with a given -// string. -func NewTable(db Database, prefix string) Database { - return &table{ - db: db, - prefix: prefix, - } -} - -func (dt *table) Put(key []byte, value []byte) error { - return dt.db.Put(append([]byte(dt.prefix), key...), value) -} - -func (dt *table) Has(key []byte) (bool, error) { - return dt.db.Has(append([]byte(dt.prefix), key...)) -} - -func (dt *table) Get(key []byte) ([]byte, error) { - return dt.db.Get(append([]byte(dt.prefix), key...)) -} - -func (dt *table) Delete(key []byte) error { - return dt.db.Delete(append([]byte(dt.prefix), key...)) -} - -func (dt *table) Close() { - // Do nothing; don't close the underlying DB. 
-} - -type tableBatch struct { - batch Batch - prefix string -} - -// NewTableBatch returns a Batch object which prefixes all keys with a given string. -func NewTableBatch(db Database, prefix string) Batch { - return &tableBatch{db.NewBatch(), prefix} -} - -func (dt *table) NewBatch() Batch { - return &tableBatch{dt.db.NewBatch(), dt.prefix} -} - -func (tb *tableBatch) Put(key, value []byte) error { - return tb.batch.Put(append([]byte(tb.prefix), key...), value) -} - -func (tb *tableBatch) Delete(key []byte) error { - return tb.batch.Delete(append([]byte(tb.prefix), key...)) -} - -func (tb *tableBatch) Write() error { - return tb.batch.Write() -} - -func (tb *tableBatch) ValueSize() int { - return tb.batch.ValueSize() -} - -func (tb *tableBatch) Reset() { - tb.batch.Reset() -} diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/database_js.go b/vendor/github.com/ethereum/go-ethereum/ethdb/database_js.go new file mode 100644 index 000000000..ba6eeb5a2 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/ethdb/database_js.go @@ -0,0 +1,68 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build js + +package ethdb + +import ( + "errors" +) + +var errNotSupported = errors.New("ethdb: not supported") + +type LDBDatabase struct { +} + +// NewLDBDatabase returns a LevelDB wrapped object. +func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) { + return nil, errNotSupported +} + +// Path returns the path to the database directory. +func (db *LDBDatabase) Path() string { + return "" +} + +// Put puts the given key / value to the queue +func (db *LDBDatabase) Put(key []byte, value []byte) error { + return errNotSupported +} + +func (db *LDBDatabase) Has(key []byte) (bool, error) { + return false, errNotSupported +} + +// Get returns the given key if it's present. 
+func (db *LDBDatabase) Get(key []byte) ([]byte, error) { + return nil, errNotSupported +} + +// Delete deletes the key from the queue and database +func (db *LDBDatabase) Delete(key []byte) error { + return errNotSupported +} + +func (db *LDBDatabase) Close() { +} + +// Meter configures the database metrics collectors and +func (db *LDBDatabase) Meter(prefix string) { +} + +func (db *LDBDatabase) NewBatch() Batch { + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/event/filter/generic_filter.go b/vendor/github.com/ethereum/go-ethereum/ethdb/table.go similarity index 51% rename from vendor/github.com/ethereum/go-ethereum/event/filter/generic_filter.go rename to vendor/github.com/ethereum/go-ethereum/ethdb/table.go index d679b8bfa..28069c078 100644 --- a/vendor/github.com/ethereum/go-ethereum/event/filter/generic_filter.go +++ b/vendor/github.com/ethereum/go-ethereum/ethdb/table.go @@ -14,35 +14,38 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package filter +package ethdb -type Generic struct { - Str1, Str2, Str3 string - Data map[string]struct{} - - Fn func(data interface{}) +type table struct { + db Database + prefix string } -// self = registered, f = incoming -func (self Generic) Compare(f Filter) bool { - var strMatch, dataMatch = true, true - - filter := f.(Generic) - if (len(self.Str1) > 0 && filter.Str1 != self.Str1) || - (len(self.Str2) > 0 && filter.Str2 != self.Str2) || - (len(self.Str3) > 0 && filter.Str3 != self.Str3) { - strMatch = false +// NewTable returns a Database object that prefixes all keys with a given +// string. +func NewTable(db Database, prefix string) Database { + return &table{ + db: db, + prefix: prefix, } - - for k := range self.Data { - if _, ok := filter.Data[k]; !ok { - return false - } - } - - return strMatch && dataMatch } -func (self Generic) Trigger(data interface{}) { - self.Fn(data) +func (dt *table) Put(key []byte, value []byte) error { + return dt.db.Put(append([]byte(dt.prefix), key...), value) +} + +func (dt *table) Has(key []byte) (bool, error) { + return dt.db.Has(append([]byte(dt.prefix), key...)) +} + +func (dt *table) Get(key []byte) ([]byte, error) { + return dt.db.Get(append([]byte(dt.prefix), key...)) +} + +func (dt *table) Delete(key []byte) error { + return dt.db.Delete(append([]byte(dt.prefix), key...)) +} + +func (dt *table) Close() { + // Do nothing; don't close the underlying DB. } diff --git a/vendor/github.com/ethereum/go-ethereum/ethdb/table_batch.go b/vendor/github.com/ethereum/go-ethereum/ethdb/table_batch.go new file mode 100644 index 000000000..ae83e79ce --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/ethdb/table_batch.go @@ -0,0 +1,51 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
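[Note on database_js.go above: database.go now carries a `+build !js` constraint, and the new js-tagged file supplies a compile-only stand-in whose every operation fails fast with errNotSupported, so packages importing ethdb still build under GOOS=js. A stripped-down sketch of the stub side of that pattern; the interface is trimmed and this is not the vendored type:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotSupported = errors.New("ethdb: not supported")

    // stubDB mirrors the js build of LDBDatabase: it satisfies the database
    // surface at compile time but refuses every call at run time.
    type stubDB struct{}

    func (stubDB) Put(key, value []byte) error    { return errNotSupported }
    func (stubDB) Get(key []byte) ([]byte, error) { return nil, errNotSupported }
    func (stubDB) Has(key []byte) (bool, error)   { return false, errNotSupported }
    func (stubDB) Delete(key []byte) error        { return errNotSupported }

    func main() {
        var db stubDB
        _, err := db.Get([]byte("k"))
        fmt.Println(err) // ethdb: not supported
    }
]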
+ +package ethdb + +type tableBatch struct { + batch Batch + prefix string +} + +// NewTableBatch returns a Batch object which prefixes all keys with a given string. +func NewTableBatch(db Database, prefix string) Batch { + return &tableBatch{db.NewBatch(), prefix} +} + +func (dt *table) NewBatch() Batch { + return &tableBatch{dt.db.NewBatch(), dt.prefix} +} + +func (tb *tableBatch) Put(key, value []byte) error { + return tb.batch.Put(append([]byte(tb.prefix), key...), value) +} + +func (tb *tableBatch) Delete(key []byte) error { + return tb.batch.Delete(append([]byte(tb.prefix), key...)) +} + +func (tb *tableBatch) Write() error { + return tb.batch.Write() +} + +func (tb *tableBatch) ValueSize() int { + return tb.batch.ValueSize() +} + +func (tb *tableBatch) Reset() { + tb.batch.Reset() +} diff --git a/vendor/github.com/ethereum/go-ethereum/event/filter/filter.go b/vendor/github.com/ethereum/go-ethereum/event/filter/filter.go deleted file mode 100644 index a6fe46d6a..000000000 --- a/vendor/github.com/ethereum/go-ethereum/event/filter/filter.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package filter implements event filters. 
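[Note on table.go and table_batch.go above (split out of database.go): both implement namespacing by key prefix, so several logical tables can share one backing store, and a batch applies the same prefix on every write. A minimal in-memory model of the idea; the real code wraps a Database/Batch rather than a map:

    package main

    import "fmt"

    // prefixed mirrors ethdb's table: every key is namespaced with a prefix.
    type prefixed struct {
        store  map[string][]byte
        prefix string
    }

    func (t prefixed) Put(key, value []byte) {
        t.store[t.prefix+string(key)] = value
    }

    func (t prefixed) Get(key []byte) ([]byte, bool) {
        v, ok := t.store[t.prefix+string(key)]
        return v, ok
    }

    func main() {
        backing := map[string][]byte{}
        cht := prefixed{backing, "chtIndex-"} // prefix as used by the CHT indexer
        cht.Put([]byte("head"), []byte{0x01})
        v, ok := cht.Get([]byte("head"))
        fmt.Println(v, ok)                    // [1] true
        fmt.Println(backing["chtIndex-head"]) // [1] — stored under the prefixed key
    }
]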
-package filter - -import "reflect" - -type Filter interface { - Compare(Filter) bool - Trigger(data interface{}) -} - -type FilterEvent struct { - filter Filter - data interface{} -} - -type Filters struct { - id int - watchers map[int]Filter - ch chan FilterEvent - - quit chan struct{} -} - -func New() *Filters { - return &Filters{ - ch: make(chan FilterEvent), - watchers: make(map[int]Filter), - quit: make(chan struct{}), - } -} - -func (f *Filters) Start() { - go f.loop() -} - -func (f *Filters) Stop() { - close(f.quit) -} - -func (f *Filters) Notify(filter Filter, data interface{}) { - f.ch <- FilterEvent{filter, data} -} - -func (f *Filters) Install(watcher Filter) int { - f.watchers[f.id] = watcher - f.id++ - - return f.id - 1 -} - -func (f *Filters) Uninstall(id int) { - delete(f.watchers, id) -} - -func (f *Filters) loop() { -out: - for { - select { - case <-f.quit: - break out - case event := <-f.ch: - for _, watcher := range f.watchers { - if reflect.TypeOf(watcher) == reflect.TypeOf(event.filter) { - if watcher.Compare(event.filter) { - watcher.Trigger(event.data) - } - } - } - } - } -} - -func (f *Filters) Match(a, b Filter) bool { - return reflect.TypeOf(a) == reflect.TypeOf(b) && a.Compare(b) -} - -func (f *Filters) Get(i int) Filter { - return f.watchers[i] -} diff --git a/vendor/github.com/ethereum/go-ethereum/interfaces.go b/vendor/github.com/ethereum/go-ethereum/interfaces.go index 26b0fcbc1..be7834406 100644 --- a/vendor/github.com/ethereum/go-ethereum/interfaces.go +++ b/vendor/github.com/ethereum/go-ethereum/interfaces.go @@ -146,7 +146,7 @@ type FilterQuery struct { // {{A}} matches topic A in first position // {{}, {B}} matches any topic in first position, B in second position // {{A}, {B}} matches topic A in first position, B in second position - // {{A, B}}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position + // {{A, B}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position Topics [][]common.Hash } diff --git a/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go b/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go index 20e82ec2a..f8b1b43c0 100644 --- a/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go +++ b/vendor/github.com/ethereum/go-ethereum/internal/cmdtest/test_cmd.go @@ -27,6 +27,7 @@ import ( "regexp" "strings" "sync" + "syscall" "testing" "text/template" "time" @@ -50,6 +51,8 @@ type TestCmd struct { stdout *bufio.Reader stdin io.WriteCloser stderr *testlogger + // Err will contain the process exit error or interrupt signal error + Err error } // Run exec's the current binary using name as argv[0] which will trigger the @@ -182,11 +185,25 @@ func (tt *TestCmd) ExpectExit() { } func (tt *TestCmd) WaitExit() { - tt.cmd.Wait() + tt.Err = tt.cmd.Wait() } func (tt *TestCmd) Interrupt() { - tt.cmd.Process.Signal(os.Interrupt) + tt.Err = tt.cmd.Process.Signal(os.Interrupt) +} + +// ExitStatus exposes the process' OS exit code +// It will only return a valid value after the process has finished. +func (tt *TestCmd) ExitStatus() int { + if tt.Err != nil { + exitErr := tt.Err.(*exec.ExitError) + if exitErr != nil { + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + return status.ExitStatus() + } + } + } + return 0 } // StderrText returns any stderr output written so far. 
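[Note on the test_cmd.go change above: TestCmd now records the Wait/Signal error, and the new ExitStatus helper digs the OS exit code out of it. The same unwrapping in a self-contained form, using comma-ok assertions; syscall.WaitStatus makes this Unix-specific:

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
    )

    // exitStatus unwraps an *exec.ExitError and extracts the process exit code.
    func exitStatus(err error) int {
        if exitErr, ok := err.(*exec.ExitError); ok {
            if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
                return status.ExitStatus()
            }
        }
        return 0
    }

    func main() {
        err := exec.Command("sh", "-c", "exit 3").Run()
        fmt.Println(exitStatus(err)) // 3 on Unix
    }
]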
diff --git a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go index 9adad6095..114cc1192 100644 --- a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go +++ b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/api.go @@ -336,6 +336,9 @@ func (s *PrivateAccountAPI) UnlockAccount(addr common.Address, password string, d = time.Duration(*duration) * time.Second } err := fetchKeystore(s.am).TimedUnlock(accounts.Account{Address: addr}, password, d) + if err != nil { + log.Warn("Failed account unlock attempt", "address", addr, "err", err) + } return err == nil, err } @@ -344,10 +347,10 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool { return fetchKeystore(s.am).Lock(addr) == nil } -// signTransactions sets defaults and signs the given transaction +// signTransaction sets defaults and signs the given transaction // NOTE: the caller needs to ensure that the nonceLock is held, if applicable, // and release it after the transaction has been submitted to the tx pool -func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args SendTxArgs, passwd string) (*types.Transaction, error) { +func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArgs, passwd string) (*types.Transaction, error) { // Look up the wallet containing the requested signer account := accounts.Account{Address: args.From} wallet, err := s.am.Find(account) @@ -378,8 +381,9 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs s.nonceLock.LockAddr(args.From) defer s.nonceLock.UnlockAddr(args.From) } - signed, err := s.signTransaction(ctx, args, passwd) + signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { + log.Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) return common.Hash{}, err } return submitTransaction(ctx, s.b, signed) @@ -401,8 +405,9 @@ func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs if args.Nonce == nil { return nil, fmt.Errorf("nonce not specified") } - signed, err := s.signTransaction(ctx, args, passwd) + signed, err := s.signTransaction(ctx, &args, passwd) if err != nil { + log.Warn("Failed transaction sign attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err) return nil, err } data, err := rlp.EncodeToBytes(signed) @@ -444,6 +449,7 @@ func (s *PrivateAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr c // Assemble sign the data with the wallet signature, err := wallet.SignHashWithPassphrase(account, passwd, signHash(data)) if err != nil { + log.Warn("Failed data sign attempt", "address", addr, "err", err) return nil, err } signature[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper @@ -510,6 +516,72 @@ func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Add return (*hexutil.Big)(state.GetBalance(address)), state.Error() } +// Result structs for GetProof +type AccountResult struct { + Address common.Address `json:"address"` + AccountProof []string `json:"accountProof"` + Balance *hexutil.Big `json:"balance"` + CodeHash common.Hash `json:"codeHash"` + Nonce hexutil.Uint64 `json:"nonce"` + StorageHash common.Hash `json:"storageHash"` + StorageProof []StorageResult `json:"storageProof"` +} +type StorageResult struct { + Key string `json:"key"` + Value *hexutil.Big `json:"value"` + Proof []string `json:"proof"` +} + +// GetProof returns the 
Merkle-proof for a given account and optionally some storage keys. +func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNr rpc.BlockNumber) (*AccountResult, error) { + state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) + if state == nil || err != nil { + return nil, err + } + + storageTrie := state.StorageTrie(address) + storageHash := types.EmptyRootHash + codeHash := state.GetCodeHash(address) + storageProof := make([]StorageResult, len(storageKeys)) + + // if we have a storageTrie, (which means the account exists), we can update the storagehash + if storageTrie != nil { + storageHash = storageTrie.Hash() + } else { + // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray. + codeHash = crypto.Keccak256Hash(nil) + } + + // create the proof for the storageKeys + for i, key := range storageKeys { + if storageTrie != nil { + proof, storageError := state.GetStorageProof(address, common.HexToHash(key)) + if storageError != nil { + return nil, storageError + } + storageProof[i] = StorageResult{key, (*hexutil.Big)(state.GetState(address, common.HexToHash(key)).Big()), common.ToHexArray(proof)} + } else { + storageProof[i] = StorageResult{key, &hexutil.Big{}, []string{}} + } + } + + // create the accountProof + accountProof, proofErr := state.GetProof(address) + if proofErr != nil { + return nil, proofErr + } + + return &AccountResult{ + Address: address, + AccountProof: common.ToHexArray(accountProof), + Balance: (*hexutil.Big)(state.GetBalance(address)), + CodeHash: codeHash, + Nonce: hexutil.Uint64(state.GetNonce(address)), + StorageHash: storageHash, + StorageProof: storageProof, + }, state.Error() +} + // GetBlockByNumber returns the requested block. When blockNr is -1 the chain head is returned. When fullTx is true all // transactions in the block are returned in full detail, otherwise only the transaction hash is returned. func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, blockNr rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { @@ -619,7 +691,7 @@ type CallArgs struct { Data hexutil.Bytes `json:"data"` } -func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) { +func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) { defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) @@ -660,7 +732,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr defer cancel() // Get a new instance of the EVM. - evm, vmError, err := s.b.GetEVM(ctx, msg, state, header, vmCfg) + evm, vmError, err := s.b.GetEVM(ctx, msg, state, header) if err != nil { return nil, 0, false, err } @@ -684,7 +756,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr // Call executes the given transaction on the state for the given block number. // It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values. 
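[Note on the GetProof addition above: it backs the new eth_getProof RPC endpoint (registered with three params in the web3ext hunk further down), returning Merkle proofs for an account and optional storage slots. A usage sketch over raw JSON-RPC; it assumes a local node on :8545, and the account address and storage slot are arbitrary examples:

    package main

    import (
        "context"
        "fmt"

        "github.com/ethereum/go-ethereum/rpc"
    )

    func main() {
        client, err := rpc.Dial("http://localhost:8545")
        if err != nil {
            panic(err)
        }
        var proof map[string]interface{}
        err = client.CallContext(context.Background(), &proof, "eth_getProof",
            "0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842", // example account
            []string{"0x0000000000000000000000000000000000000000000000000000000000000000"},
            "latest")
        if err != nil {
            panic(err)
        }
        fmt.Println(proof["storageHash"], proof["codeHash"])
    }
]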
func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) { - result, _, _, err := s.doCall(ctx, args, blockNr, vm.Config{}, 5*time.Second) + result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second) return (hexutil.Bytes)(result), err } @@ -713,7 +785,7 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h executable := func(gas uint64) bool { args.Gas = hexutil.Uint64(gas) - _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, vm.Config{}, 0) + _, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0) if err != nil || failed { return false } diff --git a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go index c9ffe230c..e23ee03b1 100644 --- a/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go +++ b/vendor/github.com/ethereum/go-ethereum/internal/ethapi/backend.go @@ -53,7 +53,7 @@ type Backend interface { GetBlock(ctx context.Context, blockHash common.Hash) (*types.Block, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetTd(blockHash common.Hash) *big.Int - GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) + GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription diff --git a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go index addf3c766..06bfcef69 100644 --- a/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go +++ b/vendor/github.com/ethereum/go-ethereum/internal/web3ext/web3ext.go @@ -384,6 +384,18 @@ web3._extend({ params: 1, inputFormatter: [null] }), + new web3._extend.Method({ + name: 'standardTraceBadBlockToFile', + call: 'debug_standardTraceBadBlockToFile', + params: 2, + inputFormatter: [null, null] + }), + new web3._extend.Method({ + name: 'standardTraceBlockToFile', + call: 'debug_standardTraceBlockToFile', + params: 2, + inputFormatter: [null, null] + }), new web3._extend.Method({ name: 'traceBlockByNumber', call: 'debug_traceBlockByNumber', @@ -481,6 +493,12 @@ web3._extend({ params: 2, inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter, web3._extend.utils.toHex] }), + new web3._extend.Method({ + name: 'getProof', + call: 'eth_getProof', + params: 3, + inputFormatter: [web3._extend.formatters.inputAddressFormatter, null, web3._extend.formatters.inputBlockNumberFormatter] + }), ], properties: [ new web3._extend.Property({ diff --git a/vendor/github.com/ethereum/go-ethereum/les/api_backend.go b/vendor/github.com/ethereum/go-ethereum/les/api_backend.go index aa748a4ea..753139623 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/api_backend.go +++ b/vendor/github.com/ethereum/go-ethereum/les/api_backend.go @@ -105,10 +105,10 @@ func (b *LesApiBackend) GetTd(hash common.Hash) *big.Int { return b.eth.blockchain.GetTdByHash(hash) } -func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) { +func (b *LesApiBackend) GetEVM(ctx 
context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { state.SetBalance(msg.From(), math.MaxBig256) context := core.NewEVMContext(msg, header, b.eth.blockchain, nil) - return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), state.Error, nil + return vm.NewEVM(context, state, b.eth.chainConfig, vm.Config{}), state.Error, nil } func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { diff --git a/vendor/github.com/ethereum/go-ethereum/les/backend.go b/vendor/github.com/ethereum/go-ethereum/les/backend.go index d54b466e6..cd99f8f81 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/backend.go +++ b/vendor/github.com/ethereum/go-ethereum/les/backend.go @@ -82,7 +82,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.ConstantinopleOverride) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } diff --git a/vendor/github.com/ethereum/go-ethereum/les/fetcher.go b/vendor/github.com/ethereum/go-ethereum/les/fetcher.go index 16cfc0034..56c96017e 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/fetcher.go +++ b/vendor/github.com/ethereum/go-ethereum/les/fetcher.go @@ -149,38 +149,39 @@ func (f *lightFetcher) syncLoop() { s := requesting requesting = false var ( - rq *distReq - reqID uint64 + rq *distReq + reqID uint64 + syncing bool ) - if !f.syncing && !(newAnnounce && s) { - rq, reqID = f.nextRequest() + rq, reqID, syncing = f.nextRequest() } - - syncing := f.syncing f.lock.Unlock() if rq != nil { requesting = true - _, ok := <-f.pm.reqDist.queue(rq) - if !ok { + if _, ok := <-f.pm.reqDist.queue(rq); ok { + if syncing { + f.lock.Lock() + f.syncing = true + f.lock.Unlock() + } else { + go func() { + time.Sleep(softRequestTimeout) + f.reqMu.Lock() + req, ok := f.requested[reqID] + if ok { + req.timeout = true + f.requested[reqID] = req + } + f.reqMu.Unlock() + // keep starting new requests while possible + f.requestChn <- false + }() + } + } else { f.requestChn <- false } - - if !syncing { - go func() { - time.Sleep(softRequestTimeout) - f.reqMu.Lock() - req, ok := f.requested[reqID] - if ok { - req.timeout = true - f.requested[reqID] = req - } - f.reqMu.Unlock() - // keep starting new requests while possible - f.requestChn <- false - }() - } } case reqID := <-f.timeoutChn: f.reqMu.Lock() @@ -222,6 +223,7 @@ func (f *lightFetcher) syncLoop() { f.newHeaders([]*types.Header{h}, []*big.Int{td}) } f.lock.Unlock() + f.requestChn <- false } } } @@ -420,7 +422,7 @@ func (f *lightFetcher) requestedID(reqID uint64) bool { // nextRequest selects the peer and announced head to be requested next, amount // to be downloaded starting from the head backwards is also returned -func (f *lightFetcher) nextRequest() (*distReq, uint64) { +func (f *lightFetcher) nextRequest() (*distReq, uint64, bool) { var ( bestHash common.Hash bestAmount uint64 @@ -430,19 +432,17 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) { bestHash, bestAmount, bestTd, bestSyncing = f.findBestValues() if bestTd == f.maxConfirmedTd { - return nil, 0 + return nil, 0, false } - f.syncing = bestSyncing - var rq *distReq reqID := genReqID() - if f.syncing { + if bestSyncing { rq = 
f.newFetcherDistReqForSync(bestHash) } else { rq = f.newFetcherDistReq(bestHash, reqID, bestAmount) } - return rq, reqID + return rq, reqID, bestSyncing } // findBestValues retrieves the best values for LES or ULC mode. diff --git a/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go b/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go index d50eb809c..8ef4ba511 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go +++ b/vendor/github.com/ethereum/go-ethereum/les/flowcontrol/control.go @@ -82,7 +82,6 @@ func (peer *ClientNode) RequestProcessed(cost uint64) (bv, realCost uint64) { time := mclock.Now() peer.recalcBV(time) peer.bufValue -= cost - peer.recalcBV(time) rcValue, rcost := peer.cm.processed(peer.cmNode, time) if rcValue < peer.params.BufLimit { bv := peer.params.BufLimit - rcValue diff --git a/vendor/github.com/ethereum/go-ethereum/les/serverpool.go b/vendor/github.com/ethereum/go-ethereum/les/serverpool.go index c7edfc784..e1feb965a 100644 --- a/vendor/github.com/ethereum/go-ethereum/les/serverpool.go +++ b/vendor/github.com/ethereum/go-ethereum/les/serverpool.go @@ -729,7 +729,7 @@ func (e *poolEntry) DecodeRLP(s *rlp.Stream) error { } func encodePubkey64(pub *ecdsa.PublicKey) []byte { - return crypto.FromECDSAPub(pub)[:1] + return crypto.FromECDSAPub(pub)[1:] } func decodePubkey64(b []byte) (*ecdsa.PublicKey, error) { diff --git a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go index 1cfd7535e..dd1b74a7b 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/postprocess.go +++ b/vendor/github.com/ethereum/go-ethereum/light/postprocess.go @@ -159,7 +159,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabase(trieTable), + triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down sectionSize: size, } return core.NewChainIndexer(db, ethdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht") @@ -281,7 +281,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabase(trieTable), + triedb: trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down parentSize: parentSize, size: size, } diff --git a/vendor/github.com/ethereum/go-ethereum/light/trie.go b/vendor/github.com/ethereum/go-ethereum/light/trie.go index c07e99461..ab4e18b43 100644 --- a/vendor/github.com/ethereum/go-ethereum/light/trie.go +++ b/vendor/github.com/ethereum/go-ethereum/light/trie.go @@ -108,7 +108,7 @@ func (t *odrTrie) TryGet(key []byte) ([]byte, error) { func (t *odrTrie) TryUpdate(key, value []byte) error { key = crypto.Keccak256(key) return t.do(key, func() error { - return t.trie.TryDelete(key) + return t.trie.TryUpdate(key, value) }) } diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/counter.go b/vendor/github.com/ethereum/go-ethereum/metrics/counter.go index fbb14b862..2f78c90d5 100644 --- a/vendor/github.com/ethereum/go-ethereum/metrics/counter.go +++ b/vendor/github.com/ethereum/go-ethereum/metrics/counter.go @@ -22,6 +22,17 @@ func GetOrRegisterCounter(name string, r Registry) Counter { return r.GetOrRegister(name, NewCounter).(Counter) } +// GetOrRegisterCounterForced returns an existing Counter or constructs and registers a +// new Counter no matter the global switch is enabled or 
not. +// Be sure to unregister the counter from the registry once it is of no use to +// allow for garbage collection. +func GetOrRegisterCounterForced(name string, r Registry) Counter { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewCounterForced).(Counter) +} + // NewCounter constructs a new StandardCounter. func NewCounter() Counter { if !Enabled { diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go b/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go index 31a5c21b5..c4ef92723 100644 --- a/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go +++ b/vendor/github.com/ethereum/go-ethereum/metrics/influxdb/influxdb.go @@ -58,6 +58,34 @@ func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, userna rep.run() } +// InfluxDBWithTagsOnce runs once an InfluxDB reporter and post the given metrics.Registry with the specified tags +func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error { + u, err := uurl.Parse(url) + if err != nil { + return fmt.Errorf("Unable to parse InfluxDB. url: %s, err: %v", url, err) + } + + rep := &reporter{ + reg: r, + url: *u, + database: database, + username: username, + password: password, + namespace: namespace, + tags: tags, + cache: make(map[string]int64), + } + if err := rep.makeClient(); err != nil { + return fmt.Errorf("Unable to make InfluxDB client. err: %v", err) + } + + if err := rep.send(); err != nil { + return fmt.Errorf("Unable to send to InfluxDB. err: %v", err) + } + + return nil +} + func (r *reporter) makeClient() (err error) { r.client, err = client.NewClient(client.Config{ URL: r.url, diff --git a/vendor/github.com/ethereum/go-ethereum/metrics/registry.go b/vendor/github.com/ethereum/go-ethereum/metrics/registry.go index cc34c9dfd..c1cf7906c 100644 --- a/vendor/github.com/ethereum/go-ethereum/metrics/registry.go +++ b/vendor/github.com/ethereum/go-ethereum/metrics/registry.go @@ -311,7 +311,10 @@ func (r *PrefixedRegistry) UnregisterAll() { r.underlying.UnregisterAll() } -var DefaultRegistry Registry = NewRegistry() +var ( + DefaultRegistry = NewRegistry() + EphemeralRegistry = NewRegistry() +) // Call the given function for each registered metric. 
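[Note on the metrics additions above: GetOrRegisterCounterForced sidesteps the global metrics switch — the ordinary constructor degrades to a no-op when metrics are disabled, while the forced variant always returns a live counter (hence the warning about unregistering it to allow garbage collection). A toy model of that split; the real package gates on metrics.Enabled:

    package main

    import "fmt"

    var enabled = false // stand-in for the package-level metrics switch

    type counter interface {
        Inc(int64)
        Count() int64
    }

    type nilCounter struct{}

    func (nilCounter) Inc(int64)    {}
    func (nilCounter) Count() int64 { return 0 }

    type stdCounter struct{ n int64 }

    func (c *stdCounter) Inc(d int64)  { c.n += d }
    func (c *stdCounter) Count() int64 { return c.n }

    func newCounter() counter { // no-op when the switch is off
        if !enabled {
            return nilCounter{}
        }
        return &stdCounter{}
    }

    func newCounterForced() counter { // always live, regardless of the switch
        return &stdCounter{}
    }

    func main() {
        a, b := newCounter(), newCounterForced()
        a.Inc(1)
        b.Inc(1)
        fmt.Println(a.Count(), b.Count()) // 0 1
    }
]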
func Each(f func(string, interface{})) { diff --git a/vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go b/vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go index 8961091d5..7e19975ae 100644 --- a/vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go +++ b/vendor/github.com/ethereum/go-ethereum/miner/stress_clique.go @@ -22,7 +22,6 @@ package main import ( "bytes" "crypto/ecdsa" - "fmt" "io/ioutil" "math/big" "math/rand" @@ -40,7 +39,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" ) @@ -62,11 +61,11 @@ func main() { var ( nodes []*node.Node - enodes []string + enodes []*enode.Node ) for _, sealer := range sealers { // Start the node and wait until it's up - node, err := makeSealer(genesis, enodes) + node, err := makeSealer(genesis) if err != nil { panic(err) } @@ -76,18 +75,12 @@ func main() { time.Sleep(250 * time.Millisecond) } // Connect the node to al the previous ones - for _, enode := range enodes { - enode, err := discover.ParseNode(enode) - if err != nil { - panic(err) - } - node.Server().AddPeer(enode) + for _, n := range enodes { + node.Server().AddPeer(n) } - // Start tracking the node and it's enode url + // Start tracking the node and it's enode nodes = append(nodes, node) - - enode := fmt.Sprintf("enode://%s@127.0.0.1:%d", node.Server().NodeInfo().ID, node.Server().NodeInfo().Ports.Listener) - enodes = append(enodes, enode) + enodes = append(enodes, node.Server().Self()) // Inject the signer key and start sealing with it store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -177,7 +170,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core return genesis } -func makeSealer(genesis *core.Genesis, nodes []string) (*node.Node, error) { +func makeSealer(genesis *core.Genesis) (*node.Node, error) { // Define the basic configurations for the Ethereum node datadir, _ := ioutil.TempDir("", "") diff --git a/vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go b/vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go index 5ed11d73a..044ca9a21 100644 --- a/vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go +++ b/vendor/github.com/ethereum/go-ethereum/miner/stress_ethash.go @@ -21,7 +21,6 @@ package main import ( "crypto/ecdsa" - "fmt" "io/ioutil" "math/big" "math/rand" @@ -41,7 +40,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" ) @@ -62,11 +61,11 @@ func main() { var ( nodes []*node.Node - enodes []string + enodes []*enode.Node ) for i := 0; i < 4; i++ { // Start the node and wait until it's up - node, err := makeMiner(genesis, enodes) + node, err := makeMiner(genesis) if err != nil { panic(err) } @@ -76,18 +75,12 @@ func main() { time.Sleep(250 * time.Millisecond) } // Connect the node to al the previous ones - for _, enode := range enodes { - enode, err := discover.ParseNode(enode) - if err != nil { - panic(err) - } - node.Server().AddPeer(enode) + for _, n := range enodes { + node.Server().AddPeer(n) } - // Start tracking the node and it's enode url + // Start tracking the node and it's enode nodes = append(nodes, node) - - enode := 
fmt.Sprintf("enode://%s@127.0.0.1:%d", node.Server().NodeInfo().ID, node.Server().NodeInfo().Ports.Listener) - enodes = append(enodes, enode) + enodes = append(enodes, node.Server().Self()) // Inject the signer key and start sealing with it store := node.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) @@ -155,7 +148,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis { return genesis } -func makeMiner(genesis *core.Genesis, nodes []string) (*node.Node, error) { +func makeMiner(genesis *core.Genesis) (*node.Node, error) { // Define the basic configurations for the Ethereum node datadir, _ := ioutil.TempDir("", "") diff --git a/vendor/github.com/ethereum/go-ethereum/miner/worker.go b/vendor/github.com/ethereum/go-ethereum/miner/worker.go index 8579c5c84..48473796b 100644 --- a/vendor/github.com/ethereum/go-ethereum/miner/worker.go +++ b/vendor/github.com/ethereum/go-ethereum/miner/worker.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -692,7 +691,7 @@ func (w *worker) updateSnapshot() { func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { snap := w.current.state.Snapshot() - receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, vm.Config{}) + receipt, _, err := core.ApplyTransaction(w.config, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { w.current.state.RevertToSnapshot(snap) return nil, err diff --git a/vendor/github.com/ethereum/go-ethereum/mobile/big.go b/vendor/github.com/ethereum/go-ethereum/mobile/big.go index dd7b15878..86ea93245 100644 --- a/vendor/github.com/ethereum/go-ethereum/mobile/big.go +++ b/vendor/github.com/ethereum/go-ethereum/mobile/big.go @@ -84,6 +84,13 @@ func (bi *BigInt) SetString(x string, base int) { // BigInts represents a slice of big ints. type BigInts struct{ bigints []*big.Int } +// NewBigInts creates a slice of uninitialized big numbers. +func NewBigInts(size int) *BigInts { + return &BigInts{ + bigints: make([]*big.Int, size), + } +} + // Size returns the number of big ints in the slice. func (bi *BigInts) Size() int { return len(bi.bigints) diff --git a/vendor/github.com/ethereum/go-ethereum/node/config.go b/vendor/github.com/ethereum/go-ethereum/node/config.go index 8f10f4f61..7b32a5908 100644 --- a/vendor/github.com/ethereum/go-ethereum/node/config.go +++ b/vendor/github.com/ethereum/go-ethereum/node/config.go @@ -24,6 +24,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" @@ -152,6 +153,10 @@ type Config struct { // Logger is a custom logger to use with the p2p.Server. 
Logger log.Logger `toml:",omitempty"` + + staticNodesWarning bool + trustedNodesWarning bool + oldGethResourceWarning bool } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into @@ -263,8 +268,8 @@ var isOldGethResource = map[string]bool{ "chaindata": true, "nodes": true, "nodekey": true, - "static-nodes.json": true, - "trusted-nodes.json": true, + "static-nodes.json": false, // no warning for these because they have their + "trusted-nodes.json": false, // own separate warning. } // ResolvePath resolves path in the instance directory. @@ -277,13 +282,15 @@ func (c *Config) ResolvePath(path string) string { } // Backwards-compatibility: ensure that data directory files created // by geth 1.4 are used if they exist. - if c.name() == "geth" && isOldGethResource[path] { + if warn, isOld := isOldGethResource[path]; isOld { oldpath := "" - if c.Name == "geth" { + if c.name() == "geth" { oldpath = filepath.Join(c.DataDir, path) } if oldpath != "" && common.FileExist(oldpath) { - // TODO: print warning + if warn { + c.warnOnce(&c.oldGethResourceWarning, "Using deprecated resource file %s, please move this file to the 'geth' subdirectory of datadir.", oldpath) + } return oldpath } } @@ -337,17 +344,17 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey { // StaticNodes returns a list of node enode URLs configured as static nodes. func (c *Config) StaticNodes() []*enode.Node { - return c.parsePersistentNodes(c.ResolvePath(datadirStaticNodes)) + return c.parsePersistentNodes(&c.staticNodesWarning, c.ResolvePath(datadirStaticNodes)) } // TrustedNodes returns a list of node enode URLs configured as trusted nodes. func (c *Config) TrustedNodes() []*enode.Node { - return c.parsePersistentNodes(c.ResolvePath(datadirTrustedNodes)) + return c.parsePersistentNodes(&c.trustedNodesWarning, c.ResolvePath(datadirTrustedNodes)) } // parsePersistentNodes parses a list of discovery node URLs loaded from a .json // file from within the data directory. -func (c *Config) parsePersistentNodes(path string) []*enode.Node { +func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node { // Short circuit if no node config is present if c.DataDir == "" { return nil @@ -355,10 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*enode.Node { if _, err := os.Stat(path); err != nil { return nil } + c.warnOnce(w, "Found deprecated node list file %s, please use the TOML config file instead.", path) + // Load the nodes from the config file. 
var nodelist []string if err := common.LoadJSON(path, &nodelist); err != nil { - log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err)) + log.Error(fmt.Sprintf("Can't load node list file: %v", err)) return nil } // Interpret the list as a discovery node array @@ -440,3 +449,20 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) { } return accounts.NewManager(backends...), ephemeral, nil } + +var warnLock sync.Mutex + +func (c *Config) warnOnce(w *bool, format string, args ...interface{}) { + warnLock.Lock() + defer warnLock.Unlock() + + if *w { + return + } + l := c.Logger + if l == nil { + l = log.Root() + } + l.Warn(fmt.Sprintf(format, args...)) + *w = true +} diff --git a/vendor/github.com/ethereum/go-ethereum/node/node.go b/vendor/github.com/ethereum/go-ethereum/node/node.go index 5ea58e13f..6a6474928 100644 --- a/vendor/github.com/ethereum/go-ethereum/node/node.go +++ b/vendor/github.com/ethereum/go-ethereum/node/node.go @@ -295,7 +295,7 @@ func (n *Node) startInProc(apis []rpc.API) error { if err := handler.RegisterName(api.Namespace, api.Service); err != nil { return err } - n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace) + n.log.Debug("InProc registered", "namespace", api.Namespace) } n.inprocHandler = handler return nil @@ -360,7 +360,7 @@ func (n *Node) stopIPC() { n.ipcListener.Close() n.ipcListener = nil - n.log.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint) + n.log.Info("IPC endpoint closed", "url", n.ipcEndpoint) } if n.ipcHandler != nil { n.ipcHandler.Stop() @@ -599,11 +599,23 @@ func (n *Node) IPCEndpoint() string { // HTTPEndpoint retrieves the current HTTP endpoint used by the protocol stack. func (n *Node) HTTPEndpoint() string { + n.lock.Lock() + defer n.lock.Unlock() + + if n.httpListener != nil { + return n.httpListener.Addr().String() + } return n.httpEndpoint } // WSEndpoint retrieves the current WS endpoint used by the protocol stack. 
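Config.warnOnce above latches a per-field boolean under a package-level mutex so each deprecation warning fires once per process. The same pattern in isolation, as a standalone sketch with hypothetical names and log.Warn swapped for fmt.Printf:

package main

import (
	"fmt"
	"sync"
)

var warnLock sync.Mutex

// warnOnce prints the message the first time *w is seen false, then latches it.
func warnOnce(w *bool, format string, args ...interface{}) {
	warnLock.Lock()
	defer warnLock.Unlock()
	if *w {
		return
	}
	fmt.Printf(format+"\n", args...)
	*w = true
}

func main() {
	var staticNodesWarning bool
	for i := 0; i < 3; i++ {
		// Prints exactly once despite three calls.
		warnOnce(&staticNodesWarning, "Found deprecated node list file %s", "static-nodes.json")
	}
}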
func (n *Node) WSEndpoint() string { + n.lock.Lock() + defer n.lock.Unlock() + + if n.wsListener != nil { + return n.wsListener.Addr().String() + } return n.wsEndpoint } diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/dial.go b/vendor/github.com/ethereum/go-ethereum/p2p/dial.go index 359cdbcbb..075a0f936 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/dial.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/dial.go @@ -71,6 +71,7 @@ type dialstate struct { maxDynDials int ntab discoverTable netrestrict *netutil.Netlist + self enode.ID lookupRunning bool dialing map[enode.ID]connFlag @@ -84,7 +85,6 @@ type dialstate struct { } type discoverTable interface { - Self() *enode.Node Close() Resolve(*enode.Node) *enode.Node LookupRandom() []*enode.Node @@ -126,10 +126,11 @@ type waitExpireTask struct { time.Duration } -func newDialState(static []*enode.Node, bootnodes []*enode.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate { +func newDialState(self enode.ID, static []*enode.Node, bootnodes []*enode.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate { s := &dialstate{ maxDynDials: maxdyn, ntab: ntab, + self: self, netrestrict: netrestrict, static: make(map[enode.ID]*dialTask), dialing: make(map[enode.ID]connFlag), @@ -266,7 +267,7 @@ func (s *dialstate) checkDial(n *enode.Node, peers map[enode.ID]*Peer) error { return errAlreadyDialing case peers[n.ID()] != nil: return errAlreadyConnected - case s.ntab != nil && n.ID() == s.ntab.Self().ID(): + case n.ID() == s.self: return errSelf case s.netrestrict != nil && !s.netrestrict.Contains(n.IP()): return errNotWhitelisted @@ -349,7 +350,7 @@ func (t *dialTask) dial(srv *Server, dest *enode.Node) error { if err != nil { return &dialError{err} } - mfd := newMeteredConn(fd, false) + mfd := newMeteredConn(fd, false, dest.IP()) return srv.SetupConn(mfd, t.flags, dest) } diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go index 7a3e41de1..9f7f1d41b 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go @@ -72,21 +72,20 @@ type Table struct { ips netutil.DistinctNetSet db *enode.DB // database of known nodes + net transport refreshReq chan chan struct{} initDone chan struct{} closeReq chan struct{} closed chan struct{} nodeAddedHook func(*node) // for testing - - net transport - self *node // metadata of the local node } // transport is implemented by the UDP transport. // it is an interface so we can test without opening lots of UDP // sockets and without generating a private key. 
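With the listener-aware HTTPEndpoint and WSEndpoint above, a caller can configure port 0 and read back the address the OS actually assigned. A sketch of that flow, assuming a node with the HTTP server enabled:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Port 0 lets the OS pick a free port; HTTPEndpoint now reports the
	// live listener's address instead of the configured placeholder.
	stack, err := node.New(&node.Config{HTTPHost: "127.0.0.1", HTTPPort: 0})
	if err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil {
		panic(err)
	}
	defer stack.Stop()

	fmt.Println("HTTP RPC listening on", stack.HTTPEndpoint())
}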
type transport interface { + self() *enode.Node ping(enode.ID, *net.UDPAddr) error findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error) close() @@ -100,11 +99,10 @@ type bucket struct { ips netutil.DistinctNetSet } -func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.Node) (*Table, error) { +func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) { tab := &Table{ net: t, db: db, - self: wrapNode(self), refreshReq: make(chan chan struct{}), initDone: make(chan struct{}), closeReq: make(chan struct{}), @@ -127,6 +125,10 @@ func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.No return tab, nil } +func (tab *Table) self() *enode.Node { + return tab.net.self() +} + func (tab *Table) seedRand() { var b [8]byte crand.Read(b[:]) @@ -136,11 +138,6 @@ func (tab *Table) seedRand() { tab.mutex.Unlock() } -// Self returns the local node. -func (tab *Table) Self() *enode.Node { - return unwrapNode(tab.self) -} - // ReadRandomNodes fills the given slice with random nodes from the table. The results // are guaranteed to be unique for a single invocation, no node will appear twice. func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) { @@ -183,6 +180,10 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) { // Close terminates the network listener and flushes the node database. func (tab *Table) Close() { + if tab.net != nil { + tab.net.close() + } + select { case <-tab.closed: // already closed. @@ -257,7 +258,7 @@ func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node { ) // don't query further if we hit ourself. // unlikely to happen often in practice. - asked[tab.self.ID()] = true + asked[tab.self().ID()] = true for { tab.mutex.Lock() @@ -340,8 +341,8 @@ func (tab *Table) loop() { revalidate = time.NewTimer(tab.nextRevalidateTime()) refresh = time.NewTicker(refreshInterval) copyNodes = time.NewTicker(copyNodesInterval) - revalidateDone = make(chan struct{}) refreshDone = make(chan struct{}) // where doRefresh reports completion + revalidateDone chan struct{} // where doRevalidate reports completion waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs ) defer refresh.Stop() @@ -372,9 +373,11 @@ loop: } waiting, refreshDone = nil, nil case <-revalidate.C: + revalidateDone = make(chan struct{}) go tab.doRevalidate(revalidateDone) case <-revalidateDone: revalidate.Reset(tab.nextRevalidateTime()) + revalidateDone = nil case <-copyNodes.C: go tab.copyLiveNodes() case <-tab.closeReq: @@ -382,15 +385,15 @@ loop: } } - if tab.net != nil { - tab.net.close() - } if refreshDone != nil { <-refreshDone } for _, ch := range waiting { close(ch) } + if revalidateDone != nil { + <-revalidateDone + } close(tab.closed) } @@ -408,7 +411,7 @@ func (tab *Table) doRefresh(done chan struct{}) { // Run self lookup to discover new neighbor nodes. // We can only do this if we have a secp256k1 identity. 
var key ecdsa.PublicKey - if err := tab.self.Load((*enode.Secp256k1)(&key)); err == nil { + if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil { tab.lookup(encodePubkey(&key), false) } @@ -431,7 +434,7 @@ func (tab *Table) loadSeedNodes() { for i := range seeds { seed := seeds[i] age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }} - log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age) + log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age) tab.add(seed) } } @@ -530,7 +533,7 @@ func (tab *Table) len() (n int) { // bucket returns the bucket for the given node ID hash. func (tab *Table) bucket(id enode.ID) *bucket { - d := enode.LogDist(tab.self.ID(), id) + d := enode.LogDist(tab.self().ID(), id) if d <= bucketMinDistance { return tab.buckets[0] } @@ -543,7 +546,7 @@ func (tab *Table) bucket(id enode.ID) *bucket { // // The caller must not hold tab.mutex. func (tab *Table) add(n *node) { - if n.ID() == tab.self.ID() { + if n.ID() == tab.self().ID() { return } @@ -576,7 +579,7 @@ func (tab *Table) stuff(nodes []*node) { defer tab.mutex.Unlock() for _, n := range nodes { - if n.ID() == tab.self.ID() { + if n.ID() == tab.self().ID() { continue // don't add self } b := tab.bucket(n.ID()) diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go b/vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go index 45fcce282..37a044902 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go @@ -23,12 +23,12 @@ import ( "errors" "fmt" "net" + "sync" "time" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/rlp" ) @@ -118,9 +118,11 @@ type ( ) func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint { - ip := addr.IP.To4() - if ip == nil { - ip = addr.IP.To16() + ip := net.IP{} + if ip4 := addr.IP.To4(); ip4 != nil { + ip = ip4 + } else if ip6 := addr.IP.To16(); ip6 != nil { + ip = ip6 } return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort} } @@ -165,20 +167,19 @@ type conn interface { LocalAddr() net.Addr } -// udp implements the RPC protocol. +// udp implements the discovery v4 UDP wire protocol. type udp struct { conn conn netrestrict *netutil.Netlist priv *ecdsa.PrivateKey - ourEndpoint rpcEndpoint + localNode *enode.LocalNode + db *enode.DB + tab *Table + wg sync.WaitGroup addpending chan *pending gotreply chan reply - - closing chan struct{} - nat nat.Interface - - *Table + closing chan struct{} } // pending represents a pending reply. @@ -230,60 +231,57 @@ type Config struct { PrivateKey *ecdsa.PrivateKey // These settings are optional: - AnnounceAddr *net.UDPAddr // local address announced in the DHT - NodeDBPath string // if set, the node database is stored at this filesystem location - NetRestrict *netutil.Netlist // network whitelist - Bootnodes []*enode.Node // list of bootstrap nodes - Unhandled chan<- ReadPacket // unhandled packets are sent on this channel + NetRestrict *netutil.Netlist // network whitelist + Bootnodes []*enode.Node // list of bootstrap nodes + Unhandled chan<- ReadPacket // unhandled packets are sent on this channel } // ListenUDP returns a new table that listens for UDP packets on laddr. 
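The Table.loop changes above allocate revalidateDone per run and reset it to nil afterwards, which lets shutdown distinguish an in-flight doRevalidate from an idle timer (a receive on a nil channel never fires). The scheduling pattern distilled into a standalone sketch:

package main

import "time"

func loop(closeReq <-chan struct{}) {
	revalidate := time.NewTimer(time.Second)
	defer revalidate.Stop()

	var revalidateDone chan struct{} // nil while no run is in flight

	for {
		select {
		case <-revalidate.C:
			revalidateDone = make(chan struct{})
			go func(done chan struct{}) {
				// ... revalidate the least recently seen node ...
				close(done)
			}(revalidateDone)
		case <-revalidateDone: // never selected while nil
			revalidate.Reset(time.Second)
			revalidateDone = nil
		case <-closeReq:
			if revalidateDone != nil {
				<-revalidateDone // wait for the in-flight run, as Table.loop now does
			}
			return
		}
	}
}

func main() {
	closeReq := make(chan struct{})
	go loop(closeReq)
	time.Sleep(3 * time.Second)
	close(closeReq)
}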
-func ListenUDP(c conn, cfg Config) (*Table, error) { - tab, _, err := newUDP(c, cfg) +func ListenUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, error) { + tab, _, err := newUDP(c, ln, cfg) if err != nil { return nil, err } - log.Info("UDP listener up", "self", tab.self) return tab, nil } -func newUDP(c conn, cfg Config) (*Table, *udp, error) { - realaddr := c.LocalAddr().(*net.UDPAddr) - if cfg.AnnounceAddr != nil { - realaddr = cfg.AnnounceAddr - } - self := enode.NewV4(&cfg.PrivateKey.PublicKey, realaddr.IP, realaddr.Port, realaddr.Port) - db, err := enode.OpenDB(cfg.NodeDBPath) - if err != nil { - return nil, nil, err - } - +func newUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, *udp, error) { udp := &udp{ conn: c, priv: cfg.PrivateKey, netrestrict: cfg.NetRestrict, + localNode: ln, + db: ln.Database(), closing: make(chan struct{}), gotreply: make(chan reply), addpending: make(chan *pending), } - // TODO: separate TCP port - udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port)) - tab, err := newTable(udp, self, db, cfg.Bootnodes) + tab, err := newTable(udp, ln.Database(), cfg.Bootnodes) if err != nil { return nil, nil, err } - udp.Table = tab + udp.tab = tab + udp.wg.Add(2) go udp.loop() go udp.readLoop(cfg.Unhandled) - return udp.Table, udp, nil + return udp.tab, udp, nil +} + +func (t *udp) self() *enode.Node { + return t.localNode.Node() } func (t *udp) close() { close(t.closing) t.conn.Close() - t.db.Close() - // TODO: wait for the loops to end. + t.wg.Wait() +} + +func (t *udp) ourEndpoint() rpcEndpoint { + n := t.self() + a := &net.UDPAddr{IP: n.IP(), Port: n.UDP()} + return makeEndpoint(a, uint16(n.TCP())) } // ping sends a ping message to the given node and waits for a reply. @@ -296,7 +294,7 @@ func (t *udp) ping(toid enode.ID, toaddr *net.UDPAddr) error { func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-chan error { req := &ping{ Version: 4, - From: t.ourEndpoint, + From: t.ourEndpoint(), To: makeEndpoint(toaddr, 0), // TODO: maybe use known TCP port from DB Expiration: uint64(time.Now().Add(expiration).Unix()), } @@ -313,6 +311,7 @@ func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-ch } return ok }) + t.localNode.UDPContact(toaddr) t.write(toaddr, req.name(), packet) return errc } @@ -381,6 +380,8 @@ func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool { // loop runs in its own goroutine. it keeps track of // the refresh timer and the pending reply queue. func (t *udp) loop() { + defer t.wg.Done() + var ( plist = list.New() timeout = time.NewTimer(0) @@ -542,10 +543,11 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (packet, // readLoop runs in its own goroutine. it handles incoming UDP packets. func (t *udp) readLoop(unhandled chan<- ReadPacket) { - defer t.conn.Close() + defer t.wg.Done() if unhandled != nil { defer close(unhandled) } + // Discovery packets are defined to be no larger than 1280 bytes. // Packets larger than this size will be cut at the end and treated // as invalid because their hash won't match. 
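newUDP above registers both goroutines on a WaitGroup before spawning them, so close can block until they have fully exited, resolving the old "TODO: wait for the loops to end". The shutdown skeleton, with a hypothetical udpish type standing in for udp:

package main

import "sync"

type udpish struct {
	wg      sync.WaitGroup
	closing chan struct{}
}

func newUDPish() *udpish {
	u := &udpish{closing: make(chan struct{})}
	u.wg.Add(2) // must happen before the goroutines start
	go u.loop()
	go u.readLoop()
	return u
}

func (u *udpish) loop()     { defer u.wg.Done(); <-u.closing }
func (u *udpish) readLoop() { defer u.wg.Done(); <-u.closing }

// close mirrors udp.close above: signal shutdown, then wait for both loops.
func (u *udpish) close() {
	close(u.closing)
	u.wg.Wait()
}

func main() {
	newUDPish().close()
}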
@@ -629,10 +631,11 @@ func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port)) t.handleReply(n.ID(), pingPacket, req) if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration { - t.sendPing(n.ID(), from, func() { t.addThroughPing(n) }) + t.sendPing(n.ID(), from, func() { t.tab.addThroughPing(n) }) } else { - t.addThroughPing(n) + t.tab.addThroughPing(n) } + t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) t.db.UpdateLastPingReceived(n.ID(), time.Now()) return nil } @@ -647,6 +650,7 @@ func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte if !t.handleReply(fromID, pongPacket, req) { return errUnsolicitedReply } + t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)}) t.db.UpdateLastPongReceived(fromID, time.Now()) return nil } @@ -668,9 +672,9 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac [] return errUnknownNode } target := enode.ID(crypto.Keccak256Hash(req.Target[:])) - t.mutex.Lock() - closest := t.closest(target, bucketSize).entries - t.mutex.Unlock() + t.tab.mutex.Lock() + closest := t.tab.closest(target, bucketSize).entries + t.tab.mutex.Unlock() p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())} var sent bool diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go index a6cabf080..cdeb28dd5 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/net.go @@ -567,12 +567,11 @@ loop: net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte { if n.state != nil && n.state.canQuery { return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration - } else { - if n.state == unknown { - net.ping(n, n.addr()) - } - return nil } + if n.state == unknown { + net.ping(n, n.addr()) + } + return nil }) case <-statsDump.C: diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/udp.go b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/udp.go index 49e1cb811..ff5ed983b 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/discv5/udp.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/discv5/udp.go @@ -230,7 +230,8 @@ type udp struct { } // ListenUDP returns a new table that listens for UDP packets on laddr. -func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) { +func ListenUDP(priv *ecdsa.PrivateKey, conn conn, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) { + realaddr := conn.LocalAddr().(*net.UDPAddr) transport, err := listenUDP(priv, conn, realaddr) if err != nil { return nil, err diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/enode/localnode.go b/vendor/github.com/ethereum/go-ethereum/p2p/enode/localnode.go new file mode 100644 index 000000000..623f8eae1 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/p2p/enode/localnode.go @@ -0,0 +1,246 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package enode + +import ( + "crypto/ecdsa" + "fmt" + "net" + "reflect" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/p2p/netutil" +) + +const ( + // IP tracker configuration + iptrackMinStatements = 10 + iptrackWindow = 5 * time.Minute + iptrackContactWindow = 10 * time.Minute +) + +// LocalNode produces the signed node record of a local node, i.e. a node run in the +// current process. Setting ENR entries via the Set method updates the record. A new version +// of the record is signed on demand when the Node method is called. +type LocalNode struct { + cur atomic.Value // holds a non-nil node pointer while the record is up-to-date. + id ID + key *ecdsa.PrivateKey + db *DB + + // everything below is protected by a lock + mu sync.Mutex + seq uint64 + entries map[string]enr.Entry + udpTrack *netutil.IPTracker // predicts external UDP endpoint + staticIP net.IP + fallbackIP net.IP + fallbackUDP int +} + +// NewLocalNode creates a local node. +func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode { + ln := &LocalNode{ + id: PubkeyToIDV4(&key.PublicKey), + db: db, + key: key, + udpTrack: netutil.NewIPTracker(iptrackWindow, iptrackContactWindow, iptrackMinStatements), + entries: make(map[string]enr.Entry), + } + ln.seq = db.localSeq(ln.id) + ln.invalidate() + return ln +} + +// Database returns the node database associated with the local node. +func (ln *LocalNode) Database() *DB { + return ln.db +} + +// Node returns the current version of the local node record. +func (ln *LocalNode) Node() *Node { + n := ln.cur.Load().(*Node) + if n != nil { + return n + } + // Record was invalidated, sign a new copy. + ln.mu.Lock() + defer ln.mu.Unlock() + ln.sign() + return ln.cur.Load().(*Node) +} + +// ID returns the local node ID. +func (ln *LocalNode) ID() ID { + return ln.id +} + +// Set puts the given entry into the local record, overwriting +// any existing value. +func (ln *LocalNode) Set(e enr.Entry) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.set(e) +} + +func (ln *LocalNode) set(e enr.Entry) { + val, exists := ln.entries[e.ENRKey()] + if !exists || !reflect.DeepEqual(val, e) { + ln.entries[e.ENRKey()] = e + ln.invalidate() + } +} + +// Delete removes the given entry from the local record. +func (ln *LocalNode) Delete(e enr.Entry) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.delete(e) +} + +func (ln *LocalNode) delete(e enr.Entry) { + _, exists := ln.entries[e.ENRKey()] + if exists { + delete(ln.entries, e.ENRKey()) + ln.invalidate() + } +} + +// SetStaticIP sets the local IP to the given one unconditionally. +// This disables endpoint prediction. +func (ln *LocalNode) SetStaticIP(ip net.IP) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.staticIP = ip + ln.updateEndpoints() +} + +// SetFallbackIP sets the last-resort IP address. This address is used +// if no endpoint prediction can be made and no static IP is set. 
+func (ln *LocalNode) SetFallbackIP(ip net.IP) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.fallbackIP = ip + ln.updateEndpoints() +} + +// SetFallbackUDP sets the last-resort UDP port. This port is used +// if no endpoint prediction can be made. +func (ln *LocalNode) SetFallbackUDP(port int) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.fallbackUDP = port + ln.updateEndpoints() +} + +// UDPEndpointStatement should be called whenever a statement about the local node's +// UDP endpoint is received. It feeds the local endpoint predictor. +func (ln *LocalNode) UDPEndpointStatement(fromaddr, endpoint *net.UDPAddr) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.udpTrack.AddStatement(fromaddr.String(), endpoint.String()) + ln.updateEndpoints() +} + +// UDPContact should be called whenever the local node has announced itself to another node +// via UDP. It feeds the local endpoint predictor. +func (ln *LocalNode) UDPContact(toaddr *net.UDPAddr) { + ln.mu.Lock() + defer ln.mu.Unlock() + + ln.udpTrack.AddContact(toaddr.String()) + ln.updateEndpoints() +} + +func (ln *LocalNode) updateEndpoints() { + // Determine the endpoints. + newIP := ln.fallbackIP + newUDP := ln.fallbackUDP + if ln.staticIP != nil { + newIP = ln.staticIP + } else if ip, port := predictAddr(ln.udpTrack); ip != nil { + newIP = ip + newUDP = port + } + + // Update the record. + if newIP != nil && !newIP.IsUnspecified() { + ln.set(enr.IP(newIP)) + if newUDP != 0 { + ln.set(enr.UDP(newUDP)) + } else { + ln.delete(enr.UDP(0)) + } + } else { + ln.delete(enr.IP{}) + } +} + +// predictAddr wraps IPTracker.PredictEndpoint, converting from its string-based +// endpoint representation to IP and port types. +func predictAddr(t *netutil.IPTracker) (net.IP, int) { + ep := t.PredictEndpoint() + if ep == "" { + return nil, 0 + } + ipString, portString, _ := net.SplitHostPort(ep) + ip := net.ParseIP(ipString) + port, _ := strconv.Atoi(portString) + return ip, port +} + +func (ln *LocalNode) invalidate() { + ln.cur.Store((*Node)(nil)) +} + +func (ln *LocalNode) sign() { + if n := ln.cur.Load().(*Node); n != nil { + return // no changes + } + + var r enr.Record + for _, e := range ln.entries { + r.Set(e) + } + ln.bumpSeq() + r.SetSeq(ln.seq) + if err := SignV4(&r, ln.key); err != nil { + panic(fmt.Errorf("enode: can't sign record: %v", err)) + } + n, err := New(ValidSchemes, &r) + if err != nil { + panic(fmt.Errorf("enode: can't verify local record: %v", err)) + } + ln.cur.Store(n) + log.Info("New local node record", "seq", ln.seq, "id", n.ID(), "ip", n.IP(), "udp", n.UDP(), "tcp", n.TCP()) +} + +func (ln *LocalNode) bumpSeq() { + ln.seq++ + ln.db.storeLocalSeq(ln.id, ln.seq) +} diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb.go b/vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb.go index a929b75d7..7ee0c09a9 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb.go @@ -35,11 +35,24 @@ import ( "github.com/syndtr/goleveldb/leveldb/util" ) +// Keys in the node database. 
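How the LocalNode pieces fit together, sketched: the discovery transport feeds UDPContact and UDPEndpointStatement, and the signed record keeps the fallback endpoint until enough statements (iptrackMinStatements, i.e. 10) allow a prediction. Addresses below are illustrative:

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	db, _ := enode.OpenDB("") // empty path: in-memory database
	key, _ := crypto.GenerateKey()
	ln := enode.NewLocalNode(db, key)

	ln.SetFallbackIP(net.ParseIP("127.0.0.1"))
	ln.SetFallbackUDP(30303)

	// Discovery would call these as packets flow; one statement is far below
	// iptrackMinStatements, so the fallback endpoint still wins here.
	remote := &net.UDPAddr{IP: net.ParseIP("10.0.0.1"), Port: 30303}
	ln.UDPContact(remote)
	ln.UDPEndpointStatement(remote, &net.UDPAddr{IP: net.ParseIP("203.0.113.7"), Port: 30303})

	n := ln.Node() // signs a fresh record on demand
	fmt.Println(n.IP(), n.UDP(), "seq", n.Seq())
}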
+const ( + dbVersionKey = "version" // Version of the database to flush if changes + dbItemPrefix = "n:" // Identifier to prefix node entries with + + dbDiscoverRoot = ":discover" + dbDiscoverSeq = dbDiscoverRoot + ":seq" + dbDiscoverPing = dbDiscoverRoot + ":lastping" + dbDiscoverPong = dbDiscoverRoot + ":lastpong" + dbDiscoverFindFails = dbDiscoverRoot + ":findfail" + dbLocalRoot = ":local" + dbLocalSeq = dbLocalRoot + ":seq" +) + var ( - nodeDBNilID = ID{} // Special node ID to use as a nil element. - nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped. - nodeDBCleanupCycle = time.Hour // Time period for running the expiration task. - nodeDBVersion = 6 + dbNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped. + dbCleanupCycle = time.Hour // Time period for running the expiration task. + dbVersion = 7 ) // DB is the node database, storing previously seen nodes and any collected metadata about @@ -50,17 +63,6 @@ type DB struct { quit chan struct{} // Channel to signal the expiring thread to stop } -// Schema layout for the node database -var ( - nodeDBVersionKey = []byte("version") // Version of the database to flush if changes - nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with - - nodeDBDiscoverRoot = ":discover" - nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping" - nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong" - nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail" -) - // OpenDB opens a node database for storing and retrieving infos about known peers in the // network. If no path is given an in-memory, temporary database is constructed. func OpenDB(path string) (*DB, error) { @@ -93,13 +95,13 @@ func newPersistentDB(path string) (*DB, error) { // The nodes contained in the cache correspond to a certain protocol version. // Flush all nodes if the version doesn't match. currentVer := make([]byte, binary.MaxVarintLen64) - currentVer = currentVer[:binary.PutVarint(currentVer, int64(nodeDBVersion))] + currentVer = currentVer[:binary.PutVarint(currentVer, int64(dbVersion))] - blob, err := db.Get(nodeDBVersionKey, nil) + blob, err := db.Get([]byte(dbVersionKey), nil) switch err { case leveldb.ErrNotFound: // Version not found (i.e. empty cache), insert it - if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil { + if err := db.Put([]byte(dbVersionKey), currentVer, nil); err != nil { db.Close() return nil, err } @@ -120,28 +122,27 @@ func newPersistentDB(path string) (*DB, error) { // makeKey generates the leveldb key-blob from a node id and its particular // field of interest. func makeKey(id ID, field string) []byte { - if bytes.Equal(id[:], nodeDBNilID[:]) { + if (id == ID{}) { return []byte(field) } - return append(nodeDBItemPrefix, append(id[:], field...)...) + return append([]byte(dbItemPrefix), append(id[:], field...)...) } // splitKey tries to split a database key into a node id and a field part. func splitKey(key []byte) (id ID, field string) { // If the key is not of a node, return it plainly - if !bytes.HasPrefix(key, nodeDBItemPrefix) { + if !bytes.HasPrefix(key, []byte(dbItemPrefix)) { return ID{}, string(key) } // Otherwise split the id and field - item := key[len(nodeDBItemPrefix):] + item := key[len(dbItemPrefix):] copy(id[:], item[:len(id)]) field = string(item[len(id):]) return id, field } -// fetchInt64 retrieves an integer instance associated with a particular -// database key. +// fetchInt64 retrieves an integer associated with a particular key. 
func (db *DB) fetchInt64(key []byte) int64 { blob, err := db.lvl.Get(key, nil) if err != nil { @@ -154,18 +155,33 @@ func (db *DB) fetchInt64(key []byte) int64 { return val } -// storeInt64 update a specific database entry to the current time instance as a -// unix timestamp. +// storeInt64 stores an integer in the given key. func (db *DB) storeInt64(key []byte, n int64) error { blob := make([]byte, binary.MaxVarintLen64) blob = blob[:binary.PutVarint(blob, n)] + return db.lvl.Put(key, blob, nil) +} +// fetchUint64 retrieves an integer associated with a particular key. +func (db *DB) fetchUint64(key []byte) uint64 { + blob, err := db.lvl.Get(key, nil) + if err != nil { + return 0 + } + val, _ := binary.Uvarint(blob) + return val +} + +// storeUint64 stores an integer in the given key. +func (db *DB) storeUint64(key []byte, n uint64) error { + blob := make([]byte, binary.MaxVarintLen64) + blob = blob[:binary.PutUvarint(blob, n)] return db.lvl.Put(key, blob, nil) } // Node retrieves a node with a given id from the database. func (db *DB) Node(id ID) *Node { - blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil) + blob, err := db.lvl.Get(makeKey(id, dbDiscoverRoot), nil) if err != nil { return nil } @@ -184,11 +200,31 @@ func mustDecodeNode(id, data []byte) *Node { // UpdateNode inserts - potentially overwriting - a node into the peer database. func (db *DB) UpdateNode(node *Node) error { + if node.Seq() < db.NodeSeq(node.ID()) { + return nil + } blob, err := rlp.EncodeToBytes(&node.r) if err != nil { return err } - return db.lvl.Put(makeKey(node.ID(), nodeDBDiscoverRoot), blob, nil) + if err := db.lvl.Put(makeKey(node.ID(), dbDiscoverRoot), blob, nil); err != nil { + return err + } + return db.storeUint64(makeKey(node.ID(), dbDiscoverSeq), node.Seq()) +} + +// NodeSeq returns the stored record sequence number of the given node. +func (db *DB) NodeSeq(id ID) uint64 { + return db.fetchUint64(makeKey(id, dbDiscoverSeq)) +} + +// Resolve returns the stored record of the node if it has a larger sequence +// number than n. +func (db *DB) Resolve(n *Node) *Node { + if n.Seq() > db.NodeSeq(n.ID()) { + return n + } + return db.Node(n.ID()) } // DeleteNode deletes all information/keys associated with a node. @@ -218,7 +254,7 @@ func (db *DB) ensureExpirer() { // expirer should be started in a go routine, and is responsible for looping ad // infinitum and dropping stale data from the database. func (db *DB) expirer() { - tick := time.NewTicker(nodeDBCleanupCycle) + tick := time.NewTicker(dbCleanupCycle) defer tick.Stop() for { select { @@ -235,7 +271,7 @@ func (db *DB) expirer() { // expireNodes iterates over the database and deletes all nodes that have not // been seen (i.e. received a pong from) for some allotted time. func (db *DB) expireNodes() error { - threshold := time.Now().Add(-nodeDBNodeExpiration) + threshold := time.Now().Add(-dbNodeExpiration) // Find discovered nodes that are older than the allowance it := db.lvl.NewIterator(nil, nil) @@ -244,7 +280,7 @@ func (db *DB) expireNodes() error { for it.Next() { // Skip the item if not a discovery node id, field := splitKey(it.Key()) - if field != nodeDBDiscoverRoot { + if field != dbDiscoverRoot { continue } // Skip the node if not expired yet (and not self) @@ -260,34 +296,44 @@ func (db *DB) expireNodes() error { // LastPingReceived retrieves the time of the last ping packet received from // a remote node. 
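The new fetchUint64/storeUint64 helpers round-trip values through variable-length encoding exactly like their int64 counterparts. A self-contained illustration, with a map standing in for LevelDB and an illustrative key literal:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	store := map[string][]byte{}

	// storeUint64: encode into a maximal buffer, then trim to the written size.
	blob := make([]byte, binary.MaxVarintLen64)
	blob = blob[:binary.PutUvarint(blob, 42)]
	store["n:<id>:local:seq"] = blob

	// fetchUint64: decode, discarding the consumed-byte count.
	val, _ := binary.Uvarint(store["n:<id>:local:seq"])
	fmt.Println(val) // 42
}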
func (db *DB) LastPingReceived(id ID) time.Time { - return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0) + return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPing)), 0) } // UpdateLastPingReceived updates the last time we tried contacting a remote node. func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix()) + return db.storeInt64(makeKey(id, dbDiscoverPing), instance.Unix()) } // LastPongReceived retrieves the time of the last successful pong from remote node. func (db *DB) LastPongReceived(id ID) time.Time { // Launch expirer db.ensureExpirer() - return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0) + return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPong)), 0) } // UpdateLastPongReceived updates the last pong time of a node. func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix()) + return db.storeInt64(makeKey(id, dbDiscoverPong), instance.Unix()) } // FindFails retrieves the number of findnode failures since bonding. func (db *DB) FindFails(id ID) int { - return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails))) + return int(db.fetchInt64(makeKey(id, dbDiscoverFindFails))) } // UpdateFindFails updates the number of findnode failures since bonding. func (db *DB) UpdateFindFails(id ID, fails int) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails)) + return db.storeInt64(makeKey(id, dbDiscoverFindFails), int64(fails)) +} + +// LocalSeq retrieves the local record sequence counter. +func (db *DB) localSeq(id ID) uint64 { + return db.fetchUint64(makeKey(id, dbLocalSeq)) +} + +// storeLocalSeq stores the local record sequence counter. 
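UpdateNode and Resolve above turn the database into a last-writer-wins store keyed by record sequence number. Reduced to its essence with hypothetical types (no RLP, no LevelDB):

package main

import "fmt"

type record struct{ seq uint64 }

type store map[string]record

// update mirrors DB.UpdateNode: a record older than the stored one is ignored.
func (s store) update(id string, r record) {
	if r.seq < s[id].seq {
		return
	}
	s[id] = r
}

// resolve mirrors DB.Resolve: return the fresher of the caller's copy and ours.
func (s store) resolve(id string, r record) record {
	if r.seq > s[id].seq {
		return r
	}
	return s[id]
}

func main() {
	s := store{}
	s.update("n1", record{seq: 5})
	s.update("n1", record{seq: 3}) // stale, dropped
	fmt.Println(s.resolve("n1", record{seq: 4}).seq) // 5
}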
+func (db *DB) storeLocalSeq(id ID, n uint64) { + db.storeUint64(makeKey(id, dbLocalSeq), n) } // QuerySeeds retrieves random nodes to be used as potential seed nodes @@ -309,7 +355,7 @@ seek: ctr := id[0] rand.Read(id[:]) id[0] = ctr + id[0]%16 - it.Seek(makeKey(id, nodeDBDiscoverRoot)) + it.Seek(makeKey(id, dbDiscoverRoot)) n := nextNode(it) if n == nil { @@ -334,7 +380,7 @@ seek: func nextNode(it iterator.Iterator) *Node { for end := false; !end; end = !it.Next() { id, field := splitKey(it.Key()) - if field != nodeDBDiscoverRoot { + if field != dbDiscoverRoot { continue } return mustDecodeNode(id[:], it.Value()) diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/enr/enr.go b/vendor/github.com/ethereum/go-ethereum/p2p/enr/enr.go index 251caf458..444820c15 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/enr/enr.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/enr/enr.go @@ -156,7 +156,7 @@ func (r *Record) Set(e Entry) { } func (r *Record) invalidate() { - if r.signature == nil { + if r.signature != nil { r.seq++ } r.signature = nil diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/metrics.go b/vendor/github.com/ethereum/go-ethereum/p2p/metrics.go index 2d52fd1fd..8df82bb07 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/metrics.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/metrics.go @@ -19,53 +19,214 @@ package p2p import ( + "fmt" "net" + "sync" + "sync/atomic" + "time" + "github.com/ethereum/go-ethereum/p2p/enode" + + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" ) -var ( - ingressConnectMeter = metrics.NewRegisteredMeter("p2p/InboundConnects", nil) - ingressTrafficMeter = metrics.NewRegisteredMeter("p2p/InboundTraffic", nil) - egressConnectMeter = metrics.NewRegisteredMeter("p2p/OutboundConnects", nil) - egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil) +const ( + MetricsInboundConnects = "p2p/InboundConnects" // Name for the registered inbound connects meter + MetricsInboundTraffic = "p2p/InboundTraffic" // Name for the registered inbound traffic meter + MetricsOutboundConnects = "p2p/OutboundConnects" // Name for the registered outbound connects meter + MetricsOutboundTraffic = "p2p/OutboundTraffic" // Name for the registered outbound traffic meter + + MeteredPeerLimit = 1024 // This amount of peers are individually metered ) +var ( + ingressConnectMeter = metrics.NewRegisteredMeter(MetricsInboundConnects, nil) // Meter counting the ingress connections + ingressTrafficMeter = metrics.NewRegisteredMeter(MetricsInboundTraffic, nil) // Meter metering the cumulative ingress traffic + egressConnectMeter = metrics.NewRegisteredMeter(MetricsOutboundConnects, nil) // Meter counting the egress connections + egressTrafficMeter = metrics.NewRegisteredMeter(MetricsOutboundTraffic, nil) // Meter metering the cumulative egress traffic + + PeerIngressRegistry = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsInboundTraffic+"/") // Registry containing the peer ingress + PeerEgressRegistry = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsOutboundTraffic+"/") // Registry containing the peer egress + + meteredPeerFeed event.Feed // Event feed for peer metrics + meteredPeerCount int32 // Actually stored peer connection count +) + +// MeteredPeerEventType is the type of peer events emitted by a metered connection. 
+type MeteredPeerEventType int + +const ( + // PeerConnected is the type of event emitted when a peer successfully + // made the handshake. + PeerConnected MeteredPeerEventType = iota + + // PeerDisconnected is the type of event emitted when a peer disconnects. + PeerDisconnected + + // PeerHandshakeFailed is the type of event emitted when a peer fails to + // make the handshake or disconnects before the handshake. + PeerHandshakeFailed +) + +// MeteredPeerEvent is an event emitted when peers connect or disconnect. +type MeteredPeerEvent struct { + Type MeteredPeerEventType // Type of peer event + IP net.IP // IP address of the peer + ID enode.ID // NodeID of the peer + Elapsed time.Duration // Time elapsed between the connection and the handshake/disconnection + Ingress uint64 // Ingress count at the moment of the event + Egress uint64 // Egress count at the moment of the event +} + +// SubscribeMeteredPeerEvent registers a subscription for peer life-cycle events +// if metrics collection is enabled. +func SubscribeMeteredPeerEvent(ch chan<- MeteredPeerEvent) event.Subscription { + return meteredPeerFeed.Subscribe(ch) +} + // meteredConn is a wrapper around a net.Conn that meters both the // inbound and outbound network traffic. type meteredConn struct { net.Conn // Network connection to wrap with metering + + connected time.Time // Connection time of the peer + ip net.IP // IP address of the peer + id enode.ID // NodeID of the peer + + // trafficMetered denotes if the peer is registered in the traffic registries. + // Its value is true if the metered peer count doesn't reach the limit in the + // moment of the peer's connection. + trafficMetered bool + ingressMeter metrics.Meter // Meter for the read bytes of the peer + egressMeter metrics.Meter // Meter for the written bytes of the peer + + lock sync.RWMutex // Lock protecting the metered connection's internals } -// newMeteredConn creates a new metered connection, also bumping the ingress or -// egress connection meter. If the metrics system is disabled, this function -// returns the original object. -func newMeteredConn(conn net.Conn, ingress bool) net.Conn { +// newMeteredConn creates a new metered connection, bumps the ingress or egress +// connection meter and also increases the metered peer count. If the metrics +// system is disabled or the IP address is unspecified, this function returns +// the original object. +func newMeteredConn(conn net.Conn, ingress bool, ip net.IP) net.Conn { // Short circuit if metrics are disabled if !metrics.Enabled { return conn } - // Otherwise bump the connection counters and wrap the connection + if ip.IsUnspecified() { + log.Warn("Peer IP is unspecified") + return conn + } + // Bump the connection counters and wrap the connection if ingress { ingressConnectMeter.Mark(1) } else { egressConnectMeter.Mark(1) } - return &meteredConn{Conn: conn} + return &meteredConn{ + Conn: conn, + ip: ip, + connected: time.Now(), + } } -// Read delegates a network read to the underlying connection, bumping the ingress -// traffic meter along the way. +// Read delegates a network read to the underlying connection, bumping the common +// and the peer ingress traffic meters along the way. 
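The event feed above lets external code observe per-peer life-cycle and traffic totals. A consumer sketch; events only arrive when metrics are enabled and a p2p.Server is actually running:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p"
)

func watchPeers() {
	ch := make(chan p2p.MeteredPeerEvent, 16)
	sub := p2p.SubscribeMeteredPeerEvent(ch)
	defer sub.Unsubscribe()

	for ev := range ch {
		switch ev.Type {
		case p2p.PeerConnected:
			fmt.Println("connected", ev.IP, ev.ID, "handshake took", ev.Elapsed)
		case p2p.PeerDisconnected:
			fmt.Println("disconnected", ev.ID, "ingress", ev.Ingress, "egress", ev.Egress)
		case p2p.PeerHandshakeFailed:
			fmt.Println("handshake failed from", ev.IP, "after", ev.Elapsed)
		}
	}
}

func main() {
	go watchPeers()
	select {} // a real caller would start a p2p.Server here
}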
func (c *meteredConn) Read(b []byte) (n int, err error) { n, err = c.Conn.Read(b) ingressTrafficMeter.Mark(int64(n)) - return + c.lock.RLock() + if c.trafficMetered { + c.ingressMeter.Mark(int64(n)) + } + c.lock.RUnlock() + return n, err } -// Write delegates a network write to the underlying connection, bumping the -// egress traffic meter along the way. +// Write delegates a network write to the underlying connection, bumping the common +// and the peer egress traffic meters along the way. func (c *meteredConn) Write(b []byte) (n int, err error) { n, err = c.Conn.Write(b) egressTrafficMeter.Mark(int64(n)) - return + c.lock.RLock() + if c.trafficMetered { + c.egressMeter.Mark(int64(n)) + } + c.lock.RUnlock() + return n, err +} + +// handshakeDone is called when a peer handshake is done. Registers the peer to +// the ingress and the egress traffic registries using the peer's IP and node ID, +// also emits connect event. +func (c *meteredConn) handshakeDone(id enode.ID) { + if atomic.AddInt32(&meteredPeerCount, 1) >= MeteredPeerLimit { + // Don't register the peer in the traffic registries. + atomic.AddInt32(&meteredPeerCount, -1) + c.lock.Lock() + c.id, c.trafficMetered = id, false + c.lock.Unlock() + log.Warn("Metered peer count reached the limit") + } else { + key := fmt.Sprintf("%s/%s", c.ip, id.String()) + c.lock.Lock() + c.id, c.trafficMetered = id, true + c.ingressMeter = metrics.NewRegisteredMeter(key, PeerIngressRegistry) + c.egressMeter = metrics.NewRegisteredMeter(key, PeerEgressRegistry) + c.lock.Unlock() + } + meteredPeerFeed.Send(MeteredPeerEvent{ + Type: PeerConnected, + IP: c.ip, + ID: id, + Elapsed: time.Since(c.connected), + }) +} + +// Close delegates a close operation to the underlying connection, unregisters +// the peer from the traffic registries and emits close event. +func (c *meteredConn) Close() error { + err := c.Conn.Close() + c.lock.RLock() + if c.id == (enode.ID{}) { + // If the peer disconnects before the handshake. + c.lock.RUnlock() + meteredPeerFeed.Send(MeteredPeerEvent{ + Type: PeerHandshakeFailed, + IP: c.ip, + Elapsed: time.Since(c.connected), + }) + return err + } + id := c.id + if !c.trafficMetered { + // If the peer isn't registered in the traffic registries. + c.lock.RUnlock() + meteredPeerFeed.Send(MeteredPeerEvent{ + Type: PeerDisconnected, + IP: c.ip, + ID: id, + }) + return err + } + ingress, egress := uint64(c.ingressMeter.Count()), uint64(c.egressMeter.Count()) + c.lock.RUnlock() + + // Decrement the metered peer count + atomic.AddInt32(&meteredPeerCount, -1) + + // Unregister the peer from the traffic registries + key := fmt.Sprintf("%s/%s", c.ip, id) + PeerIngressRegistry.Unregister(key) + PeerEgressRegistry.Unregister(key) + + meteredPeerFeed.Send(MeteredPeerEvent{ + Type: PeerDisconnected, + IP: c.ip, + ID: id, + Ingress: ingress, + Egress: egress, + }) + return err } diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/nat/nat.go b/vendor/github.com/ethereum/go-ethereum/p2p/nat/nat.go index a254648c6..8fad921c4 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/nat/nat.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/nat/nat.go @@ -129,21 +129,15 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na // ExtIP assumes that the local machine is reachable on the given // external IP address, and that any required ports were mapped manually. // Mapping operations will not return an error but won't actually do anything. 
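Because ExtIP becomes a named type rather than a constructor, existing nat.ExtIP(ip) call sites keep compiling as type conversions; the nil-check panic of the old constructor is simply gone. A sketch:

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/p2p/nat"
)

func main() {
	// A type conversion, not a function call, after the change above.
	var iface nat.Interface = nat.ExtIP(net.ParseIP("203.0.113.7"))

	ip, _ := iface.ExternalIP() // always succeeds for ExtIP
	fmt.Println(iface, ip)
}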
-func ExtIP(ip net.IP) Interface { - if ip == nil { - panic("IP must not be nil") - } - return extIP(ip) -} +type ExtIP net.IP -type extIP net.IP - -func (n extIP) ExternalIP() (net.IP, error) { return net.IP(n), nil } -func (n extIP) String() string { return fmt.Sprintf("ExtIP(%v)", net.IP(n)) } +func (n ExtIP) ExternalIP() (net.IP, error) { return net.IP(n), nil } +func (n ExtIP) String() string { return fmt.Sprintf("ExtIP(%v)", net.IP(n)) } // These do nothing. -func (extIP) AddMapping(string, int, int, string, time.Duration) error { return nil } -func (extIP) DeleteMapping(string, int, int) error { return nil } + +func (ExtIP) AddMapping(string, int, int, string, time.Duration) error { return nil } +func (ExtIP) DeleteMapping(string, int, int) error { return nil } // Any returns a port mapper that tries to discover any supported // mechanism on the local network. diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/netutil/iptrack.go b/vendor/github.com/ethereum/go-ethereum/p2p/netutil/iptrack.go new file mode 100644 index 000000000..b9cbd5e1c --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/p2p/netutil/iptrack.go @@ -0,0 +1,130 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package netutil + +import ( + "time" + + "github.com/ethereum/go-ethereum/common/mclock" +) + +// IPTracker predicts the external endpoint, i.e. IP address and port, of the local host +// based on statements made by other hosts. +type IPTracker struct { + window time.Duration + contactWindow time.Duration + minStatements int + clock mclock.Clock + statements map[string]ipStatement + contact map[string]mclock.AbsTime + lastStatementGC mclock.AbsTime + lastContactGC mclock.AbsTime +} + +type ipStatement struct { + endpoint string + time mclock.AbsTime +} + +// NewIPTracker creates an IP tracker. +// +// The window parameters configure the amount of past network events which are kept. The +// minStatements parameter enforces a minimum number of statements which must be recorded +// before any prediction is made. Higher values for these parameters decrease 'flapping' of +// predictions as network conditions change. Window duration values should typically be in +// the range of minutes. +func NewIPTracker(window, contactWindow time.Duration, minStatements int) *IPTracker { + return &IPTracker{ + window: window, + contactWindow: contactWindow, + statements: make(map[string]ipStatement), + minStatements: minStatements, + contact: make(map[string]mclock.AbsTime), + clock: mclock.System{}, + } +} + +// PredictFullConeNAT checks whether the local host is behind full cone NAT. It predicts by +// checking whether any statement has been received from a node we didn't contact before +// the statement was made. 
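A runnable sketch of the tracker's intended flow: record outbound contacts, record the statements peers make about our endpoint, then query the prediction once minStatements is reached (3 here for brevity; discovery uses 10). Hosts and endpoints are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

func main() {
	it := netutil.NewIPTracker(5*time.Minute, 10*time.Minute, 3)

	for _, host := range []string{"10.0.0.1:30303", "10.0.0.2:30303", "10.0.0.3:30303"} {
		it.AddContact(host)                        // we sent our endpoint to host
		it.AddStatement(host, "203.0.113.7:30303") // host reported what it saw
	}

	fmt.Println(it.PredictEndpoint())    // "203.0.113.7:30303"
	fmt.Println(it.PredictFullConeNAT()) // false: every statement followed a contact
}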
+func (it *IPTracker) PredictFullConeNAT() bool { + now := it.clock.Now() + it.gcContact(now) + it.gcStatements(now) + for host, st := range it.statements { + if c, ok := it.contact[host]; !ok || c > st.time { + return true + } + } + return false +} + +// PredictEndpoint returns the current prediction of the external endpoint. +func (it *IPTracker) PredictEndpoint() string { + it.gcStatements(it.clock.Now()) + + // The current strategy is simple: find the endpoint with most statements. + counts := make(map[string]int) + maxcount, max := 0, "" + for _, s := range it.statements { + c := counts[s.endpoint] + 1 + counts[s.endpoint] = c + if c > maxcount && c >= it.minStatements { + maxcount, max = c, s.endpoint + } + } + return max +} + +// AddStatement records that a certain host thinks our external endpoint is the one given. +func (it *IPTracker) AddStatement(host, endpoint string) { + now := it.clock.Now() + it.statements[host] = ipStatement{endpoint, now} + if time.Duration(now-it.lastStatementGC) >= it.window { + it.gcStatements(now) + } +} + +// AddContact records that a packet containing our endpoint information has been sent to a +// certain host. +func (it *IPTracker) AddContact(host string) { + now := it.clock.Now() + it.contact[host] = now + if time.Duration(now-it.lastContactGC) >= it.contactWindow { + it.gcContact(now) + } +} + +func (it *IPTracker) gcStatements(now mclock.AbsTime) { + it.lastStatementGC = now + cutoff := now.Add(-it.window) + for host, s := range it.statements { + if s.time < cutoff { + delete(it.statements, host) + } + } +} + +func (it *IPTracker) gcContact(now mclock.AbsTime) { + it.lastContactGC = now + cutoff := now.Add(-it.contactWindow) + for host, ct := range it.contact { + if ct < cutoff { + delete(it.contact, host) + } + } +} diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocol.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocol.go index 4b90a2a70..9438ab8e4 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/protocol.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocol.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" ) // Protocol represents a P2P subprotocol implementation. @@ -52,6 +53,9 @@ type Protocol struct { // about a certain peer in the network. If an info retrieval function is set, // but returns nil, it is assumed that the protocol handshake is still running. PeerInfo func(id enode.ID) interface{} + + // Attributes contains protocol specific information for the node record. 
+	Attributes []enr.Entry
 }

 func (p Protocol) cap() Cap {
@@ -64,10 +68,6 @@ type Cap struct {
 	Version uint
 }

-func (cap Cap) RlpData() interface{} {
-	return []interface{}{cap.Name, cap.Version}
-}
-
 func (cap Cap) String() string {
 	return fmt.Sprintf("%s/%d", cap.Name, cap.Version)
 }
@@ -79,3 +79,5 @@ func (cs capsByNameAndVersion) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }
 func (cs capsByNameAndVersion) Less(i, j int) bool {
 	return cs[i].Name < cs[j].Name || (cs[i].Name == cs[j].Name && cs[i].Version < cs[j].Version)
 }
+
+func (capsByNameAndVersion) ENRKey() string { return "cap" }
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
new file mode 100644
index 000000000..770406a27
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
@@ -0,0 +1,195 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/metrics"
+)
+
+//define some metrics
+var (
+	//All metrics are cumulative
+
+	//total amount of units credited
+	mBalanceCredit metrics.Counter
+	//total amount of units debited
+	mBalanceDebit metrics.Counter
+	//total amount of bytes credited
+	mBytesCredit metrics.Counter
+	//total amount of bytes debited
+	mBytesDebit metrics.Counter
+	//total amount of credited messages
+	mMsgCredit metrics.Counter
+	//total amount of debited messages
+	mMsgDebit metrics.Counter
+	//how many times the local node had to drop remote peers
+	mPeerDrops metrics.Counter
+	//how many times the local node went into overdraft and was dropped
+	mSelfDrops metrics.Counter
+)
+
+//Prices defines how message prices are passed on to the accounting instance
+type Prices interface {
+	//Return the Price for a message
+	Price(interface{}) *Price
+}
+
+type Payer bool
+
+const (
+	Sender   = Payer(true)
+	Receiver = Payer(false)
+)
+
+//Price represents the cost of a message
+type Price struct {
+	Value   uint64 //the price value, in accounting units
+	PerByte bool   //true if the price is per byte, false if it is per message
+	Payer   Payer  //which side pays: Sender or Receiver
+}
+
+//For returns the price of a message as a signed amount from the local node's
+//point of view. A protocol provides the message price as an absolute value;
+//this method applies the sign depending on who pays, which is identified by
+//the `payer` argument: `Send` passes `Sender`, `Receive` passes `Receiver`.
+//If the local node is the payer (p.Payer == payer), the amount is negative
+//(a debit to the local node); otherwise it is positive (a credit).
+func (p *Price) For(payer Payer, size uint32) int64 {
+	price := p.Value
+	if p.PerByte {
+		price *= uint64(size)
+	}
+	if p.Payer == payer {
+		return 0 - int64(price)
+	}
+	return int64(price)
+}
+
+//Balance defines the operations needed for accounting.
+//Implementations internally maintain the balance for every peer.
+type Balance interface {
+	//Add adds amount to the local balance with remote node `peer`;
+	//positive amount = credit local node
+	//negative amount = debit local node
+	Add(amount int64, peer *Peer) error
+}
+
+//Accounting implements the Hook interface.
+//It uses the Balance interface for the actual accounting logic and the
+//Prices interface to look up message prices.
+type Accounting struct {
+	Balance //interface to accounting logic
+	Prices  //interface to prices logic
+}
+
+func NewAccounting(balance Balance, po Prices) *Accounting {
+	ah := &Accounting{
+		Prices:  po,
+		Balance: balance,
+	}
+	return ah
+}
+
+//SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
+//this registry should be independent of any other metrics because it is persisted separately.
+//It also instantiates the given metrics and starts the persisting goroutine, which
+//writes the metrics to a LevelDB at the passed interval.
+func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
+	//create an empty registry
+	registry := metrics.NewRegistry()
+	//instantiate the metrics
+	mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", registry)
+	mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", registry)
+	mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", registry)
+	mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", registry)
+	mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", registry)
+	mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", registry)
+	mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", registry)
+	mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", registry)
+	//create the DB and start persisting
+	return NewAccountingMetrics(registry, reportInterval, path)
+}
+
+//Send implements Hook.Send. It takes a peer, a size and a msg and
+// - calculates the cost for the local node sending a msg of size to peer using the Prices interface
+// - credits/debits the local node using the Balance interface
+func (ah *Accounting) Send(peer *Peer, size uint32, msg interface{}) error {
+	//get the price for a message (through the protocol spec)
+	price := ah.Price(msg)
+	//this message doesn't need accounting
+	if price == nil {
+		return nil
+	}
+	//evaluate the price for sending messages
+	costToLocalNode := price.For(Sender, size)
+	//do the accounting
+	err := ah.Add(costToLocalNode, peer)
+	//record metrics: just increase counters for user-facing metrics
+	ah.doMetrics(costToLocalNode, size, err)
+	return err
+}
+
+//Receive implements Hook.Receive. It takes a peer, a size and a msg and
+// - calculates the cost for the local node receiving a msg of size from peer using the Prices interface
+// - credits/debits the local node using the Balance interface
+func (ah *Accounting) Receive(peer *Peer, size uint32, msg interface{}) error {
+	//get the price for a message (through the
protocol spec)
+	price := ah.Price(msg)
+	//this message doesn't need accounting
+	if price == nil {
+		return nil
+	}
+	//evaluate the price for receiving messages
+	costToLocalNode := price.For(Receiver, size)
+	//do the accounting
+	err := ah.Add(costToLocalNode, peer)
+	//record metrics: just increase counters for user-facing metrics
+	ah.doMetrics(costToLocalNode, size, err)
+	return err
+}
+
+//doMetrics records some metrics. This is not error handling: `err` is returned
+//by both `Send` and `Receive`, and will only be non-nil if a limit has been
+//violated (overdraft), in which case the peer has been dropped.
+//If the limit has been violated and `err` is thus not nil:
+// * if the price is positive, the local node has been credited; thus `err` implicitly signals the REMOTE node has been dropped
+// * if the price is negative, the local node has been debited; thus `err` implicitly signals a LOCAL node "overdraft"
+func (ah *Accounting) doMetrics(price int64, size uint32, err error) {
+	if price > 0 {
+		mBalanceCredit.Inc(price)
+		mBytesCredit.Inc(int64(size))
+		mMsgCredit.Inc(1)
+		if err != nil {
+			//increase the number of times a remote node has been dropped due to "overdraft"
+			mPeerDrops.Inc(1)
+		}
+	} else {
+		//the debit counter is a cumulative total, so flip the sign of the negative price
+		mBalanceDebit.Inc(-price)
+		mBytesDebit.Inc(int64(size))
+		mMsgDebit.Inc(1)
+		if err != nil {
+			//increase the number of times the local node has gone into "overdraft" with respect to other nodes
+			mSelfDrops.Inc(1)
+		}
+	}
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
index 615f74b56..b16720dd3 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
@@ -122,6 +122,16 @@ type WrappedMsg struct {
 	Payload []byte
 }

+//For accounting, the design is to allow the Spec to describe which of its messages are priced, and how.
+//To access this functionality, we provide a Hook interface which will call the accounting methods.
+//NOTE: there could be more such (horizontal) hooks in the future
+type Hook interface {
+	//A hook for sending messages
+	Send(peer *Peer, size uint32, msg interface{}) error
+	//A hook for receiving messages
+	Receive(peer *Peer, size uint32, msg interface{}) error
+}
+
 // Spec is a protocol specification including its name and version as well as
 // the types of messages which are exchanged
 type Spec struct {
@@ -141,6 +151,9 @@ type Spec struct {
 	// each message must have a single unique data type
 	Messages []interface{}

+	//hook for accounting (could be extended to multiple hooks in the future)
+	Hook Hook
+
 	initOnce sync.Once
 	codes    map[reflect.Type]uint64
 	types    map[uint64]reflect.Type
@@ -274,6 +287,15 @@ func (p *Peer) Send(ctx context.Context, msg interface{}) error {
 		Payload: r,
 	}

+	//if the accounting hook is set, call it
+	if p.spec.Hook != nil {
+		err := p.spec.Hook.Send(p, wmsg.Size, msg)
+		if err != nil {
+			p.Drop(err)
+			return err
+		}
+	}
+
 	code, found := p.spec.GetCode(msg)
 	if !found {
 		return errorf(ErrInvalidMsgType, "%v", code)
@@ -336,6 +358,14 @@ func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{})
 		return errorf(ErrDecode, "<= %v: %v", msg, err)
 	}

+	//if the accounting hook is set, call it
+	if p.spec.Hook != nil {
+		err := p.spec.Hook.Receive(p, wmsg.Size, val)
+		if err != nil {
+			return err
+		}
+	}
+
 	// call the registered handler callbacks
 	// a registered callback take the decoded message as argument as an interface
 	// which the handler is supposed to cast to
the appropriate type
@@ -351,7 +381,7 @@ func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{})
 // * arguments
 //   * context
 //   * the local handshake to be sent to the remote peer
-//   * funcion to be called on the remote handshake (can be nil)
+//   * function to be called on the remote handshake (can be nil)
 // * expects a remote handshake back of the same type
 // * the dialing peer needs to send the handshake first and then waits for remote
 // * the listening peer waits for the remote handshake and then sends it
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go
new file mode 100644
index 000000000..215d4fe31
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package protocols
+
+import (
+	"encoding/binary"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
+
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+//AccountingMetrics abstracts away the metrics DB and
+//the reporter that persists the metrics
+type AccountingMetrics struct {
+	reporter *reporter
+}
+
+//Close will be called when the node is being shut down
+//for a graceful cleanup
+func (am *AccountingMetrics) Close() {
+	close(am.reporter.quit)
+	am.reporter.db.Close()
+}
+
+//reporter is an internal structure used to write p2p accounting related
+//metrics to a LevelDB. It will periodically write the accrued metrics to the DB.
+type reporter struct {
+	reg      metrics.Registry //the registry for these metrics (independent of other metrics)
+	interval time.Duration    //duration at which the reporter will persist metrics
+	db       *leveldb.DB      //the actual DB
+	quit     chan struct{}    //quit the reporter loop
+}
+
+//NewAccountingMetrics creates a new LevelDB instance used to persist metrics defined
+//inside p2p/protocols/accounting.go
+func NewAccountingMetrics(r metrics.Registry, d time.Duration, path string) *AccountingMetrics {
+	var val = make([]byte, 8)
+	var err error
+
+	//Create the LevelDB
+	db, err := leveldb.OpenFile(path, nil)
+	if err != nil {
+		log.Error(err.Error())
+		return nil
+	}
+
+	//Check for all defined metrics that there is a value in the DB
+	//If there is, assign it to the metric. This means that the node
+	//has been running before and that metrics have been persisted.
+ metricsMap := map[string]metrics.Counter{ + "account.balance.credit": mBalanceCredit, + "account.balance.debit": mBalanceDebit, + "account.bytes.credit": mBytesCredit, + "account.bytes.debit": mBytesDebit, + "account.msg.credit": mMsgCredit, + "account.msg.debit": mMsgDebit, + "account.peerdrops": mPeerDrops, + "account.selfdrops": mSelfDrops, + } + //iterate the map and get the values + for key, metric := range metricsMap { + val, err = db.Get([]byte(key), nil) + //until the first time a value is being written, + //this will return an error. + //it could be beneficial though to log errors later, + //but that would require a different logic + if err == nil { + metric.Inc(int64(binary.BigEndian.Uint64(val))) + } + } + + //create the reporter + rep := &reporter{ + reg: r, + interval: d, + db: db, + quit: make(chan struct{}), + } + + //run the go routine + go rep.run() + + m := &AccountingMetrics{ + reporter: rep, + } + + return m +} + +//run is the goroutine which periodically sends the metrics to the configured LevelDB +func (r *reporter) run() { + intervalTicker := time.NewTicker(r.interval) + + for { + select { + case <-intervalTicker.C: + //at each tick send the metrics + if err := r.save(); err != nil { + log.Error("unable to send metrics to LevelDB", "err", err) + //If there is an error in writing, exit the routine; we assume here that the error is + //severe and don't attempt to write again. + //Also, this should prevent leaking when the node is stopped + return + } + case <-r.quit: + //graceful shutdown + return + } + } +} + +//send the metrics to the DB +func (r *reporter) save() error { + //create a LevelDB Batch + batch := leveldb.Batch{} + //for each metric in the registry (which is independent)... + r.reg.Each(func(name string, i interface{}) { + metric, ok := i.(metrics.Counter) + if ok { + //assuming every metric here to be a Counter (separate registry) + //...create a snapshot... + ms := metric.Snapshot() + byteVal := make([]byte, 8) + binary.BigEndian.PutUint64(byteVal, uint64(ms.Count())) + //...and save the value to the DB + batch.Put([]byte(name), byteVal) + } + }) + return r.db.Write(&batch, nil) +} diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go index a105720a4..22a27dd96 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/rlpx.go @@ -151,7 +151,7 @@ func readProtocolHandshake(rw MsgReader, our *protoHandshake) (*protoHandshake, } if msg.Code == discMsg { // Disconnect before protocol handshake is valid according to the - // spec and we send it ourself if the posthanshake checks fail. + // spec and we send it ourself if the post-handshake checks fail. // We can't return the reason directly, though, because it is echoed // back otherwise. Wrap it in a string instead. 
var reason [1]DiscReason diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/server.go b/vendor/github.com/ethereum/go-ethereum/p2p/server.go index 8546b02f9..b8c69bad8 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/server.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/server.go @@ -20,9 +20,10 @@ package p2p import ( "bytes" "crypto/ecdsa" + "encoding/hex" "errors" - "fmt" "net" + "sort" "sync" "sync/atomic" "time" @@ -35,8 +36,10 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" + "github.com/ethereum/go-ethereum/rlp" ) const ( @@ -160,6 +163,8 @@ type Server struct { lock sync.Mutex // protects running running bool + nodedb *enode.DB + localnode *enode.LocalNode ntab discoverTable listener net.Listener ourHandshake *protoHandshake @@ -222,7 +227,7 @@ type transport interface { MsgReadWriter // transports must provide Close because we use MsgPipe in some of // the tests. Closing the actual network connection doesn't do - // anything in those tests because NsgPipe doesn't use it. + // anything in those tests because MsgPipe doesn't use it. close(err error) } @@ -347,43 +352,13 @@ func (srv *Server) SubscribeEvents(ch chan *PeerEvent) event.Subscription { // Self returns the local node's endpoint information. func (srv *Server) Self() *enode.Node { srv.lock.Lock() - running, listener, ntab := srv.running, srv.listener, srv.ntab + ln := srv.localnode srv.lock.Unlock() - if !running { + if ln == nil { return enode.NewV4(&srv.PrivateKey.PublicKey, net.ParseIP("0.0.0.0"), 0, 0) } - return srv.makeSelf(listener, ntab) -} - -func (srv *Server) makeSelf(listener net.Listener, ntab discoverTable) *enode.Node { - // If the node is running but discovery is off, manually assemble the node infos. - if ntab == nil { - addr := srv.tcpAddr(listener) - return enode.NewV4(&srv.PrivateKey.PublicKey, addr.IP, addr.Port, 0) - } - // Otherwise return the discovery node. - return ntab.Self() -} - -func (srv *Server) tcpAddr(listener net.Listener) net.TCPAddr { - addr := net.TCPAddr{IP: net.IP{0, 0, 0, 0}} - if listener == nil { - return addr // Inbound connections disabled, use zero address. - } - // Otherwise inject the listener address too. - if a, ok := listener.Addr().(*net.TCPAddr); ok { - addr = *a - } - if srv.NAT != nil { - if ip, err := srv.NAT.ExternalIP(); err == nil { - addr.IP = ip - } - } - if addr.IP.IsUnspecified() { - addr.IP = net.IP{127, 0, 0, 1} - } - return addr + return ln.Node() } // Stop terminates the server and all active peer connections. 
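The Self rewrite above means the server's identity now comes from a persistent enode.LocalNode instead of being assembled ad hoc from the listener and NAT state. A minimal sketch of the resulting behaviour (not part of the patch; the config values are illustrative assumptions):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
		"github.com/ethereum/go-ethereum/p2p"
	)

	func main() {
		key, _ := crypto.GenerateKey()
		srv := &p2p.Server{Config: p2p.Config{
			PrivateKey:  key,
			MaxPeers:    10,
			ListenAddr:  ":0", // random TCP port
			NoDiscovery: true, // skip UDP discovery for this sketch
		}}
		// Before Start, Self falls back to an all-zero v4 node.
		fmt.Println("before start:", srv.Self())
		if err := srv.Start(); err != nil {
			panic(err)
		}
		defer srv.Stop()
		// After Start, Self returns the endpoint of the live local node record.
		fmt.Println("after start:", srv.Self())
	}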
@@ -415,7 +390,7 @@ type sharedUDPConn struct { func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { packet, ok := <-s.unhandled if !ok { - return 0, nil, fmt.Errorf("Connection was closed") + return 0, nil, errors.New("Connection was closed") } l := len(packet.Data) if l > len(b) { @@ -443,11 +418,13 @@ func (srv *Server) Start() (err error) { if srv.log == nil { srv.log = log.New() } - srv.log.Info("Starting P2P networking") + if srv.NoDial && srv.ListenAddr == "" { + srv.log.Warn("P2P server will be useless, neither dialing nor listening") + } // static fields if srv.PrivateKey == nil { - return fmt.Errorf("Server.PrivateKey must be set to a non-nil key") + return errors.New("Server.PrivateKey must be set to a non-nil key") } if srv.newTransport == nil { srv.newTransport = newRLPX @@ -466,65 +443,120 @@ func (srv *Server) Start() (err error) { srv.peerOp = make(chan peerOpFunc) srv.peerOpDone = make(chan struct{}) - var ( - conn *net.UDPConn - sconn *sharedUDPConn - realaddr *net.UDPAddr - unhandled chan discover.ReadPacket - ) - - if !srv.NoDiscovery || srv.DiscoveryV5 { - addr, err := net.ResolveUDPAddr("udp", srv.ListenAddr) - if err != nil { + if err := srv.setupLocalNode(); err != nil { + return err + } + if srv.ListenAddr != "" { + if err := srv.setupListening(); err != nil { return err } - conn, err = net.ListenUDP("udp", addr) - if err != nil { - return err - } - realaddr = conn.LocalAddr().(*net.UDPAddr) - if srv.NAT != nil { - if !realaddr.IP.IsLoopback() { - go nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery") - } - // TODO: react to external IP changes over time. - if ext, err := srv.NAT.ExternalIP(); err == nil { - realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port} - } - } + } + if err := srv.setupDiscovery(); err != nil { + return err } - if !srv.NoDiscovery && srv.DiscoveryV5 { - unhandled = make(chan discover.ReadPacket, 100) - sconn = &sharedUDPConn{conn, unhandled} + dynPeers := srv.maxDialedConns() + dialer := newDialState(srv.localnode.ID(), srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict) + srv.loopWG.Add(1) + go srv.run(dialer) + return nil +} + +func (srv *Server) setupLocalNode() error { + // Create the devp2p handshake. + pubkey := crypto.FromECDSAPub(&srv.PrivateKey.PublicKey) + srv.ourHandshake = &protoHandshake{Version: baseProtocolVersion, Name: srv.Name, ID: pubkey[1:]} + for _, p := range srv.Protocols { + srv.ourHandshake.Caps = append(srv.ourHandshake.Caps, p.cap()) + } + sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps)) + + // Create the local node. + db, err := enode.OpenDB(srv.Config.NodeDatabase) + if err != nil { + return err + } + srv.nodedb = db + srv.localnode = enode.NewLocalNode(db, srv.PrivateKey) + srv.localnode.SetFallbackIP(net.IP{127, 0, 0, 1}) + srv.localnode.Set(capsByNameAndVersion(srv.ourHandshake.Caps)) + // TODO: check conflicts + for _, p := range srv.Protocols { + for _, e := range p.Attributes { + srv.localnode.Set(e) + } + } + switch srv.NAT.(type) { + case nil: + // No NAT interface, do nothing. + case nat.ExtIP: + // ExtIP doesn't block, set the IP right away. + ip, _ := srv.NAT.ExternalIP() + srv.localnode.SetStaticIP(ip) + default: + // Ask the router about the IP. This takes a while and blocks startup, + // do it in the background. 
+ srv.loopWG.Add(1) + go func() { + defer srv.loopWG.Done() + if ip, err := srv.NAT.ExternalIP(); err == nil { + srv.localnode.SetStaticIP(ip) + } + }() + } + return nil +} + +func (srv *Server) setupDiscovery() error { + if srv.NoDiscovery && !srv.DiscoveryV5 { + return nil } - // node table + addr, err := net.ResolveUDPAddr("udp", srv.ListenAddr) + if err != nil { + return err + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + return err + } + realaddr := conn.LocalAddr().(*net.UDPAddr) + srv.log.Debug("UDP listener up", "addr", realaddr) + if srv.NAT != nil { + if !realaddr.IP.IsLoopback() { + go nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery") + } + } + srv.localnode.SetFallbackUDP(realaddr.Port) + + // Discovery V4 + var unhandled chan discover.ReadPacket + var sconn *sharedUDPConn if !srv.NoDiscovery { - cfg := discover.Config{ - PrivateKey: srv.PrivateKey, - AnnounceAddr: realaddr, - NodeDBPath: srv.NodeDatabase, - NetRestrict: srv.NetRestrict, - Bootnodes: srv.BootstrapNodes, - Unhandled: unhandled, + if srv.DiscoveryV5 { + unhandled = make(chan discover.ReadPacket, 100) + sconn = &sharedUDPConn{conn, unhandled} } - ntab, err := discover.ListenUDP(conn, cfg) + cfg := discover.Config{ + PrivateKey: srv.PrivateKey, + NetRestrict: srv.NetRestrict, + Bootnodes: srv.BootstrapNodes, + Unhandled: unhandled, + } + ntab, err := discover.ListenUDP(conn, srv.localnode, cfg) if err != nil { return err } srv.ntab = ntab } - + // Discovery V5 if srv.DiscoveryV5 { - var ( - ntab *discv5.Network - err error - ) + var ntab *discv5.Network + var err error if sconn != nil { - ntab, err = discv5.ListenUDP(srv.PrivateKey, sconn, realaddr, "", srv.NetRestrict) //srv.NodeDatabase) + ntab, err = discv5.ListenUDP(srv.PrivateKey, sconn, "", srv.NetRestrict) } else { - ntab, err = discv5.ListenUDP(srv.PrivateKey, conn, realaddr, "", srv.NetRestrict) //srv.NodeDatabase) + ntab, err = discv5.ListenUDP(srv.PrivateKey, conn, "", srv.NetRestrict) } if err != nil { return err @@ -534,32 +566,10 @@ func (srv *Server) Start() (err error) { } srv.DiscV5 = ntab } - - dynPeers := srv.maxDialedConns() - dialer := newDialState(srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict) - - // handshake - pubkey := crypto.FromECDSAPub(&srv.PrivateKey.PublicKey) - srv.ourHandshake = &protoHandshake{Version: baseProtocolVersion, Name: srv.Name, ID: pubkey[1:]} - for _, p := range srv.Protocols { - srv.ourHandshake.Caps = append(srv.ourHandshake.Caps, p.cap()) - } - // listen/dial - if srv.ListenAddr != "" { - if err := srv.startListening(); err != nil { - return err - } - } - if srv.NoDial && srv.ListenAddr == "" { - srv.log.Warn("P2P server will be useless, neither dialing nor listening") - } - - srv.loopWG.Add(1) - go srv.run(dialer) return nil } -func (srv *Server) startListening() error { +func (srv *Server) setupListening() error { // Launch the TCP listener. listener, err := net.Listen("tcp", srv.ListenAddr) if err != nil { @@ -568,8 +578,11 @@ func (srv *Server) startListening() error { laddr := listener.Addr().(*net.TCPAddr) srv.ListenAddr = laddr.String() srv.listener = listener + srv.localnode.Set(enr.TCP(laddr.Port)) + srv.loopWG.Add(1) go srv.listenLoop() + // Map the TCP listening port if NAT is configured. 
if !laddr.IP.IsLoopback() && srv.NAT != nil { srv.loopWG.Add(1) @@ -589,7 +602,10 @@ type dialer interface { } func (srv *Server) run(dialstate dialer) { + srv.log.Info("Started P2P networking", "self", srv.localnode.Node()) defer srv.loopWG.Done() + defer srv.nodedb.Close() + var ( peers = make(map[enode.ID]*Peer) inboundCount = 0 @@ -781,7 +797,7 @@ func (srv *Server) encHandshakeChecks(peers map[enode.ID]*Peer, inboundCount int return DiscTooManyPeers case peers[c.node.ID()] != nil: return DiscAlreadyConnected - case c.node.ID() == srv.Self().ID(): + case c.node.ID() == srv.localnode.ID(): return DiscSelf default: return nil @@ -802,15 +818,11 @@ func (srv *Server) maxDialedConns() int { return srv.MaxPeers / r } -type tempError interface { - Temporary() bool -} - // listenLoop runs in its own goroutine and accepts // inbound connections. func (srv *Server) listenLoop() { defer srv.loopWG.Done() - srv.log.Info("RLPx listener up", "self", srv.Self()) + srv.log.Debug("TCP listener up", "addr", srv.listener.Addr()) tokens := defaultMaxPendingPeers if srv.MaxPendingPeers > 0 { @@ -831,7 +843,7 @@ func (srv *Server) listenLoop() { ) for { fd, err = srv.listener.Accept() - if tempErr, ok := err.(tempError); ok && tempErr.Temporary() { + if netutil.IsTemporaryError(err) { srv.log.Debug("Temporary read error", "err", err) continue } else if err != nil { @@ -851,7 +863,11 @@ func (srv *Server) listenLoop() { } } - fd = newMeteredConn(fd, true) + var ip net.IP + if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok { + ip = tcp.IP + } + fd = newMeteredConn(fd, true, ip) srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr()) go func() { srv.SetupConn(fd, inboundConn, nil) @@ -864,10 +880,6 @@ func (srv *Server) listenLoop() { // as a peer. It returns when the connection has been added as a peer // or the handshakes have failed. func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *enode.Node) error { - self := srv.Self() - if self == nil { - return errors.New("shutdown") - } c := &conn{fd: fd, transport: srv.newTransport(fd), flags: flags, cont: make(chan error)} err := srv.setupConn(c, flags, dialDest) if err != nil { @@ -890,7 +902,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro if dialDest != nil { dialPubkey = new(ecdsa.PublicKey) if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil { - return fmt.Errorf("dial destination doesn't have a secp256k1 public key") + return errors.New("dial destination doesn't have a secp256k1 public key") } } // Run the encryption handshake. 
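Tying the pieces together: the new Protocol.Attributes field (p2p/protocol.go above) is what setupLocalNode copies into the node record, and capsByNameAndVersion.ENRKey shows the pattern for keyed entries. A hypothetical sketch of a protocol publishing its own ENR entry; the "mycount" key and counterEntry type are invented for illustration:

	package main

	import (
		"github.com/ethereum/go-ethereum/p2p"
		"github.com/ethereum/go-ethereum/p2p/enr"
	)

	// counterEntry is RLP-encoded into the node record under the key "mycount".
	type counterEntry uint64

	func (counterEntry) ENRKey() string { return "mycount" }

	func exampleProtocol() p2p.Protocol {
		return p2p.Protocol{
			Name:    "example",
			Version: 1,
			Length:  1,
			Run:     func(peer *p2p.Peer, rw p2p.MsgReadWriter) error { return nil },
			// Attributes end up in the local node record when the server starts.
			Attributes: []enr.Entry{counterEntry(42)},
		}
	}

	func main() { _ = exampleProtocol() }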
@@ -908,6 +920,9 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro } else { c.node = nodeFromConn(remotePubkey, c.fd) } + if conn, ok := c.fd.(*meteredConn); ok { + conn.handshakeDone(c.node.ID()) + } clog := srv.log.New("id", c.node.ID(), "addr", c.fd.RemoteAddr(), "conn", c.flags) err = srv.checkpoint(c, srv.posthandshake) if err != nil { @@ -921,7 +936,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro return err } if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) { - clog.Trace("Wrong devp2p handshake identity", "phsid", fmt.Sprintf("%x", phs.ID)) + clog.Trace("Wrong devp2p handshake identity", "phsid", hex.EncodeToString(phs.ID)) return DiscUnexpectedIdentity } c.caps, c.name = phs.Caps, phs.Name @@ -1003,6 +1018,7 @@ type NodeInfo struct { ID string `json:"id"` // Unique node identifier (also the encryption key) Name string `json:"name"` // Name of the node, including client type, version, OS, custom data Enode string `json:"enode"` // Enode URL for adding this peer from remote peers + ENR string `json:"enr"` // Ethereum Node Record IP string `json:"ip"` // IP address of the node Ports struct { Discovery int `json:"discovery"` // UDP listening port for discovery protocol @@ -1014,9 +1030,8 @@ type NodeInfo struct { // NodeInfo gathers and returns a collection of metadata known about the host. func (srv *Server) NodeInfo() *NodeInfo { - node := srv.Self() - // Gather and assemble the generic node infos + node := srv.Self() info := &NodeInfo{ Name: srv.Name, Enode: node.String(), @@ -1027,6 +1042,9 @@ func (srv *Server) NodeInfo() *NodeInfo { } info.Ports.Discovery = node.UDP() info.Ports.Listener = node.TCP() + if enc, err := rlp.EncodeToBytes(node.Record()); err == nil { + info.ENR = "0x" + hex.EncodeToString(enc) + } // Gather all the running protocol infos (only once per protocol type) for _, proto := range srv.Protocols { diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/README.md b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/README.md index d1f8649ea..871d71b2c 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/README.md +++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/README.md @@ -63,18 +63,6 @@ using the devp2p node stack rather than executing `main()`. The nodes listen for devp2p connections and WebSocket RPC clients on random localhost ports. -### DockerAdapter - -The `DockerAdapter` is similar to the `ExecAdapter` but executes `docker run` -to run the node in a Docker container using a Docker image containing the -simulation binary at `/bin/p2p-node`. - -The Docker image is built using `docker build` when the adapter is initialised, -meaning no prior setup is necessary other than having a working Docker client. - -Each node listens on the external IP of the container and the default p2p and -RPC ports (`30303` and `8546` respectively). - ## Network A simulation network is created with an ID and default service (which is used diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go deleted file mode 100644 index 82eab0e9c..000000000 --- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/docker.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package adapters - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/reexec" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -var ( - ErrLinuxOnly = errors.New("DockerAdapter can only be used on Linux as it uses the current binary (which must be a Linux binary)") -) - -// DockerAdapter is a NodeAdapter which runs simulation nodes inside Docker -// containers. -// -// A Docker image is built which contains the current binary at /bin/p2p-node -// which when executed runs the underlying service (see the description -// of the execP2PNode function for more details) -type DockerAdapter struct { - ExecAdapter -} - -// NewDockerAdapter builds the p2p-node Docker image containing the current -// binary and returns a DockerAdapter -func NewDockerAdapter() (*DockerAdapter, error) { - // Since Docker containers run on Linux and this adapter runs the - // current binary in the container, it must be compiled for Linux. - // - // It is reasonable to require this because the caller can just - // compile the current binary in a Docker container. 
- if runtime.GOOS != "linux" { - return nil, ErrLinuxOnly - } - - if err := buildDockerImage(); err != nil { - return nil, err - } - - return &DockerAdapter{ - ExecAdapter{ - nodes: make(map[enode.ID]*ExecNode), - }, - }, nil -} - -// Name returns the name of the adapter for logging purposes -func (d *DockerAdapter) Name() string { - return "docker-adapter" -} - -// NewNode returns a new DockerNode using the given config -func (d *DockerAdapter) NewNode(config *NodeConfig) (Node, error) { - if len(config.Services) == 0 { - return nil, errors.New("node must have at least one service") - } - for _, service := range config.Services { - if _, exists := serviceFuncs[service]; !exists { - return nil, fmt.Errorf("unknown node service %q", service) - } - } - - // generate the config - conf := &execNodeConfig{ - Stack: node.DefaultConfig, - Node: config, - } - conf.Stack.DataDir = "/data" - conf.Stack.WSHost = "0.0.0.0" - conf.Stack.WSOrigins = []string{"*"} - conf.Stack.WSExposeAll = true - conf.Stack.P2P.EnableMsgEvents = false - conf.Stack.P2P.NoDiscovery = true - conf.Stack.P2P.NAT = nil - conf.Stack.NoUSB = true - - // listen on all interfaces on a given port, which we set when we - // initialise NodeConfig (usually a random port) - conf.Stack.P2P.ListenAddr = fmt.Sprintf(":%d", config.Port) - - node := &DockerNode{ - ExecNode: ExecNode{ - ID: config.ID, - Config: conf, - adapter: &d.ExecAdapter, - }, - } - node.newCmd = node.dockerCommand - d.ExecAdapter.nodes[node.ID] = &node.ExecNode - return node, nil -} - -// DockerNode wraps an ExecNode but exec's the current binary in a docker -// container rather than locally -type DockerNode struct { - ExecNode -} - -// dockerCommand returns a command which exec's the binary in a Docker -// container. -// -// It uses a shell so that we can pass the _P2P_NODE_CONFIG environment -// variable to the container using the --env flag. -func (n *DockerNode) dockerCommand() *exec.Cmd { - return exec.Command( - "sh", "-c", - fmt.Sprintf( - `exec docker run --interactive --env _P2P_NODE_CONFIG="${_P2P_NODE_CONFIG}" %s p2p-node %s %s`, - dockerImage, strings.Join(n.Config.Node.Services, ","), n.ID.String(), - ), - ) -} - -// dockerImage is the name of the Docker image which gets built to run the -// simulation node -const dockerImage = "p2p-node" - -// buildDockerImage builds the Docker image which is used to run the simulation -// node in a Docker container. -// -// It adds the current binary as "p2p-node" so that it runs execP2PNode -// when executed. 
-func buildDockerImage() error {
-	// create a directory to use as the build context
-	dir, err := ioutil.TempDir("", "p2p-docker")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(dir)
-
-	// copy the current binary into the build context
-	bin, err := os.Open(reexec.Self())
-	if err != nil {
-		return err
-	}
-	defer bin.Close()
-	dst, err := os.OpenFile(filepath.Join(dir, "self.bin"), os.O_WRONLY|os.O_CREATE, 0755)
-	if err != nil {
-		return err
-	}
-	defer dst.Close()
-	if _, err := io.Copy(dst, bin); err != nil {
-		return err
-	}
-
-	// create the Dockerfile
-	dockerfile := []byte(`
-FROM ubuntu:16.04
-RUN mkdir /data
-ADD self.bin /bin/p2p-node
-	`)
-	if err := ioutil.WriteFile(filepath.Join(dir, "Dockerfile"), dockerfile, 0644); err != nil {
-		return err
-	}
-
-	// run 'docker build'
-	cmd := exec.Command("docker", "build", "-t", dockerImage, dir)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("error building docker image: %s", err)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
index dc7d277ca..abb196717 100644
--- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
+++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/exec.go
@@ -17,7 +17,7 @@
 package adapters

 import (
-	"bufio"
+	"bytes"
 	"context"
 	"crypto/ecdsa"
 	"encoding/json"
@@ -25,6 +25,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"net/http"
 	"os"
 	"os/exec"
 	"os/signal"
@@ -43,12 +44,14 @@ import (
 	"golang.org/x/net/websocket"
 )

-// ExecAdapter is a NodeAdapter which runs simulation nodes by executing the
-// current binary as a child process.
-//
-// An init hook is used so that the child process executes the node services
-// (rather than whataver the main() function would normally do), see the
-// execP2PNode function for more information.
+func init() {
+	// Register a reexec function to start a simulation node when the current binary is
+	// executed as "p2p-node" (rather than whatever the main() function would normally do).
+	reexec.Register("p2p-node", execP2PNode)
+}
+
+// ExecAdapter is a NodeAdapter which runs simulation nodes by executing the current binary
+// as a child process.
 type ExecAdapter struct {
 	// BaseDir is the directory under which the data directories for each
 	// simulation node are created.
@@ -150,15 +153,13 @@ func (n *ExecNode) Client() (*rpc.Client, error) {
 }

 // Start exec's the node passing the ID and service as command line arguments
-// and the node config encoded as JSON in the _P2P_NODE_CONFIG environment
-// variable
+// and the node config encoded as JSON in an environment variable.
func (n *ExecNode) Start(snapshots map[string][]byte) (err error) { if n.Cmd != nil { return errors.New("already started") } defer func() { if err != nil { - log.Error("node failed to start", "err", err) n.Stop() } }() @@ -175,59 +176,78 @@ func (n *ExecNode) Start(snapshots map[string][]byte) (err error) { return fmt.Errorf("error generating node config: %s", err) } - // use a pipe for stderr so we can both copy the node's stderr to - // os.Stderr and read the WebSocket address from the logs - stderrR, stderrW := io.Pipe() - stderr := io.MultiWriter(os.Stderr, stderrW) + // start the one-shot server that waits for startup information + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + statusURL, statusC := n.waitForStartupJSON(ctx) // start the node cmd := n.newCmd() cmd.Stdout = os.Stdout - cmd.Stderr = stderr - cmd.Env = append(os.Environ(), fmt.Sprintf("_P2P_NODE_CONFIG=%s", confData)) + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), + envStatusURL+"="+statusURL, + envNodeConfig+"="+string(confData), + ) if err := cmd.Start(); err != nil { return fmt.Errorf("error starting node: %s", err) } n.Cmd = cmd // read the WebSocket address from the stderr logs - var wsAddr string - wsAddrC := make(chan string) - go func() { - s := bufio.NewScanner(stderrR) - for s.Scan() { - if strings.Contains(s.Text(), "WebSocket endpoint opened") { - wsAddrC <- wsAddrPattern.FindString(s.Text()) - } - } - }() - select { - case wsAddr = <-wsAddrC: - if wsAddr == "" { - return errors.New("failed to read WebSocket address from stderr") - } - case <-time.After(10 * time.Second): - return errors.New("timed out waiting for WebSocket address on stderr") + status := <-statusC + if status.Err != "" { + return errors.New(status.Err) } - - // create the RPC client and load the node info - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - client, err := rpc.DialWebsocket(ctx, wsAddr, "") + client, err := rpc.DialWebsocket(ctx, status.WSEndpoint, "http://localhost") if err != nil { - return fmt.Errorf("error dialing rpc websocket: %s", err) + return fmt.Errorf("can't connect to RPC server: %v", err) } - var info p2p.NodeInfo - if err := client.CallContext(ctx, &info, "admin_nodeInfo"); err != nil { - return fmt.Errorf("error getting node info: %s", err) - } - n.client = client - n.wsAddr = wsAddr - n.Info = &info + // node ready :) + n.client = client + n.wsAddr = status.WSEndpoint + n.Info = status.NodeInfo return nil } +// waitForStartupJSON runs a one-shot HTTP server to receive a startup report. +func (n *ExecNode) waitForStartupJSON(ctx context.Context) (string, chan nodeStartupJSON) { + var ( + ch = make(chan nodeStartupJSON, 1) + quitOnce sync.Once + srv http.Server + ) + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + ch <- nodeStartupJSON{Err: err.Error()} + return "", ch + } + quit := func(status nodeStartupJSON) { + quitOnce.Do(func() { + l.Close() + ch <- status + }) + } + srv.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var status nodeStartupJSON + if err := json.NewDecoder(r.Body).Decode(&status); err != nil { + status.Err = fmt.Sprintf("can't decode startup report: %v", err) + } + quit(status) + }) + // Run the HTTP server, but don't wait forever and shut it down + // if the context is canceled. 
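+	// quit() closes the listener, which in turn makes srv.Serve return.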
+ go srv.Serve(l) + go func() { + <-ctx.Done() + quit(nodeStartupJSON{Err: "didn't get startup report"}) + }() + + url := "http://" + l.Addr().String() + return url, ch +} + // execCommand returns a command which runs the node locally by exec'ing // the current binary but setting argv[0] to "p2p-node" so that the child // runs execP2PNode @@ -318,12 +338,6 @@ func (n *ExecNode) Snapshots() (map[string][]byte, error) { return snapshots, n.client.Call(&snapshots, "simulation_snapshot") } -func init() { - // register a reexec function to start a devp2p node when the current - // binary is executed as "p2p-node" - reexec.Register("p2p-node", execP2PNode) -} - // execNodeConfig is used to serialize the node configuration so it can be // passed to the child process as a JSON encoded environment variable type execNodeConfig struct { @@ -333,55 +347,69 @@ type execNodeConfig struct { PeerAddrs map[string]string `json:"peer_addrs,omitempty"` } -// ExternalIP gets an external IP address so that Enode URL is usable -func ExternalIP() net.IP { - addrs, err := net.InterfaceAddrs() - if err != nil { - log.Crit("error getting IP address", "err", err) - } - for _, addr := range addrs { - if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && !ip.IP.IsLinkLocalUnicast() { - return ip.IP - } - } - log.Warn("unable to determine explicit IP address, falling back to loopback") - return net.IP{127, 0, 0, 1} -} - -// execP2PNode starts a devp2p node when the current binary is executed with +// execP2PNode starts a simulation node when the current binary is executed with // argv[0] being "p2p-node", reading the service / ID from argv[1] / argv[2] -// and the node config from the _P2P_NODE_CONFIG environment variable +// and the node config from an environment variable. func execP2PNode() { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat())) glogger.Verbosity(log.LvlInfo) log.Root().SetHandler(glogger) + statusURL := os.Getenv(envStatusURL) + if statusURL == "" { + log.Crit("missing " + envStatusURL) + } + // Start the node and gather startup report. + var status nodeStartupJSON + stack, stackErr := startExecNodeStack() + if stackErr != nil { + status.Err = stackErr.Error() + } else { + status.WSEndpoint = "ws://" + stack.WSEndpoint() + status.NodeInfo = stack.Server().NodeInfo() + } + + // Send status to the host. + statusJSON, _ := json.Marshal(status) + if _, err := http.Post(statusURL, "application/json", bytes.NewReader(statusJSON)); err != nil { + log.Crit("Can't post startup info", "url", statusURL, "err", err) + } + if stackErr != nil { + os.Exit(1) + } + + // Stop the stack if we get a SIGTERM signal. + go func() { + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, syscall.SIGTERM) + defer signal.Stop(sigc) + <-sigc + log.Info("Received SIGTERM, shutting down...") + stack.Stop() + }() + stack.Wait() // Wait for the stack to exit. 
+}
+
+func startExecNodeStack() (*node.Node, error) {
 	// read the services from argv
 	serviceNames := strings.Split(os.Args[1], ",")

 	// decode the config
-	confEnv := os.Getenv("_P2P_NODE_CONFIG")
+	confEnv := os.Getenv(envNodeConfig)
 	if confEnv == "" {
-		log.Crit("missing _P2P_NODE_CONFIG")
+		return nil, fmt.Errorf("missing " + envNodeConfig)
 	}
 	var conf execNodeConfig
 	if err := json.Unmarshal([]byte(confEnv), &conf); err != nil {
-		log.Crit("error decoding _P2P_NODE_CONFIG", "err", err)
+		return nil, fmt.Errorf("error decoding %s: %v", envNodeConfig, err)
 	}
 	conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey
 	conf.Stack.Logger = log.New("node.id", conf.Node.ID.String())

-	if strings.HasPrefix(conf.Stack.P2P.ListenAddr, ":") {
-		conf.Stack.P2P.ListenAddr = ExternalIP().String() + conf.Stack.P2P.ListenAddr
-	}
-	if conf.Stack.WSHost == "0.0.0.0" {
-		conf.Stack.WSHost = ExternalIP().String()
-	}
-
 	// initialize the devp2p stack
 	stack, err := node.New(&conf.Stack)
 	if err != nil {
-		log.Crit("error creating node stack", "err", err)
+		return nil, fmt.Errorf("error creating node stack: %v", err)
 	}

 	// register the services, collecting them into a map so we can wrap
@@ -390,7 +418,7 @@ func execP2PNode() {
 	for _, name := range serviceNames {
 		serviceFunc, exists := serviceFuncs[name]
 		if !exists {
-			log.Crit("unknown node service", "name", name)
+			return nil, fmt.Errorf("unknown node service %q", name)
 		}
 		constructor := func(nodeCtx *node.ServiceContext) (node.Service, error) {
 			ctx := &ServiceContext{
@@ -409,34 +437,35 @@
 			return service, nil
 		}
 		if err := stack.Register(constructor); err != nil {
-			log.Crit("error starting service", "name", name, "err", err)
+			return stack, fmt.Errorf("error registering service %q: %v", name, err)
 		}
 	}

 	// register the snapshot service
-	if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
+	err = stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
 		return &snapshotService{services}, nil
-	}); err != nil {
-		log.Crit("error starting snapshot service", "err", err)
+	})
+	if err != nil {
+		return stack, fmt.Errorf("error starting snapshot service: %v", err)
 	}

 	// start the stack
-	if err := stack.Start(); err != nil {
-		log.Crit("error stating node stack", "err", err)
+	if err = stack.Start(); err != nil {
+		err = fmt.Errorf("error starting stack: %v", err)
 	}
+	return stack, err
+}

-	// stop the stack if we get a SIGTERM signal
-	go func() {
-		sigc := make(chan os.Signal, 1)
-		signal.Notify(sigc, syscall.SIGTERM)
-		defer signal.Stop(sigc)
-		<-sigc
-		log.Info("Received SIGTERM, shutting down...")
-		stack.Stop()
-	}()
+const (
+	envStatusURL  = "_P2P_STATUS_URL"
+	envNodeConfig = "_P2P_NODE_CONFIG"
+)

-	// wait for the stack to exit
-	stack.Wait()
+// nodeStartupJSON is sent to the simulation host after startup.
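+// A non-empty Err means the node failed to start; otherwise WSEndpoint and
+// NodeInfo describe the running node.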
+type nodeStartupJSON struct { + Err string + WSEndpoint string + NodeInfo *p2p.NodeInfo } // snapshotService is a node.Service which wraps a list of services and diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/ws.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/ws.go deleted file mode 100644 index 979a21709..000000000 --- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/adapters/ws.go +++ /dev/null @@ -1,51 +0,0 @@ -package adapters - -import ( - "bufio" - "errors" - "io" - "regexp" - "strings" - "time" -) - -// wsAddrPattern is a regex used to read the WebSocket address from the node's -// log -var wsAddrPattern = regexp.MustCompile(`ws://[\d.:]+`) - -func matchWSAddr(str string) (string, bool) { - if !strings.Contains(str, "WebSocket endpoint opened") { - return "", false - } - - return wsAddrPattern.FindString(str), true -} - -// findWSAddr scans through reader r, looking for the log entry with -// WebSocket address information. -func findWSAddr(r io.Reader, timeout time.Duration) (string, error) { - ch := make(chan string) - - go func() { - s := bufio.NewScanner(r) - for s.Scan() { - addr, ok := matchWSAddr(s.Text()) - if ok { - ch <- addr - } - } - close(ch) - }() - - var wsAddr string - select { - case wsAddr = <-ch: - if wsAddr == "" { - return "", errors.New("empty result") - } - case <-time.After(timeout): - return "", errors.New("timed out") - } - - return wsAddr, nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.go index 597cc950c..cde2f3a67 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/examples/ping-pong.go @@ -70,14 +70,6 @@ func main() { log.Info("using exec adapter", "tmpdir", tmpdir) adapter = adapters.NewExecAdapter(tmpdir) - case "docker": - log.Info("using docker adapter") - var err error - adapter, err = adapters.NewDockerAdapter() - if err != nil { - log.Crit("error creating docker adapter", "err", err) - } - default: log.Crit(fmt.Sprintf("unknown node adapter %q", *adapterType)) } diff --git a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go index 101ac09f8..ab9f582c5 100644 --- a/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go +++ b/vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "sync" "time" @@ -116,7 +117,7 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) Node: adapterNode, Config: conf, } - log.Trace(fmt.Sprintf("node %v created", conf.ID)) + log.Trace("Node created", "id", conf.ID) net.nodeMap[conf.ID] = len(net.Nodes) net.Nodes = append(net.Nodes, node) @@ -167,6 +168,7 @@ func (net *Network) Start(id enode.ID) error { func (net *Network) startWithSnapshots(id enode.ID, snapshots map[string][]byte) error { net.lock.Lock() defer net.lock.Unlock() + node := net.getNode(id) if node == nil { return fmt.Errorf("node %v does not exist", id) @@ -174,13 +176,13 @@ func (net *Network) startWithSnapshots(id enode.ID, snapshots map[string][]byte) if node.Up { return fmt.Errorf("node %v already up", id) } - log.Trace(fmt.Sprintf("starting node %v: %v using %v", id, node.Up, net.nodeAdapter.Name())) + log.Trace("Starting node", "id", id, "adapter", 
net.nodeAdapter.Name()) if err := node.Start(snapshots); err != nil { - log.Warn(fmt.Sprintf("start up failed: %v", err)) + log.Warn("Node startup failed", "id", id, "err", err) return err } node.Up = true - log.Info(fmt.Sprintf("started node %v: %v", id, node.Up)) + log.Info("Started node", "id", id) net.events.Send(NewEvent(node)) @@ -209,7 +211,6 @@ func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub defer net.lock.Unlock() node := net.getNode(id) if node == nil { - log.Error("Can not find node for id", "id", id) return } node.Up = false @@ -240,7 +241,7 @@ func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub case err := <-sub.Err(): if err != nil { - log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err) + log.Error("Error in peer event subscription", "id", id, "err", err) } return } @@ -250,7 +251,6 @@ func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub // Stop stops the node with the given ID func (net *Network) Stop(id enode.ID) error { net.lock.Lock() - defer net.lock.Unlock() node := net.getNode(id) if node == nil { return fmt.Errorf("node %v does not exist", id) @@ -258,12 +258,17 @@ func (net *Network) Stop(id enode.ID) error { if !node.Up { return fmt.Errorf("node %v already down", id) } - if err := node.Stop(); err != nil { + node.Up = false + net.lock.Unlock() + + err := node.Stop() + if err != nil { + net.lock.Lock() + node.Up = true + net.lock.Unlock() return err } - node.Up = false - log.Info(fmt.Sprintf("stop node %v: %v", id, node.Up)) - + log.Info("Stopped node", "id", id, "err", err) net.events.Send(ControlEvent(node)) return nil } @@ -271,7 +276,7 @@ func (net *Network) Stop(id enode.ID) error { // Connect connects two nodes together by calling the "admin_addPeer" RPC // method on the "one" node so that it connects to the "other" node func (net *Network) Connect(oneID, otherID enode.ID) error { - log.Debug(fmt.Sprintf("connecting %s to %s", oneID, otherID)) + log.Debug("Connecting nodes with addPeer", "id", oneID, "other", otherID) conn, err := net.InitConn(oneID, otherID) if err != nil { return err @@ -481,10 +486,10 @@ func (net *Network) InitConn(oneID, otherID enode.ID) (*Conn, error) { err = conn.nodesUp() if err != nil { - log.Trace(fmt.Sprintf("nodes not up: %v", err)) + log.Trace("Nodes not up", "err", err) return nil, fmt.Errorf("nodes not up: %v", err) } - log.Debug("InitConn - connection initiated") + log.Debug("Connection initiated", "id", oneID, "other", otherID) conn.initiated = time.Now() return conn, nil } @@ -492,9 +497,9 @@ func (net *Network) InitConn(oneID, otherID enode.ID) (*Conn, error) { // Shutdown stops all nodes in the network and closes the quit channel func (net *Network) Shutdown() { for _, node := range net.Nodes { - log.Debug(fmt.Sprintf("stopping node %s", node.ID().TerminalString())) + log.Debug("Stopping node", "id", node.ID()) if err := node.Stop(); err != nil { - log.Warn(fmt.Sprintf("error stopping node %s", node.ID().TerminalString()), "err", err) + log.Warn("Can't stop node", "id", node.ID(), "err", err) } } close(net.quitc) @@ -640,11 +645,18 @@ type NodeSnapshot struct { // Snapshot creates a network snapshot func (net *Network) Snapshot() (*Snapshot, error) { + return net.snapshot(nil, nil) +} + +func (net *Network) SnapshotWithServices(addServices []string, removeServices []string) (*Snapshot, error) { + return net.snapshot(addServices, removeServices) +} + +func (net *Network) snapshot(addServices []string, 
removeServices []string) (*Snapshot, error) { net.lock.Lock() defer net.lock.Unlock() snap := &Snapshot{ Nodes: make([]NodeSnapshot, len(net.Nodes)), - Conns: make([]Conn, len(net.Conns)), } for i, node := range net.Nodes { snap.Nodes[i] = NodeSnapshot{Node: *node} @@ -656,15 +668,49 @@ func (net *Network) Snapshot() (*Snapshot, error) { return nil, err } snap.Nodes[i].Snapshots = snapshots + for _, addSvc := range addServices { + haveSvc := false + for _, svc := range snap.Nodes[i].Node.Config.Services { + if svc == addSvc { + haveSvc = true + break + } + } + if !haveSvc { + snap.Nodes[i].Node.Config.Services = append(snap.Nodes[i].Node.Config.Services, addSvc) + } + } + if len(removeServices) > 0 { + var cleanedServices []string + for _, svc := range snap.Nodes[i].Node.Config.Services { + haveSvc := false + for _, rmSvc := range removeServices { + if rmSvc == svc { + haveSvc = true + break + } + } + if !haveSvc { + cleanedServices = append(cleanedServices, svc) + } + + } + snap.Nodes[i].Node.Config.Services = cleanedServices + } } - for i, conn := range net.Conns { - snap.Conns[i] = *conn + for _, conn := range net.Conns { + if conn.Up { + snap.Conns = append(snap.Conns, *conn) + } } return snap, nil } +var snapshotLoadTimeout = 120 * time.Second + // Load loads a network snapshot func (net *Network) Load(snap *Snapshot) error { + // Start nodes. for _, n := range snap.Nodes { if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil { return err @@ -676,6 +722,69 @@ func (net *Network) Load(snap *Snapshot) error { return err } } + + // Prepare connection events counter. + allConnected := make(chan struct{}) // closed when all connections are established + done := make(chan struct{}) // ensures that the event loop goroutine is terminated + defer close(done) + + // Subscribe to event channel. + // It needs to be done outside of the event loop goroutine (created below) + // to ensure that the event channel is blocking before connect calls are made. + events := make(chan *Event) + sub := net.Events().Subscribe(events) + defer sub.Unsubscribe() + + go func() { + // Expected number of connections. + total := len(snap.Conns) + // Set of all established connections from the snapshot, not other connections. + // Key array element 0 is the connection One field value, and element 1 connection Other field. + connections := make(map[[2]enode.ID]struct{}, total) + + for { + select { + case e := <-events: + // Ignore control events as they do not represent + // connect or disconnect (Up) state change. + if e.Control { + continue + } + // Detect only connection events. + if e.Type != EventTypeConn { + continue + } + connection := [2]enode.ID{e.Conn.One, e.Conn.Other} + // Nodes are still not connected or have been disconnected. + if !e.Conn.Up { + // Delete the connection from the set of established connections. + // This will prevent false positive in case disconnections happen. + delete(connections, connection) + log.Warn("load snapshot: unexpected disconnection", "one", e.Conn.One, "other", e.Conn.Other) + continue + } + // Check that the connection is from the snapshot. + for _, conn := range snap.Conns { + if conn.One == e.Conn.One && conn.Other == e.Conn.Other { + // Add the connection to the set of established connections. + connections[connection] = struct{}{} + if len(connections) == total { + // Signal that all nodes are connected. + close(allConnected) + return + } + + break + } + } + case <-done: + // Load function returned, terminate this goroutine. 
+ return + } + } + }() + + // Start connecting. for _, conn := range snap.Conns { if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up { @@ -687,6 +796,14 @@ func (net *Network) Load(snap *Snapshot) error { return err } } + + select { + // Wait until all connections from the snapshot are established. + case <-allConnected: + // Make sure that we do not wait forever. + case <-time.After(snapshotLoadTimeout): + return errors.New("snapshot connections not established") + } return nil } @@ -708,18 +825,18 @@ func (net *Network) Subscribe(events chan *Event) { } func (net *Network) executeControlEvent(event *Event) { - log.Trace("execute control event", "type", event.Type, "event", event) + log.Trace("Executing control event", "type", event.Type, "event", event) switch event.Type { case EventTypeNode: if err := net.executeNodeEvent(event); err != nil { - log.Error("error executing node event", "event", event, "err", err) + log.Error("Error executing node event", "event", event, "err", err) } case EventTypeConn: if err := net.executeConnEvent(event); err != nil { - log.Error("error executing conn event", "event", event, "err", err) + log.Error("Error executing conn event", "event", event, "err", err) } case EventTypeMsg: - log.Warn("ignoring control msg event") + log.Warn("Ignoring control msg event") } } diff --git a/vendor/github.com/ethereum/go-ethereum/params/config.go b/vendor/github.com/ethereum/go-ethereum/params/config.go index ba719b873..2935ef1f3 100644 --- a/vendor/github.com/ethereum/go-ethereum/params/config.go +++ b/vendor/github.com/ethereum/go-ethereum/params/config.go @@ -42,17 +42,17 @@ var ( EIP155Block: big.NewInt(2675000), EIP158Block: big.NewInt(2675000), ByzantiumBlock: big.NewInt(4370000), - ConstantinopleBlock: nil, + ConstantinopleBlock: big.NewInt(7080000), Ethash: new(EthashConfig), } // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ Name: "mainnet", - SectionIndex: 195, - SectionHead: common.HexToHash("0x1cdd2a84cf6c1261ffccc88f6bcefb513abd7934a96c1e909fbf74767560f16b"), - CHTRoot: common.HexToHash("0xe453333c20391d16b91b6fe11c104704f62c8dba15f69db73b4cdf7e100105eb"), - BloomRoot: common.HexToHash("0x47f30069473072e00d2cdca146dce40f0aad243dfc8221bf810822c091674efe"), + SectionIndex: 208, + SectionHead: common.HexToHash("0x5e9f7696c397d9df8f3b1abda857753575c6f5cff894e1a3d9e1a2af1bd9d6ac"), + CHTRoot: common.HexToHash("0x954a63134f6897f015f026387c59c98c4dae7b336610ff5a143455aac9153e9d"), + BloomRoot: common.HexToHash("0x8006c5e44b14d90d7cc9cd5fa1cb48cf53697ee3bbbf4b76fdfa70b0242500a9"), } // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. @@ -73,10 +73,10 @@ var ( // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. 
TestnetTrustedCheckpoint = &TrustedCheckpoint{ Name: "testnet", - SectionIndex: 126, - SectionHead: common.HexToHash("0x48f7dd4c9c60be04bf15fd4d0bcac46ddd8caf6b01d6fb8f8e1f7955cdd1337a"), - CHTRoot: common.HexToHash("0x6e54cb80a1884881ea1a114243af9012c95e0296b47f103b5ab124313968508e"), - BloomRoot: common.HexToHash("0xb55accf6dce6455b47db8510d15eff38d0ed7378829f3036d26b48e7d15da3f6"), + SectionIndex: 139, + SectionHead: common.HexToHash("0x9fad89a5e3b993c8339b9cf2cbbeb72cd08774ea6b71b105b3dd880420c618f4"), + CHTRoot: common.HexToHash("0xc815833881989c5d2035147e1a79a33d22cbc5313e104ff01e6ab405bd28b317"), + BloomRoot: common.HexToHash("0xd94ee9f3c480858f53ec5d059aebdbb2e8d904702f100875ee59ec5f366e841d"), } // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. @@ -90,7 +90,7 @@ var ( EIP155Block: big.NewInt(3), EIP158Block: big.NewInt(3), ByzantiumBlock: big.NewInt(1035301), - ConstantinopleBlock: nil, + ConstantinopleBlock: big.NewInt(3660663), Clique: &CliqueConfig{ Period: 15, Epoch: 30000, @@ -100,10 +100,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ Name: "rinkeby", - SectionIndex: 93, - SectionHead: common.HexToHash("0xdefb94aa217ab38f2919f7318d1d5476bd2aabf1ec9148047fe03e555615e0b4"), - CHTRoot: common.HexToHash("0x52c98c2fe508a8332c27dc10538f3fead43306e2b22b597587763c2fe6586da6"), - BloomRoot: common.HexToHash("0x93d83be0c1b12f732b1a027ecdfb16f39b0d020b8c10bfb90e76f3b01adfc5b6"), + SectionIndex: 105, + SectionHead: common.HexToHash("0xec8147d43f936258aaf1b9b9ec91b0a853abf7109f436a23649be809ea43d507"), + CHTRoot: common.HexToHash("0xd92703b444846a3db928e87e450770e5d5cbe193131dc8f7c4cf18b4de925a75"), + BloomRoot: common.HexToHash("0xff45a6f807138a2cde0cea0c209d9ce5ad8e43ccaae5a7c41af801bb72a1ef96"), } // AllEthashProtocolChanges contains every protocol change (EIPs) introduced @@ -111,16 +111,16 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. 
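// Note (editorial): in the positional ChainConfig literals below, the
// Constantinople field changes from nil to big.NewInt(0), enabling the
// Constantinople rules from genesis in these development and test
// configurations, matching the mainnet/Rinkeby activation blocks set above.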
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int)) ) diff --git a/vendor/github.com/ethereum/go-ethereum/params/version.go b/vendor/github.com/ethereum/go-ethereum/params/version.go index 4cacf8565..ba9ab202a 100644 --- a/vendor/github.com/ethereum/go-ethereum/params/version.go +++ b/vendor/github.com/ethereum/go-ethereum/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 8 // Minor version component of the current release - VersionPatch = 17 // Patch version component of the current release + VersionPatch = 20 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/client.go b/vendor/github.com/ethereum/go-ethereum/rpc/client.go index d96189a2d..6254c95ff 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/client.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/client.go @@ -25,7 +25,6 @@ import ( "fmt" "net" "net/url" - "os" "reflect" "strconv" "strings" @@ -118,7 +117,8 @@ type Client struct { // for dispatch close chan struct{} - didQuit chan struct{} // closed when client quits + closing chan struct{} // closed when client is quitting + didClose chan struct{} // closed when client quits reconnected chan net.Conn // where write/reconnect sends the new connection readErr chan error // errors from read readResp chan []*jsonrpcMessage // valid messages from read @@ -181,45 +181,6 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) { } } -type StdIOConn struct{} - -func (io StdIOConn) Read(b []byte) (n int, err error) { - return os.Stdin.Read(b) -} - -func (io StdIOConn) Write(b []byte) (n int, err error) { - return os.Stdout.Write(b) -} - -func (io StdIOConn) Close() error { - return nil -} - -func (io StdIOConn) LocalAddr() net.Addr { - return &net.UnixAddr{Name: "stdio", Net: "stdio"} -} - -func (io StdIOConn) RemoteAddr() net.Addr { - return &net.UnixAddr{Name: "stdio", Net: "stdio"} -} - -func (io StdIOConn) SetDeadline(t time.Time) error { - return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} -} - -func (io StdIOConn) SetReadDeadline(t time.Time) error { - return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} -} - -func (io StdIOConn) SetWriteDeadline(t time.Time) error { - return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} -} -func DialStdIO(ctx context.Context) (*Client, error) { - return newClient(ctx, func(_ context.Context) (net.Conn, error) { - return StdIOConn{}, 
nil - }) -} - func newClient(initctx context.Context, connectFunc func(context.Context) (net.Conn, error)) (*Client, error) { conn, err := connectFunc(initctx) if err != nil { @@ -231,7 +192,8 @@ func newClient(initctx context.Context, connectFunc func(context.Context) (net.C isHTTP: isHTTP, connectFunc: connectFunc, close: make(chan struct{}), - didQuit: make(chan struct{}), + closing: make(chan struct{}), + didClose: make(chan struct{}), reconnected: make(chan net.Conn), readErr: make(chan error), readResp: make(chan []*jsonrpcMessage), @@ -268,8 +230,8 @@ func (c *Client) Close() { } select { case c.close <- struct{}{}: - <-c.didQuit - case <-c.didQuit: + <-c.didClose + case <-c.didClose: } } @@ -469,7 +431,9 @@ func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error // This can happen if the client is overloaded or unable to keep up with // subscription notifications. return ctx.Err() - case <-c.didQuit: + case <-c.closing: + return ErrClientQuit + case <-c.didClose: return ErrClientQuit } } @@ -504,7 +468,7 @@ func (c *Client) reconnect(ctx context.Context) error { case c.reconnected <- newconn: c.writeConn = newconn return nil - case <-c.didQuit: + case <-c.didClose: newconn.Close() return ErrClientQuit } @@ -522,8 +486,9 @@ func (c *Client) dispatch(conn net.Conn) { requestOpLock = c.requestOp // nil while the send lock is held reading = true // if true, a read loop is running ) - defer close(c.didQuit) + defer close(c.didClose) defer func() { + close(c.closing) c.closeRequestOps(ErrClientQuit) conn.Close() if reading { diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/doc.go b/vendor/github.com/ethereum/go-ethereum/rpc/doc.go index 9a6c4abbc..c60381b5a 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/doc.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/doc.go @@ -32,7 +32,7 @@ An example method: func (s *CalcService) Add(a, b int) (int, error) When the returned error isn't nil the returned integer is ignored and the error is -send back to the client. Otherwise the returned integer is send back to the client. +sent back to the client. Otherwise the returned integer is sent back to the client. Optional arguments are supported by accepting pointer values as arguments. E.g. 
if we want to do the addition in an optional finite field we can accept a mod diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go b/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go index b05e503d7..4cce1cf74 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/ipc.go @@ -29,12 +29,12 @@ func (srv *Server) ServeListener(l net.Listener) error { for { conn, err := l.Accept() if netutil.IsTemporaryError(err) { - log.Warn("RPC accept error", "err", err) + log.Warn("IPC accept error", "err", err) continue } else if err != nil { return err } - log.Trace("Accepted connection", "addr", conn.RemoteAddr()) + log.Trace("IPC accepted connection") go srv.ServeCodec(NewJSONCodec(conn), OptionMethodInvocation|OptionSubscriptions) } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/store.go b/vendor/github.com/ethereum/go-ethereum/rpc/ipc_js.go similarity index 59% rename from vendor/github.com/ethereum/go-ethereum/swarm/state/store.go rename to vendor/github.com/ethereum/go-ethereum/rpc/ipc_js.go index fb7fe258f..eceef050e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/state/store.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/ipc_js.go @@ -1,4 +1,4 @@ -// Copyright 2018 The go-ethereum Authors +// Copyright 2015 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,13 +14,24 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package state +// +build js -// Store defines methods required to get, set, delete values for different keys -// and close the underlying resources. -type Store interface { - Get(key string, i interface{}) (err error) - Put(key string, i interface{}) (err error) - Delete(key string) (err error) - Close() error +package rpc + +import ( + "context" + "errors" + "net" +) + +var errNotSupported = errors.New("rpc: not supported") + +// ipcListen will create a named pipe on the given endpoint. +func ipcListen(endpoint string) (net.Listener, error) { + return nil, errNotSupported +} + +// newIPCConnection will connect to a named pipe with the given endpoint as name. +func newIPCConnection(ctx context.Context, endpoint string) (net.Conn, error) { + return nil, errNotSupported } diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/stdio.go b/vendor/github.com/ethereum/go-ethereum/rpc/stdio.go new file mode 100644 index 000000000..ea552cca2 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/rpc/stdio.go @@ -0,0 +1,66 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
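(Editorial note: the new stdio.go below restores the DialStdIO helper that the client.go hunk above removed, renaming the exported StdIOConn type to the unexported stdioConn; the behaviour, a net.Conn backed by os.Stdin/os.Stdout that rejects deadlines, is otherwise unchanged.)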
+ +package rpc + +import ( + "context" + "errors" + "net" + "os" + "time" +) + +// DialStdIO creates a client on stdin/stdout. +func DialStdIO(ctx context.Context) (*Client, error) { + return newClient(ctx, func(_ context.Context) (net.Conn, error) { + return stdioConn{}, nil + }) +} + +type stdioConn struct{} + +func (io stdioConn) Read(b []byte) (n int, err error) { + return os.Stdin.Read(b) +} + +func (io stdioConn) Write(b []byte) (n int, err error) { + return os.Stdout.Write(b) +} + +func (io stdioConn) Close() error { + return nil +} + +func (io stdioConn) LocalAddr() net.Addr { + return &net.UnixAddr{Name: "stdio", Net: "stdio"} +} + +func (io stdioConn) RemoteAddr() net.Addr { + return &net.UnixAddr{Name: "stdio", Net: "stdio"} +} + +func (io stdioConn) SetDeadline(t time.Time) error { + return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} +} + +func (io stdioConn) SetReadDeadline(t time.Time) error { + return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} +} + +func (io stdioConn) SetWriteDeadline(t time.Time) error { + return &net.OpError{Op: "set", Net: "stdio", Source: nil, Addr: nil, Err: errors.New("deadline not supported")} +} diff --git a/vendor/github.com/ethereum/go-ethereum/rpc/subscription.go b/vendor/github.com/ethereum/go-ethereum/rpc/subscription.go index 6ce7befa1..6bbb6f75d 100644 --- a/vendor/github.com/ethereum/go-ethereum/rpc/subscription.go +++ b/vendor/github.com/ethereum/go-ethereum/rpc/subscription.go @@ -52,9 +52,10 @@ type notifierKey struct{} // Server callbacks use the notifier to send notifications. type Notifier struct { codec ServerCodec - subMu sync.RWMutex // guards active and inactive maps + subMu sync.Mutex active map[ID]*Subscription inactive map[ID]*Subscription + buffer map[ID][]interface{} // unsent notifications of inactive subscriptions } // newNotifier creates a new notifier that can be used to send subscription @@ -64,6 +65,7 @@ func newNotifier(codec ServerCodec) *Notifier { codec: codec, active: make(map[ID]*Subscription), inactive: make(map[ID]*Subscription), + buffer: make(map[ID][]interface{}), } } @@ -88,20 +90,26 @@ func (n *Notifier) CreateSubscription() *Subscription { // Notify sends a notification to the client with the given data as payload. // If an error occurs the RPC connection is closed and the error is returned. func (n *Notifier) Notify(id ID, data interface{}) error { - n.subMu.RLock() - defer n.subMu.RUnlock() + n.subMu.Lock() + defer n.subMu.Unlock() - sub, active := n.active[id] - if active { - notification := n.codec.CreateNotification(string(id), sub.namespace, data) - if err := n.codec.Write(notification); err != nil { - n.codec.Close() - return err - } + if sub, active := n.active[id]; active { + n.send(sub, data) + } else { + n.buffer[id] = append(n.buffer[id], data) } return nil } +func (n *Notifier) send(sub *Subscription, data interface{}) error { + notification := n.codec.CreateNotification(string(sub.ID), sub.namespace, data) + err := n.codec.Write(notification) + if err != nil { + n.codec.Close() + } + return err +} + // Closed returns a channel that is closed when the RPC connection is closed. 
func (n *Notifier) Closed() <-chan interface{} { return n.codec.Closed() @@ -127,9 +135,15 @@ func (n *Notifier) unsubscribe(id ID) error { func (n *Notifier) activate(id ID, namespace string) { n.subMu.Lock() defer n.subMu.Unlock() + if sub, found := n.inactive[id]; found { sub.namespace = namespace n.active[id] = sub delete(n.inactive, id) + // Send buffered notifications. + for _, data := range n.buffer[id] { + n.send(sub, data) + } + delete(n.buffer, id) } } diff --git a/vendor/github.com/ethereum/go-ethereum/signer/core/api.go b/vendor/github.com/ethereum/go-ethereum/signer/core/api.go index c380fe977..e9a335785 100644 --- a/vendor/github.com/ethereum/go-ethereum/signer/core/api.go +++ b/vendor/github.com/ethereum/go-ethereum/signer/core/api.go @@ -82,7 +82,7 @@ type SignerUI interface { // OnSignerStartup is invoked when the signer boots, and tells the UI info about external API location and version // information OnSignerStartup(info StartupInfo) - // OnInputRequried is invoked when clef requires user input, for example master password or + // OnInputRequired is invoked when clef requires user input, for example master password or // pin-code for unlocking hardware wallets OnInputRequired(info UserInputRequest) (UserInputResponse, error) } @@ -197,6 +197,12 @@ type ( Message struct { Text string `json:"text"` } + PasswordRequest struct { + Prompt string `json:"prompt"` + } + PasswordResponse struct { + Password string `json:"password"` + } StartupInfo struct { Info map[string]interface{} `json:"info"` } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS b/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS index d4204e08c..4b9ca96eb 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS +++ b/vendor/github.com/ethereum/go-ethereum/swarm/OWNERS @@ -7,7 +7,6 @@ swarm ├── fuse ────────────────── @jmozah, @holisticode ├── grafana_dashboards ──── @nonsense ├── metrics ─────────────── @nonsense, @holisticode -├── multihash ───────────── @nolash ├── network ─────────────── ethersphere │ ├── bitvector ───────── @zelig, @janos, @gbalint │ ├── priorityqueue ───── @zelig, @janos, @gbalint diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go index 52d909827..e54369f9a 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/act.go @@ -458,6 +458,9 @@ func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees return nil, nil, nil, err } sessionKey, err := NewSessionKeyPK(privateKey, granteePub, salt) + if err != nil { + return nil, nil, nil, err + } hasher := sha3.NewKeccak256() hasher.Write(append(sessionKey, 0)) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go index 7bb631967..33a8e3539 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/api.go @@ -42,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/feed" @@ -417,7 +416,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage return reader, mimeType, status, nil, err } // get the data of 
the update - _, rsrcData, err := a.feed.GetContent(entry.Feed) + _, contentAddr, err := a.feed.GetContent(entry.Feed) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -425,23 +424,23 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage return reader, mimeType, status, nil, err } - // extract multihash - decodedMultihash, err := multihash.FromMultihash(rsrcData) - if err != nil { + // extract content hash + if len(contentAddr) != storage.AddressLength { apiGetInvalid.Inc(1) status = http.StatusUnprocessableEntity - log.Warn("invalid multihash in feed update", "err", err) - return reader, mimeType, status, nil, err + errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr)) + log.Warn(errorMessage) + return reader, mimeType, status, nil, errors.New(errorMessage) } - manifestAddr = storage.Address(decodedMultihash) - log.Trace("feed update contains multihash", "key", manifestAddr) + manifestAddr = storage.Address(contentAddr) + log.Trace("feed update contains swarm hash", "key", manifestAddr) - // get the manifest the multihash digest points to + // get the manifest the swarm hash points to trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err)) + log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err)) return reader, mimeType, status, nil, err } @@ -451,8 +450,8 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage if entry == nil { status = http.StatusNotFound apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path) - log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path) + err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path) + log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path) return reader, mimeType, status, nil, err } } @@ -472,7 +471,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage // no entry found status = http.StatusNotFound apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest entry for '%s' not found", path) + err = fmt.Errorf("Not found: could not find resource '%s'", path) log.Trace("manifest entry not found", "key", contentAddr, "path", path) } return diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go index d9837ca73..f793ca8b8 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/client/client.go @@ -19,6 +19,7 @@ package client import ( "archive/tar" "bytes" + "context" "encoding/json" "errors" "fmt" @@ -26,6 +27,7 @@ import ( "io/ioutil" "mime/multipart" "net/http" + "net/http/httptrace" "net/textproto" "net/url" "os" @@ -33,9 +35,14 @@ import ( "regexp" "strconv" "strings" + "time" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" + "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage/feed" + "github.com/pborman/uuid" ) var ( @@ -474,6 +481,11 @@ type UploadFn func(file *File) error // TarUpload uses the given Uploader to upload files to swarm as a tar stream, // 
returning the resulting manifest hash func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) { + ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload") + defer sp.Finish() + + var tn time.Time + reqR, reqW := io.Pipe() defer reqR.Close() addr := hash @@ -489,6 +501,12 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t if err != nil { return "", err } + + trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn) + + req = req.WithContext(httptrace.WithClientTrace(ctx, trace)) + transport := http.DefaultTransport + req.Header.Set("Content-Type", "application/x-tar") if defaultPath != "" { q := req.URL.Query() @@ -529,8 +547,8 @@ func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, t } reqW.CloseWithError(err) }() - - res, err := http.DefaultClient.Do(req) + tn = time.Now() + res, err := transport.RoundTrip(req) if err != nil { return "", err } @@ -728,3 +746,57 @@ func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain strin } return &metadata, nil } + +func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace { + trace := &httptrace.ClientTrace{ + GetConn: func(_ string) { + log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn)) + }, + GotConn: func(_ httptrace.GotConnInfo) { + log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn)) + }, + PutIdleConn: func(err error) { + log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err) + metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn)) + }, + GotFirstResponseByte: func() { + log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn)) + }, + Got100Continue: func() { + log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn)) + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn)) + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn)) + }, + ConnectStart: func(network, addr string) { + log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr) + metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn)) + }, + ConnectDone: func(network, addr string, err error) { + log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err) + metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn)) + }, + WroteHeaders: func() { + log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn)) + }, + Wait100Continue: func() { + log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid) + 
metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn)) + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid) + metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn)) + }, + } + return trace +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/filesystem.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/filesystem.go index 43695efc1..266ef71be 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/filesystem.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/filesystem.go @@ -122,6 +122,10 @@ func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error var wait func(context.Context) error ctx := context.TODO() hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt) + if err != nil { + errors[i] = err + return + } if hash != nil { list[i].Hash = hash.Hex() } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go index 3b2dcc7d5..f7f819eab 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/middleware.go @@ -5,6 +5,7 @@ import ( "net/http" "runtime/debug" "strings" + "time" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" @@ -50,7 +51,7 @@ func ParseURI(h http.Handler) http.Handler { uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/")) if err != nil { w.WriteHeader(http.StatusBadRequest) - RespondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest) + respondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest) return } if uri.Addr != "" && strings.HasPrefix(uri.Addr, "0x") { @@ -73,9 +74,15 @@ func ParseURI(h http.Handler) http.Handler { func InitLoggingResponseWriter(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tn := time.Now() + writer := newLoggingResponseWriter(w) h.ServeHTTP(writer, r) - log.Debug("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode) + + ts := time.Since(tn) + log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts*time.Millisecond) + metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts) + metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts) }) } @@ -88,6 +95,7 @@ func InstrumentOpenTracing(h http.Handler) http.Handler { } spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme) ctx, sp := spancontext.StartSpan(r.Context(), spanName) + defer sp.Finish() h.ServeHTTP(w, r.WithContext(ctx)) }) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/response.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/response.go index c9fb9d285..d4e81d7f6 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/response.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/response.go @@ -53,23 +53,23 @@ func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.Manife log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) msg := "" if list.Entries == nil { - RespondError(w, r, "Could not resolve", http.StatusInternalServerError) + respondError(w, r, "Could not resolve", 
http.StatusInternalServerError) return } requestUri := strings.TrimPrefix(r.RequestURI, "/") uri, err := api.Parse(requestUri) if err != nil { - RespondError(w, r, "Bad Request", http.StatusBadRequest) + respondError(w, r, "Bad Request", http.StatusBadRequest) } uri.Scheme = "bzz-list" msg += fmt.Sprintf("Disambiguation:
<br/>Your request may refer to multiple choices.<br/>
Click <a href=\"%s\">here</a> if your browser does not redirect you within 5 seconds.<br/>
", "/"+uri.String()) - RespondTemplate(w, r, "error", msg, http.StatusMultipleChoices) + respondTemplate(w, r, "error", msg, http.StatusMultipleChoices) } -func RespondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) { - log.Debug("RespondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) +func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) { + log.Debug("respondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context())) respond(w, r, &ResponseParams{ Code: code, Msg: template.HTML(msg), @@ -78,13 +78,12 @@ func RespondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg s }) } -func RespondError(w http.ResponseWriter, r *http.Request, msg string, code int) { - log.Debug("RespondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code) - RespondTemplate(w, r, "error", msg, code) +func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) { + log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code) + respondTemplate(w, r, "error", msg, code) } func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { - w.WriteHeader(params.Code) if params.Code >= 400 { @@ -96,7 +95,7 @@ func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { // this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8" if strings.Contains(acceptHeader, "application/json") { if err := respondJSON(w, r, params); err != nil { - RespondError(w, r, "Internal server error", http.StatusInternalServerError) + respondError(w, r, "Internal server error", http.StatusInternalServerError) } } else if strings.Contains(acceptHeader, "text/html") { respondHTML(w, r, params) @@ -107,7 +106,7 @@ func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) { htmlCounter.Inc(1) - log.Debug("respondHTML", "ruid", GetRUID(r.Context())) + log.Info("respondHTML", "ruid", GetRUID(r.Context()), "code", params.Code) err := params.template.Execute(w, params) if err != nil { log.Error(err.Error()) @@ -116,14 +115,14 @@ func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { jsonCounter.Inc(1) - log.Debug("respondJSON", "ruid", GetRUID(r.Context())) + log.Info("respondJSON", "ruid", GetRUID(r.Context()), "code", params.Code) w.Header().Set("Content-Type", "application/json") return json.NewEncoder(w).Encode(params) } func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error { plaintextCounter.Inc(1) - log.Debug("respondPlaintext", "ruid", GetRUID(r.Context())) + log.Info("respondPlaintext", "ruid", GetRUID(r.Context()), "code", params.Code) w.Header().Set("Content-Type", "text/plain") strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n" strToWrite += "Message: " + string(params.Msg) + "\n" diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/sctx.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/sctx.go index 431e11735..b8dafab0b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/sctx.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/sctx.go @@ -7,14 +7,10 @@ import ( 
"github.com/ethereum/go-ethereum/swarm/sctx" ) -type contextKey int - -const ( - uriKey contextKey = iota -) +type uriKey struct{} func GetRUID(ctx context.Context) string { - v, ok := ctx.Value(sctx.HTTPRequestIDKey).(string) + v, ok := ctx.Value(sctx.HTTPRequestIDKey{}).(string) if ok { return v } @@ -22,11 +18,11 @@ func GetRUID(ctx context.Context) string { } func SetRUID(ctx context.Context, ruid string) context.Context { - return context.WithValue(ctx, sctx.HTTPRequestIDKey, ruid) + return context.WithValue(ctx, sctx.HTTPRequestIDKey{}, ruid) } func GetURI(ctx context.Context) *api.URI { - v, ok := ctx.Value(uriKey).(*api.URI) + v, ok := ctx.Value(uriKey{}).(*api.URI) if ok { return v } @@ -34,5 +30,5 @@ func GetURI(ctx context.Context) *api.URI { } func SetURI(ctx context.Context, uri *api.URI) context.Context { - return context.WithValue(ctx, uriKey, uri) + return context.WithValue(ctx, uriKey{}, uri) } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go index 370aca5a7..3c6735a73 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/server.go @@ -41,16 +41,9 @@ import ( "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/feed" - "github.com/rs/cors" ) -type resourceResponse struct { - Manifest storage.Address `json:"manifest"` - Resource string `json:"resource"` - Update storage.Address `json:"update"` -} - var ( postRawCount = metrics.NewRegisteredCounter("api.http.post.raw.count", nil) postRawFail = metrics.NewRegisteredCounter("api.http.post.raw.fail", nil) @@ -191,10 +184,10 @@ func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { if err != nil { if isDecryptError(err) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", uri.Address().String())) - RespondError(w, r, err.Error(), http.StatusUnauthorized) + respondError(w, r, err.Error(), http.StatusUnauthorized) return } - RespondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError) return } defer reader.Close() @@ -218,7 +211,7 @@ func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) { func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) { switch r.RequestURI { case "/": - RespondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200) + respondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200) return case "/robots.txt": w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat)) @@ -227,7 +220,7 @@ func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Write(faviconBytes) default: - RespondError(w, r, "Not Found", http.StatusNotFound) + respondError(w, r, "Not Found", http.StatusNotFound) } } @@ -247,26 +240,26 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) { if uri.Path != "" { postRawFail.Inc(1) - RespondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) + respondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest) return } if uri.Addr != "" && uri.Addr != "encrypt" { 
postRawFail.Inc(1) - RespondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) + respondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest) return } if r.Header.Get("Content-Length") == "" { postRawFail.Inc(1) - RespondError(w, r, "missing Content-Length header in request", http.StatusBadRequest) + respondError(w, r, "missing Content-Length header in request", http.StatusBadRequest) return } addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt) if err != nil { postRawFail.Inc(1) - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -290,7 +283,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) if err != nil { postFilesFail.Inc(1) - RespondError(w, r, err.Error(), http.StatusBadRequest) + respondError(w, r, err.Error(), http.StatusBadRequest) return } @@ -305,7 +298,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { addr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { postFilesFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError) return } log.Debug("resolved key", "ruid", ruid, "key", addr) @@ -313,7 +306,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { addr, err = s.api.NewManifest(r.Context(), toEncrypt) if err != nil { postFilesFail.Inc(1) - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } log.Debug("new manifest", "ruid", ruid, "key", addr) @@ -324,7 +317,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { case "application/x-tar": _, err := s.handleTarUpload(r, mw) if err != nil { - RespondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError) return err } return nil @@ -337,7 +330,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) { }) if err != nil { postFilesFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError) return } @@ -373,7 +366,7 @@ func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api } var size int64 - var reader io.Reader = part + var reader io.Reader if contentLength := part.Header.Get("Content-Length"); contentLength != "" { size, err = strconv.ParseInt(contentLength, 10, 64) if err != nil { @@ -409,7 +402,6 @@ func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api Path: path, ContentType: part.Header.Get("Content-Type"), Size: size, - ModTime: time.Now(), } log.Debug("adding path to new manifest", "ruid", ruid, "bytes", entry.Size, "path", entry.Path) contentKey, err := mw.AddEntry(r.Context(), reader, entry) @@ -428,7 +420,6 @@ func (s *Server) handleDirectUpload(r *http.Request, mw *api.ManifestWriter) err ContentType: r.Header.Get("Content-Type"), Mode: 0644, Size: r.ContentLength, - ModTime: time.Now(), }) if err != nil { return err @@ -448,7 
+439,7 @@ func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) { newKey, err := s.api.Delete(r.Context(), uri.Addr, uri.Path) if err != nil { deleteFail.Inc(1) - RespondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError) return } @@ -469,7 +460,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { // Creation and update must send feed.updateRequestJSON JSON structure body, err := ioutil.ReadAll(r.Body) if err != nil { - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -480,7 +471,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI { httpStatus = http.StatusNotFound } - RespondError(w, r, fmt.Sprintf("cannot retrieve feed from manifest: %s", err), httpStatus) + respondError(w, r, fmt.Sprintf("cannot retrieve feed from manifest: %s", err), httpStatus) return } @@ -489,32 +480,32 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { query := r.URL.Query() if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters - RespondError(w, r, err.Error(), http.StatusBadRequest) + respondError(w, r, err.Error(), http.StatusBadRequest) return } - if updateRequest.IsUpdate() { + switch { + case updateRequest.IsUpdate(): // Verify that the signature is intact and that the signer is authorized // to update this feed // Check this early, to avoid creating a feed and then not being able to set its first update. if err = updateRequest.Verify(); err != nil { - RespondError(w, r, err.Error(), http.StatusForbidden) + respondError(w, r, err.Error(), http.StatusForbidden) return } _, err = s.api.FeedsUpdate(r.Context(), &updateRequest) if err != nil { - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } - } - - if query.Get("manifest") == "1" { + fallthrough + case query.Get("manifest") == "1": // we create a manifest so we can retrieve feed updates with bzz:// later // this manifest has a special "feed type" manifest, and saves the // feed identification used to retrieve feed updates later m, err := s.api.NewFeedManifest(r.Context(), &updateRequest.Feed) if err != nil { - RespondError(w, r, fmt.Sprintf("failed to create feed manifest: %v", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("failed to create feed manifest: %v", err), http.StatusInternalServerError) return } // the key to the manifest will be passed back to the client @@ -522,12 +513,14 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { // the manifest key can be set as content in the resolver of the ENS name outdata, err := json.Marshal(m) if err != nil { - RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) return } fmt.Fprint(w, string(outdata)) w.Header().Add("Content-type", "application/json") + default: + respondError(w, r, "Missing signature in feed update request", http.StatusBadRequest) } } @@ -559,7 +552,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { if err == 
api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI { httpStatus = http.StatusNotFound } - RespondError(w, r, fmt.Sprintf("cannot retrieve feed information from manifest: %s", err), httpStatus) + respondError(w, r, fmt.Sprintf("cannot retrieve feed information from manifest: %s", err), httpStatus) return } @@ -568,12 +561,12 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), fd) if err != nil { getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", fd.Hex(), err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", fd.Hex(), err), http.StatusNotFound) return } rawResponse, err := unsignedUpdateRequest.MarshalJSON() if err != nil { - RespondError(w, r, fmt.Sprintf("cannot encode unsigned feed update request: %v", err), http.StatusInternalServerError) + respondError(w, r, fmt.Sprintf("cannot encode unsigned feed update request: %v", err), http.StatusInternalServerError) return } w.Header().Add("Content-type", "application/json") @@ -584,7 +577,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { lookupParams := &feed.Query{Feed: *fd} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version - RespondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest) + respondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest) return } @@ -593,7 +586,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { // any error from the switch statement will end up here if err != nil { code, err2 := s.translateFeedError(w, r, "feed lookup fail", err) - RespondError(w, r, err2.Error(), code) + respondError(w, r, err2.Error(), code) return } @@ -639,7 +632,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { addr, err := s.api.ResolveURI(r.Context(), uri, pass) if err != nil { getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz:///path, so we are sure it is immutable. 
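Editorial note: the HandlePostFeed hunk earlier in this file's diff replaces two independent if blocks with a single expressionless switch: the update case ends in fallthrough, the manifest case handles both paths, and a request that is neither an update nor a manifest query is now rejected with 400. A minimal, self-contained sketch of that control flow follows; handle, isUpdate and wantManifest are hypothetical stand-ins, not the actual handler or its parameters.

package main

import "fmt"

// handle mirrors the HandlePostFeed switch: because the update case ends
// in fallthrough, a successful update always executes the manifest case
// body as well, regardless of whether wantManifest is true. isUpdate and
// wantManifest stand in for updateRequest.IsUpdate() and
// query.Get("manifest") == "1".
func handle(isUpdate, wantManifest bool) string {
	switch {
	case isUpdate:
		// verify and apply the update here, then fall through
		fallthrough
	case wantManifest:
		return "feed manifest created"
	default:
		return "400: missing signature in feed update request"
	}
}

func main() {
	fmt.Println(handle(true, false))  // update: fallthrough still creates the manifest
	fmt.Println(handle(false, true))  // manifest query only
	fmt.Println(handle(false, false)) // neither: rejected
}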
@@ -663,7 +656,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) { reader, isEncrypted := s.api.Retrieve(r.Context(), addr) if _, err := reader.Size(r.Context(), nil); err != nil { getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) return } @@ -703,7 +696,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { addr, err := s.api.Resolve(r.Context(), uri.Addr) if err != nil { getListFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr) @@ -713,10 +706,10 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) { getListFail.Inc(1) if isDecryptError(err) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", addr.String())) - RespondError(w, r, err.Error(), http.StatusUnauthorized) + respondError(w, r, err.Error(), http.StatusUnauthorized) return } - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -764,7 +757,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr) if err != nil { getFileFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) return } } else { @@ -788,17 +781,17 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { if err != nil { if isDecryptError(err) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) - RespondError(w, r, err.Error(), http.StatusUnauthorized) + respondError(w, r, err.Error(), http.StatusUnauthorized) return } switch status { case http.StatusNotFound: getFileNotFound.Inc(1) - RespondError(w, r, err.Error(), http.StatusNotFound) + respondError(w, r, err.Error(), http.StatusNotFound) default: getFileFail.Inc(1) - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) } return } @@ -811,10 +804,10 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { getFileFail.Inc(1) if isDecryptError(err) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr)) - RespondError(w, r, err.Error(), http.StatusUnauthorized) + respondError(w, r, err.Error(), http.StatusUnauthorized) return } - RespondError(w, r, err.Error(), http.StatusInternalServerError) + respondError(w, r, err.Error(), http.StatusInternalServerError) return } @@ -827,7 +820,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) { // check the root chunk exists by retrieving the file's size if _, err := reader.Size(r.Context(), nil); err != nil { getFileNotFound.Inc(1) - RespondError(w, r, fmt.Sprintf("file not found %s: %s", uri, err), http.StatusNotFound) + respondError(w, r, fmt.Sprintf("file not found %s: %s", uri, err), http.StatusNotFound) return } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/test_server.go similarity index 99% rename from 
vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go rename to vendor/github.com/ethereum/go-ethereum/swarm/api/http/test_server.go index cdf9239d0..9245c9c5b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/http.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/http/test_server.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package testutil +package http import ( "io/ioutil" diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go b/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go index 7c4cc88e4..890ed88bd 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/api/manifest.go @@ -557,7 +557,6 @@ func (mt *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manif if path != entry.Path { return nil, 0 } - pos = epl } } return nil, 0 diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json b/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json deleted file mode 100644 index 2d64380ba..000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/ldbstore.json +++ /dev/null @@ -1,2278 +0,0 @@ -{ - "annotations": { - "list": [ - { - "$$hashKey": "object:325", - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 1, - "id": 5, - "iteration": 1527598894689, - "links": [], - "panels": [ - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 40, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 42, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.get.cachehit.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore get cachehit", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 
null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 43, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.get.cachemiss.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore get cachemiss", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 7 - }, - "id": 44, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.getorcreaterequest.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total LocalStore.GetOrCreateRequest", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true 
- } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 7 - }, - "id": 47, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.getorcreaterequest.errfetching.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore GetOrCreateRequest ErrFetching", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 45, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.getorcreaterequest.hit.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore.GetOrCreateRequest hit", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, 
- "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 13 - }, - "id": 49, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.getorcreaterequest.miss.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore GetOrCreateRequest miss", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 19 - }, - "id": 48, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.get.error.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore get error", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] 
- }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 19 - }, - "id": 46, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.localstore.get.errfetching.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LocalStore get ErrFetching", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "LocalStore", - "type": "row" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 1 - }, - "id": 27, - "panels": [], - "title": "LDBStore", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 2 - }, - "id": 29, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.get.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore get", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": 
null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 2 - }, - "id": 30, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.put.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore put", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 31, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.synciterator.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore SyncIterator", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 8 - }, - "id": 32, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.synciterator.seek.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore SyncIterator Seek/Next", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 50, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.collectgarbage.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore Collect Garbage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, 
- "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 14 - }, - "id": 51, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbstore.collectgarbage.delete.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBStore Collect Garbage - Actual Deletes", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 34, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 39 - }, - "id": 36, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbdatabase.get.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBDatabase get", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": 
null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 39 - }, - "id": 37, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbdatabase.write.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBDatabase write", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 45 - }, - "id": 38, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.ldbdatabase.newiterator.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LDBDatabase NewIterator", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "LDBDatabase", - "type": "row" - } - ], - "refresh": "10s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "text": "10s", - "value": "10s" - }, - "hide": 0, - "label": "resolution", - "name": "myinterval", - "options": [ - { - "selected": false, - "text": "5s", - "value": "5s" - }, - { - "selected": true, - "text": "10s", - "value": "10s" - }, - { - "selected": false, - "text": "30s", - "value": "30s" - }, - { - "selected": false, - "text": "100s", - "value": "100s" - } - ], - "query": "5s,10s,30s,100s", - "refresh": 2, - "type": "interval" - }, - { - "allValue": null, - "current": { - "text": "swarm_30399 + swarm_30400 + swarm_30401", - "value": [ - "swarm_30399", - "swarm_30400", - "swarm_30401" - ] - }, - "datasource": "metrics", - "hide": 0, - "includeAll": true, - "label": null, - "multi": true, - "name": "host", - "options": [], - "query": "SHOW TAG VALUES WITH KEY = \"host\"", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "swarm.http.request.GET.time.span", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "LDBStore and LDBDatabase", - "uid": "zS6beG7iz", - "version": 28 -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json b/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json deleted file mode 100644 index 3ee244d15..000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/grafana_dashboards/swarm.json +++ /dev/null @@ -1,3198 +0,0 @@ -{ - "annotations": { - "list": [ - { - "$$hashKey": "object:147", - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 1, - "id": 2, - "iteration": 1527598859072, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 34, - "panels": [], - "title": "P2P", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 36, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.send.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - 
"select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "P2P Send() - messages sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 37, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "p95($tag_host)", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.send_t.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "p95" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "P2P Send() timer - 95%ile", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 10 - }, - "id": 38, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "1 $tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.sendpriority.1.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [] - }, - { - "alias": "2 $tag_host", - "groupBy": [ - 
{ - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.sendpriority.2.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [] - }, - { - "alias": "3 $tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.sendpriority.3.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "C", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "P2P SendPriority() - messages sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 10 - }, - "id": 39, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "1 $tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.sendpriority_t.1.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "p95" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "alias": "2 $tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.sendpriority_t.2.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "p95" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "P2P SendPriority() timer - 95%ile", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - 
"max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 19 - }, - "id": 40, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "none" - ], - "type": "fill" - } - ], - "measurement": "swarm.registry.peers.gauge", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "last" - } - ] - ], - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Registry Peers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 32, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 2 - }, - "id": 14, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.stack.uptime.gauge", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Uptime", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true 
- }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "Uptime", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 29 - }, - "id": 28, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 7 - }, - "id": 2, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "GET", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "measurement": "swarm.http.request.GET.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - }, - { - "alias": "POST", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "measurement": "swarm.http.request.POST.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total HTTP Requests", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 7 - }, - "id": 26, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.http.request.GET.time.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { 
- "params": [ - "p95" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "HTTP GET requests 95% timer", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 15, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.http.request.GET.time.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "p50" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "HTTP GET requests 50% timer", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 13 - }, - "id": 8, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "POST", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.http.request.POST.time.span", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "p95" - ], - "type": "field" - 
}, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "HTTP POST requests 95% timer", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ns", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "HTTP", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 30 - }, - "id": 30, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 5, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 16, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.lazychunkreader.read.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LazyChunkReader read() calls", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 5, - "w": 12, - "x": 12, - "y": 8 - }, - "id": 18, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.lazychunkreader.read.err.count", 
- "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LazyChunkReader read errors", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 5, - "w": 12, - "x": 0, - "y": 13 - }, - "id": 17, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.lazychunkreader.read.bytes.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "LazyChunkReader bytes read", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "decbytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "LazyChunkReader", - "type": "row" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 31 - }, - "id": 25, - "panels": [], - "title": "All measurements", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 3, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - 
"groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.api.get.count.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "API Get (BZZ)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 32 - }, - "id": 13, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.network.stream.request_from_peers.count.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Request from peers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 38 - }, - "id": 11, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - 
], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.network.stream.received_chunks.count.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Received chunks", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 38 - }, - "id": 12, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.storage.cache.requests.size.gauge", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "max" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Requests cache entries", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 44 - }, - "id": 9, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - 
"type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.network.stream.handle_retrieve_request_msg.count.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Handle retrieve request msg", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 44 - }, - "id": 20, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.syncer.setnextbatch.iterator.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "syncer setnextbatch iterator calls", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 50 - }, - "id": 21, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - 
"groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.handlewantedhashesmsg.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "peer HandleWantedHashesMsg", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 50 - }, - "id": 22, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.handlesubscribemsg.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "peer HandleSubscribeMsg", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 56 - }, - "id": 23, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": 
"influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.handlewantedhashesmsg.actualget.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "peer HandleWantedHashesMsg actual get", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "metrics", - "fill": 1, - "gridPos": { - "h": 6, - "w": 12, - "x": 12, - "y": 56 - }, - "id": 19, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "$tag_host", - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$myinterval" - ], - "type": "time" - }, - { - "params": [ - "host" - ], - "type": "tag" - }, - { - "params": [ - "0" - ], - "type": "fill" - } - ], - "measurement": "swarm.peer.handleofferedhashes.count", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "sum" - } - ] - ], - "tags": [ - { - "key": "host", - "operator": "=~", - "value": "/^$host$/" - } - ] - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "peer OfferedHashesMsg", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "refresh": "30s", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "text": "10s", - "value": "10s" - }, - "hide": 0, - "label": "resolution", - "name": "myinterval", - "options": [ - { - "selected": false, - "text": "5s", - "value": "5s" - }, - { - "selected": true, - "text": "10s", - "value": "10s" - }, - { - "selected": false, - "text": "30s", - "value": "30s" - }, - { - "selected": false, - "text": "100s", - "value": "100s" - } - ], - "query": "5s,10s,30s,100s", - "refresh": 2, - "type": "interval" - }, - { - 
"allValue": null, - "current": { - "text": "swarm_30399 + swarm_30400 + swarm_30401 + swarm_30402", - "value": [ - "swarm_30399", - "swarm_30400", - "swarm_30401", - "swarm_30402" - ] - }, - "datasource": "metrics", - "hide": 0, - "includeAll": true, - "label": null, - "multi": true, - "name": "host", - "options": [], - "query": "SHOW TAG VALUES WITH KEY = \"host\"", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "swarm.http.request.GET.time.span", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Swarm", - "uid": "vmEtxxgmz", - "version": 138 -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go b/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go index 79490fd36..7c12120a6 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go @@ -27,26 +27,26 @@ import ( ) var ( - metricsEnableInfluxDBExportFlag = cli.BoolFlag{ + MetricsEnableInfluxDBExportFlag = cli.BoolFlag{ Name: "metrics.influxdb.export", Usage: "Enable metrics export/push to an external InfluxDB database", } - metricsInfluxDBEndpointFlag = cli.StringFlag{ + MetricsInfluxDBEndpointFlag = cli.StringFlag{ Name: "metrics.influxdb.endpoint", Usage: "Metrics InfluxDB endpoint", Value: "http://127.0.0.1:8086", } - metricsInfluxDBDatabaseFlag = cli.StringFlag{ + MetricsInfluxDBDatabaseFlag = cli.StringFlag{ Name: "metrics.influxdb.database", Usage: "Metrics InfluxDB database", Value: "metrics", } - metricsInfluxDBUsernameFlag = cli.StringFlag{ + MetricsInfluxDBUsernameFlag = cli.StringFlag{ Name: "metrics.influxdb.username", Usage: "Metrics InfluxDB username", Value: "", } - metricsInfluxDBPasswordFlag = cli.StringFlag{ + MetricsInfluxDBPasswordFlag = cli.StringFlag{ Name: "metrics.influxdb.password", Usage: "Metrics InfluxDB password", Value: "", @@ -55,7 +55,7 @@ var ( // It is used so that we can group all nodes and average a measurement across all of them, but also so // that we can select a specific node and inspect its measurements. // https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key - metricsInfluxDBHostTagFlag = cli.StringFlag{ + MetricsInfluxDBHostTagFlag = cli.StringFlag{ Name: "metrics.influxdb.host.tag", Usage: "Metrics InfluxDB `host` tag attached to all measurements", Value: "localhost", @@ -65,20 +65,24 @@ var ( // Flags holds all command-line flags required for metrics collection. 
var Flags = []cli.Flag{ utils.MetricsEnabledFlag, - metricsEnableInfluxDBExportFlag, - metricsInfluxDBEndpointFlag, metricsInfluxDBDatabaseFlag, metricsInfluxDBUsernameFlag, metricsInfluxDBPasswordFlag, metricsInfluxDBHostTagFlag, + MetricsEnableInfluxDBExportFlag, + MetricsInfluxDBEndpointFlag, + MetricsInfluxDBDatabaseFlag, + MetricsInfluxDBUsernameFlag, + MetricsInfluxDBPasswordFlag, + MetricsInfluxDBHostTagFlag, } func Setup(ctx *cli.Context) { if gethmetrics.Enabled { log.Info("Enabling swarm metrics collection") var ( - enableExport = ctx.GlobalBool(metricsEnableInfluxDBExportFlag.Name) - endpoint = ctx.GlobalString(metricsInfluxDBEndpointFlag.Name) - database = ctx.GlobalString(metricsInfluxDBDatabaseFlag.Name) - username = ctx.GlobalString(metricsInfluxDBUsernameFlag.Name) - password = ctx.GlobalString(metricsInfluxDBPasswordFlag.Name) - hosttag = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name) + enableExport = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name) + endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name) + database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name) + username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name) + password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name) + hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name) ) // Start system runtime metrics collection diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go b/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go deleted file mode 100644 index 3306e3a6d..000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/multihash/multihash.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
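The flags.go hunk above exports the InfluxDB metrics flags so that code outside swarm/metrics can register them on its own CLI application and still reuse Setup. A minimal sketch of such a caller, assuming the urfave/cli v1 API this file already uses; the wrapper program itself is hypothetical:

package main

import (
	"fmt"
	"os"

	swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	// The now-exported flag definitions can be appended directly;
	// before this change only the aggregate Flags slice was visible.
	app.Flags = append(app.Flags, swarmmetrics.Flags...)
	app.Before = func(ctx *cli.Context) error {
		// Setup reads the exported flags and starts the InfluxDB
		// exporter when metrics collection is enabled.
		swarmmetrics.Setup(ctx)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}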
- -package multihash - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" -) - -const ( - defaultMultihashLength = 32 - defaultMultihashTypeCode = 0x1b -) - -var ( - multihashTypeCode uint8 - MultihashLength = defaultMultihashLength -) - -func init() { - multihashTypeCode = defaultMultihashTypeCode - MultihashLength = defaultMultihashLength -} - -// check if valid swarm multihash -func isSwarmMultihashType(code uint8) bool { - return code == multihashTypeCode -} - -// GetMultihashLength returns the digest length of the provided multihash -// It will fail if the multihash is not a valid swarm mulithash -func GetMultihashLength(data []byte) (int, int, error) { - cursor := 0 - typ, c := binary.Uvarint(data) - if c <= 0 { - return 0, 0, errors.New("unreadable hashtype field") - } - if !isSwarmMultihashType(uint8(typ)) { - return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ) - } - cursor += c - hashlength, c := binary.Uvarint(data[cursor:]) - if c <= 0 { - return 0, 0, errors.New("unreadable length field") - } - cursor += c - - // we cheekily assume hashlength < maxint - inthashlength := int(hashlength) - if len(data[c:]) < inthashlength { - return 0, 0, errors.New("length mismatch") - } - return inthashlength, cursor, nil -} - -// FromMulithash returns the digest portion of the multihash -// It will fail if the multihash is not a valid swarm multihash -func FromMultihash(data []byte) ([]byte, error) { - hashLength, _, err := GetMultihashLength(data) - if err != nil { - return nil, err - } - return data[len(data)-hashLength:], nil -} - -// ToMulithash wraps the provided digest data with a swarm mulithash header -func ToMultihash(hashData []byte) []byte { - buf := bytes.NewBuffer(nil) - b := make([]byte, 8) - c := binary.PutUvarint(b, uint64(multihashTypeCode)) - buf.Write(b[:c]) - c = binary.PutUvarint(b, uint64(len(hashData))) - buf.Write(b[:c]) - buf.Write(hashData) - return buf.Bytes() -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go index 1aa1ae42a..ebef54592 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/hive.go @@ -165,8 +165,8 @@ func (h *Hive) Run(p *BzzPeer) error { // otherwise just send depth to new peer dp.NotifyDepth(depth) } + NotifyPeer(p.BzzAddr, h.Kademlia) } - NotifyPeer(p.BzzAddr, h.Kademlia) defer h.Off(dp) return dp.Run(dp.HandleMsg) } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go index 55a0c6f13..a8ecaa4be 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go @@ -81,14 +81,15 @@ func NewKadParams() *KadParams { // Kademlia is a table of live peers and a db of known peers (node records) type Kademlia struct { lock sync.RWMutex - *KadParams // Kademlia configuration parameters - base []byte // immutable baseaddress of the table - addrs *pot.Pot // pots container for known peer addresses - conns *pot.Pot // pots container for live peer connections - depth uint8 // stores the last current depth of saturation - nDepth int // stores the last neighbourhood depth - nDepthC chan int // returned by DepthC function to signal neighbourhood depth change - addrCountC chan int // returned by AddrCountC function to signal peer count change + *KadParams // Kademlia configuration parameters + base 
[]byte // immutable baseaddress of the table + addrs *pot.Pot // pots container for known peer addresses + conns *pot.Pot // pots container for live peer connections + depth uint8 // stores the last current depth of saturation + nDepth int // stores the last neighbourhood depth + nDepthC chan int // returned by DepthC function to signal neighbourhood depth change + addrCountC chan int // returned by AddrCountC function to signal peer count change + Pof func(pot.Val, pot.Val, int) (int, bool) // function for calculating kademlia routing distance between two addresses } // NewKademlia creates a Kademlia table for base address addr @@ -103,6 +104,7 @@ func NewKademlia(addr []byte, params *KadParams) *Kademlia { KadParams: params, addrs: pot.NewPot(nil, 0), conns: pot.NewPot(nil, 0), + Pof: pof, } } @@ -175,7 +177,7 @@ func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) { k.lock.Lock() defer k.lock.Unlock() minsize := k.MinBinSize - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) // if there is a callable neighbour within the current proxBin, connect // this makes sure nearest neighbour set is fully connected var ppo int @@ -261,7 +263,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) { // found among live peers, do nothing return v }) - if ins { + if ins && !p.BzzPeer.LightNode { a := newEntry(p.BzzAddr) a.conn = p // insert new online peer into addrs @@ -289,6 +291,7 @@ func (k *Kademlia) On(p *Peer) (uint8, bool) { // neighbourhood depth on each change. // Not receiving from the returned channel will block On function // when the neighbourhood depth is changed. +// TODO: Why is this exported, and if it should be; why can't we have more subscribers than one? func (k *Kademlia) NeighbourhoodDepthC() <-chan int { k.lock.Lock() defer k.lock.Unlock() @@ -305,7 +308,7 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() { // It provides signaling of neighbourhood depth change. // This part of the code is sending new neighbourhood depth to nDepthC if that condition is met. 
if k.nDepthC != nil { - nDepth := k.neighbourhoodDepth() + nDepth := depthForPot(k.conns, k.MinProxBinSize, k.base) if nDepth != k.nDepth { k.nDepth = nDepth k.nDepthC <- nDepth @@ -329,14 +332,18 @@ func (k *Kademlia) Off(p *Peer) { k.lock.Lock() defer k.lock.Unlock() var del bool - k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { - // v cannot be nil, must check otherwise we overwrite entry - if v == nil { - panic(fmt.Sprintf("connected peer not found %v", p)) - } + if !p.BzzPeer.LightNode { + k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { + // v cannot be nil, must check otherwise we overwrite entry + if v == nil { + panic(fmt.Sprintf("connected peer not found %v", p)) + } + del = true + return newEntry(p.BzzAddr) + }) + } else { del = true - return newEntry(p.BzzAddr) - }) + } if del { k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(_ pot.Val) pot.Val { @@ -357,7 +364,7 @@ func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(con var startPo int var endPo int - kadDepth := k.neighbourhoodDepth() + kadDepth := depthForPot(k.conns, k.MinProxBinSize, k.base) k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { if startPo > 0 && endPo != k.MaxProxDisplay { @@ -391,7 +398,7 @@ func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int, bool) bool) { if len(base) == 0 { base = k.base } - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool { if po > o { return true @@ -413,7 +420,7 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool if len(base) == 0 { base = k.base } - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool { if po > o { return true @@ -422,21 +429,72 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int, bool) bool }) } -// neighbourhoodDepth returns the proximity order that defines the distance of +func (k *Kademlia) NeighbourhoodDepth() (depth int) { + k.lock.RLock() + defer k.lock.RUnlock() + return depthForPot(k.conns, k.MinProxBinSize, k.base) +} + +// depthForPot returns the proximity order that defines the distance of // the nearest neighbour set with cardinality >= MinProxBinSize // if there is altogether less than MinProxBinSize peers it returns 0 // caller must hold the lock -func (k *Kademlia) neighbourhoodDepth() (depth int) { - if k.conns.Size() < k.MinProxBinSize { +func depthForPot(p *pot.Pot, minProxBinSize int, pivotAddr []byte) (depth int) { + if p.Size() <= minProxBinSize { return 0 } + + // total number of peers in iteration var size int + + // true if iteration has all prox peers + var b bool + + // last po recorded in iteration + var lastPo int + f := func(v pot.Val, i int) bool { + // po == 256 means that addr is the pivot address(self) + if i == 256 { + return true + } size++ - depth = i - return size < k.MinProxBinSize + + // this means we have all nn-peers. 
+ // depth is by default set to the bin of the farthest nn-peer + if size == minProxBinSize { + b = true + depth = i + return true + } + + // if there are empty bins between farthest nn and current node, + // the depth should be recalculated to be + // the farthest of those empty bins + // + // 0 abac ccde + // 1 2a2a + // 2 589f <--- nearest non-nn + // ============ DEPTH 3 =========== + // 3 <--- don't count as empty bins + // 4 <--- don't count as empty bins + // 5 cbcb cdcd <---- furthest nn + // 6 a1a2 b3c4 + if b && i < depth { + depth = i + 1 + lastPo = i + return false + } + lastPo = i + return true + } + p.EachNeighbour(pivotAddr, pof, f) + + // cover edge case where more than one farthest nn + // AND we only have nn-peers + if lastPo == depth { + depth = 0 } - k.conns.EachNeighbour(k.base, pof, f) return depth } @@ -496,7 +554,7 @@ func (k *Kademlia) string() string { liverows := make([]string, k.MaxProxDisplay) peersrows := make([]string, k.MaxProxDisplay) - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) rest := k.conns.Size() k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { var rowlen int @@ -566,6 +624,7 @@ type PeerPot struct { // as hexadecimal representations of the address. // used for testing only func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot { + // create a table of all nodes for health check np := pot.NewPot(nil, 0) for _, addr := range addrs { @@ -574,34 +633,47 @@ func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot { ppmap := make(map[string]*PeerPot) for i, a := range addrs { - pl := 256 - prev := 256 + + // actual kademlia depth + depth := depthForPot(np, kadMinProxSize, a) + + // upon entering a new iteration, prevPo holds the po + // the next peer would have if there were no empty bins + // in between: one below the po seen in the last iteration + prevPo := 256 + + // all empty bins which are outside neighbourhood depth var emptyBins []int + + // all nn-peers var nns [][]byte - np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool { - a := val.([]byte) + + np.EachNeighbour(a, pof, func(val pot.Val, po int) bool { + addr := val.([]byte) + // po == 256 means that addr is the pivot address(self) if po == 256 { return true } - if pl == 256 || pl == po { - nns = append(nns, a) + + // iterate through the neighbours, going from the closest to the farthest + // we calculate the nearest neighbours that should be in the set + // depth in this case equates to: + // 1. Within all bins that are higher than or equal to depth there are + // at least minProxBinSize peers connected + // 2.
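Since the depthForPot iteration above is subtle, here is a simplified, self-contained mirror of the same rules over a plain nearest-first slice of proximity orders, reproducing the bin layout from the comment diagram. This is an illustrative sketch only: the vendored code walks a pot via EachNeighbour, and the helper below is hypothetical.

package main

import "fmt"

// depthFor mirrors depthForPot over a nearest-first list of peer
// proximity orders, for a pivot expecting minProxBinSize nearest neighbours.
func depthFor(pos []int, minProxBinSize int) int {
	if len(pos) <= minProxBinSize {
		return 0
	}
	var depth, size, lastPo int
	var b bool // true once minProxBinSize peers have been seen
	for _, po := range pos {
		size++
		if size == minProxBinSize {
			// depth defaults to the bin of the farthest nn-peer
			b = true
			depth = po
			continue
		}
		if b && po < depth {
			// empty bins between the farthest nn and the nearest
			// non-nn peer push the depth down to just above that peer
			depth = po + 1
			lastPo = po
			break
		}
		lastPo = po
	}
	if lastPo == depth {
		// more than one farthest nn and only nn-peers
		depth = 0
	}
	return depth
}

func main() {
	// Bins from the diagram: po 6 and 5 hold the nearest neighbours,
	// po 2 holds the nearest non-nn peer, bins 3 and 4 are empty.
	fmt.Println(depthFor([]int{6, 6, 5, 5, 2, 1, 0, 0}, 4)) // 3
	fmt.Println(depthFor([]int{6, 6, 5, 5, 5}, 4))          // 0: only nn-peers
}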
depth-1 bin is not empty + if po >= depth { + nns = append(nns, addr) + prevPo = depth - 1 + return true } - if pl == 256 && len(nns) >= kadMinProxSize { - pl = po - prev = po + for j := prevPo; j > po; j-- { + emptyBins = append(emptyBins, j) } - if prev < pl { - for j := prev; j > po; j-- { - emptyBins = append(emptyBins, j) - } - } - prev = po - 1 + prevPo = po - 1 return true }) - for j := prev; j >= 0; j-- { - emptyBins = append(emptyBins, j) - } - log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns))) + + log.Trace(fmt.Sprintf("%x NNS: %s, emptyBins: %s", addrs[i][:4], LogAddrs(nns), logEmptyBins(emptyBins))) ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins} } return ppmap @@ -616,7 +688,7 @@ func (k *Kademlia) saturation(n int) int { prev++ return prev == po && size >= n }) - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) if depth < prev { return depth } @@ -629,8 +701,11 @@ func (k *Kademlia) full(emptyBins []int) (full bool) { prev := 0 e := len(emptyBins) ok := true - depth := k.neighbourhoodDepth() + depth := depthForPot(k.conns, k.MinProxBinSize, k.base) k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool { + if po >= depth { + return false + } if prev == depth+1 { return true } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go index 66ae94a88..4b9b28cdc 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go @@ -44,7 +44,7 @@ const ( // BzzSpec is the spec of the generic swarm handshake var BzzSpec = &protocols.Spec{ Name: "bzz", - Version: 7, + Version: 8, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ HandshakeMsg{}, @@ -54,7 +54,7 @@ var BzzSpec = &protocols.Spec{ // DiscoverySpec is the spec for the bzz discovery subprotocols var DiscoverySpec = &protocols.Spec{ Name: "hive", - Version: 6, + Version: 8, MaxMsgSize: 10 * 1024 * 1024, Messages: []interface{}{ peersMsg{}, diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go index 594d36225..d73c3af4e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/events.go @@ -20,16 +20,18 @@ import ( "context" "sync" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/simulations" ) // PeerEvent is the type of the channel returned by Simulation.PeerEvents. type PeerEvent struct { // NodeID is the ID of node that the event is caught on. NodeID enode.ID + // PeerID is the ID of the peer node that the event is caught on. + PeerID enode.ID // Event is the event that is caught. - Event *p2p.PeerEvent + Event *simulations.Event // Error is the error that may have happened during event watching. Error error } @@ -37,9 +39,13 @@ type PeerEvent struct { // PeerEventsFilter defines a filter on PeerEvents to exclude messages with // defined properties. Use PeerEventsFilter methods to set required options. type PeerEventsFilter struct { - t *p2p.PeerEventType - protocol *string - msgCode *uint64 + eventType simulations.EventType + + connUp *bool + + msgReceive *bool + protocol *string + msgCode *uint64 } // NewPeerEventsFilter returns a new PeerEventsFilter instance. 
@@ -47,20 +53,48 @@ func NewPeerEventsFilter() *PeerEventsFilter { return &PeerEventsFilter{} } -// Type sets the filter to only one peer event type. -func (f *PeerEventsFilter) Type(t p2p.PeerEventType) *PeerEventsFilter { - f.t = &t +// Connect sets the filter to events when two nodes connect. +func (f *PeerEventsFilter) Connect() *PeerEventsFilter { + f.eventType = simulations.EventTypeConn + b := true + f.connUp = &b + return f +} + +// Drop sets the filter to events when two nodes disconnect. +func (f *PeerEventsFilter) Drop() *PeerEventsFilter { + f.eventType = simulations.EventTypeConn + b := false + f.connUp = &b + return f +} + +// ReceivedMessages sets the filter to only messages that are received. +func (f *PeerEventsFilter) ReceivedMessages() *PeerEventsFilter { + f.eventType = simulations.EventTypeMsg + b := true + f.msgReceive = &b + return f +} + +// SentMessages sets the filter to only messages that are sent. +func (f *PeerEventsFilter) SentMessages() *PeerEventsFilter { + f.eventType = simulations.EventTypeMsg + b := false + f.msgReceive = &b return f } // Protocol sets the filter to only one message protocol. func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter { + f.eventType = simulations.EventTypeMsg f.protocol = &p return f } // MsgCode sets the filter to only one msg code. func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter { + f.eventType = simulations.EventTypeMsg f.msgCode = &c return f } @@ -80,19 +114,8 @@ func (s *Simulation) PeerEvents(ctx context.Context, ids []enode.ID, filters ... go func(id enode.ID) { defer s.shutdownWG.Done() - client, err := s.Net.GetNode(id).Client() - if err != nil { - subsWG.Done() - eventC <- PeerEvent{NodeID: id, Error: err} - return - } - events := make(chan *p2p.PeerEvent) - sub, err := client.Subscribe(ctx, "admin", events, "peerEvents") - if err != nil { - subsWG.Done() - eventC <- PeerEvent{NodeID: id, Error: err} - return - } + events := make(chan *simulations.Event) + sub := s.Net.Events().Subscribe(events) defer sub.Unsubscribe() subsWG.Done() @@ -110,28 +133,55 @@ func (s *Simulation) PeerEvents(ctx context.Context, ids []enode.ID, filters ... 
case <-s.Done(): return case e := <-events: + // ignore control events + if e.Control { + continue + } match := len(filters) == 0 // if there are no filters match all events for _, f := range filters { - if f.t != nil && *f.t != e.Type { - continue + if f.eventType == simulations.EventTypeConn && e.Conn != nil { + if *f.connUp != e.Conn.Up { + continue + } + // all connection filter parameters matched, break the loop + match = true + break } - if f.protocol != nil && *f.protocol != e.Protocol { - continue + if f.eventType == simulations.EventTypeMsg && e.Msg != nil { + if f.msgReceive != nil && *f.msgReceive != e.Msg.Received { + continue + } + if f.protocol != nil && *f.protocol != e.Msg.Protocol { + continue + } + if f.msgCode != nil && *f.msgCode != e.Msg.Code { + continue + } + // all message filter parameters matched, break the loop + match = true + break } - if f.msgCode != nil && e.MsgCode != nil && *f.msgCode != *e.MsgCode { - continue + } + var peerID enode.ID + switch e.Type { + case simulations.EventTypeConn: + peerID = e.Conn.One + if peerID == id { + peerID = e.Conn.Other + } + case simulations.EventTypeMsg: + peerID = e.Msg.One + if peerID == id { + peerID = e.Msg.Other } - // all filter parameters matched, break the loop - match = true - break } if match { select { - case eventC <- PeerEvent{NodeID: id, Event: e}: + case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Event: e}: case <-ctx.Done(): if err := ctx.Err(); err != nil { select { - case eventC <- PeerEvent{NodeID: id, Error: err}: + case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Error: err}: case <-s.Done(): } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go index f895181d9..7982810ca 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go @@ -33,6 +33,7 @@ var BucketKeyKademlia BucketKey = "kademlia" // WaitTillHealthy is blocking until the health of all kademlias is true. // If error is not nil, a map of kademlia that was found not healthy is returned. +// TODO: Check correctness since change in kademlia depth calculation logic func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[enode.ID]*network.Kademlia, err error) { // Prepare PeerPot map for checking Kademlia health var ppmap map[string]*network.PeerPot diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go index f6d3ce229..e5435b9f0 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation.go @@ -68,6 +68,10 @@ type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Se // New creates a new Simulation instance with new // simulations.Network initialized with provided services. +// Services map must have unique keys as service names and +// every ServiceFunc must return a node.Service of the unique type. +// This restriction is required by node.Node.Start() function +// which is used to start node.Service returned by ServiceFunc. 
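With the reworked filter API above, simulation code consumes the network's own event feed instead of opening a per-node RPC subscription. A hedged usage sketch; it assumes a running *simulation.Simulation, the package's NodeIDs helper, and "bzz" is only an example protocol name:

package swarmdemo

import (
	"context"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// watchBzzMessages logs every received "bzz" protocol message observed
// on any node of the simulation until ctx is cancelled.
func watchBzzMessages(ctx context.Context, sim *simulation.Simulation) {
	filter := simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("bzz")
	events := sim.PeerEvents(ctx, sim.NodeIDs(), filter)
	for {
		select {
		case <-ctx.Done():
			return
		case e := <-events:
			if e.Error != nil {
				log.Warn("peer event", "err", e.Error)
				continue
			}
			// the filter matched a message event, so Event.Msg is set
			log.Info("msg", "node", e.NodeID, "peer", e.PeerID, "code", e.Event.Msg.Code)
		}
	}
}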
func New(services map[string]ServiceFunc) (s *Simulation) { s = &Simulation{ buckets: make(map[enode.ID]*sync.Map), @@ -76,6 +80,9 @@ func New(services map[string]ServiceFunc) (s *Simulation) { adapterServices := make(map[string]adapters.ServiceFunc, len(services)) for name, serviceFunc := range services { + // Scope these variables correctly + // as they will be accessed later in the adapterServices[name] function. + name, serviceFunc := name, serviceFunc s.serviceNames = append(s.serviceNames, name) adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) { b := new(sync.Map) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go index caf7ff1f2..284ae6398 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay.go @@ -64,12 +64,12 @@ func init() { type Simulation struct { mtx sync.Mutex - stores map[enode.ID]*state.InmemoryStore + stores map[enode.ID]state.Store } func NewSimulation() *Simulation { return &Simulation{ - stores: make(map[enode.ID]*state.InmemoryStore), + stores: make(map[enode.ID]state.Store), } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go index c2adb1009..c73298d9a 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go @@ -39,6 +39,7 @@ const ( var ( processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil) handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil) + retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil) requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil) requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil) @@ -96,6 +97,11 @@ func (s *SwarmChunkServer) processDeliveries() { } } +// SessionIndex returns zero in all cases for SwarmChunkServer.
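The name, serviceFunc := name, serviceFunc rebinding added above is the standard Go fix for closures capturing range variables: without it, every adapterServices entry would share a single loop variable and end up constructing whichever service was registered last. A tiny stand-alone illustration of the bug and the fix, with hypothetical names:

package main

import "fmt"

func main() {
	makers := map[string]func() string{}
	for _, name := range []string{"bzz", "pss"} {
		// BUG: all closures would share the loop variable, so after the
		// loop every maker would report the last value ("pss"):
		//   makers[name] = func() string { return name }

		// FIX: rebind so each closure captures its own copy, exactly as
		// New does with name and serviceFunc.
		name := name
		makers[name] = func() string { return name }
	}
	fmt.Println(makers["bzz"](), makers["pss"]()) // bzz pss
}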
+func (s *SwarmChunkServer) SessionIndex() (uint64, error) { + return 0, nil +} + // SetNextBatch func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) { select { @@ -141,7 +147,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * "retrieve.request") defer osp.Finish() - s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false)) + s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true)) if err != nil { return err } @@ -164,11 +170,13 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * go func() { chunk, err := d.chunkStore.Get(ctx, req.Addr) if err != nil { - log.Warn("ChunkStore.Get can not retrieve chunk", "err", err) + retrieveChunkFail.Inc(1) + log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err) return } if req.SkipCheck { - err = sp.Deliver(ctx, chunk, s.priority) + syncing := false + err = sp.Deliver(ctx, chunk, s.priority, syncing) if err != nil { log.Warn("ERROR in handleRetrieveRequestMsg", "err", err) } @@ -184,12 +192,22 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * return nil } +//Chunk delivery always uses the same message type.... type ChunkDeliveryMsg struct { Addr storage.Address SData []byte // the stored chunk Data (incl size) peer *Peer // set in handleChunkDeliveryMsg } +//...but swap accounting needs to disambiguate if it is a delivery for syncing or for retrieval +//as it decides based on message type if it needs to account for this message or not + +//defines a chunk delivery for retrieval (with accounting) +type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg + +//defines a chunk delivery for syncing (without accounting) +type ChunkDeliveryMsgSyncing ChunkDeliveryMsg + // TODO: Fix context SNAFU func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error { var osp opentracing.Span @@ -229,14 +247,17 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) ( } else { d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int, nn bool) bool { id := p.ID() - // TODO: skip light nodes that do not accept retrieve requests + if p.LightNode { + // skip light nodes + return true + } if req.SkipPeer(id.String()) { log.Trace("Delivery.RequestFromPeers: skip peer", "peer id", id) return true } sp = d.getPeer(id) if sp == nil { - log.Warn("Delivery.RequestFromPeers: peer not found", "id", id) + //log.Warn("Delivery.RequestFromPeers: peer not found", "id", id) return true } spID = &id diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go index 74c785d58..eb1b2983e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/messages.go @@ -76,7 +76,16 @@ type RequestSubscriptionMsg struct { func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) { log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr, p.ID(), req.Stream)) - return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority) + if err = p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority); err != nil { + // The error will be sent as a subscribe error 
message + // and will not be returned as it will prevent any new message + // exchange between peers over p2p. Instead, error will be returned + // only if there is one from sending subscribe error message. + err = p.Send(ctx, SubscribeErrorMsg{ + Error: err.Error(), + }) + } + return err } func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) { @@ -149,6 +158,7 @@ type SubscribeErrorMsg struct { } func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) { + //TODO the error should be channeled to whoever calls the subscribe return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error) } @@ -347,7 +357,8 @@ func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err) } chunk := storage.NewChunk(hash, data) - if err := p.Deliver(ctx, chunk, s.priority); err != nil { + syncing := true + if err := p.Deliver(ctx, chunk, s.priority, syncing); err != nil { return err } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go index ef6bbdf70..4bccf56f5 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/peer.go @@ -128,17 +128,34 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { } // Deliver sends a storeRequestMsg protocol message to the peer -func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8) error { +// Depending on the `syncing` parameter we send different message types +func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error { var sp opentracing.Span + var msg interface{} + + spanName := "send.chunk.delivery" + + //we send different types of messages if delivery is for syncing or retrievals, + //even if handling and content of the message are the same, + //because swap accounting decides which messages need accounting based on the message type + if syncing { + msg = &ChunkDeliveryMsgSyncing{ + Addr: chunk.Address(), + SData: chunk.Data(), + } + spanName += ".syncing" + } else { + msg = &ChunkDeliveryMsgRetrieval{ + Addr: chunk.Address(), + SData: chunk.Data(), + } + spanName += ".retrieval" + } ctx, sp = spancontext.StartSpan( ctx, - "send.chunk.delivery") + spanName) defer sp.Finish() - msg := &ChunkDeliveryMsg{ - Addr: chunk.Address(), - SData: chunk.Data(), - } return p.SendPriority(ctx, msg, priority) } @@ -166,7 +183,7 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { "send.offered.hashes") defer sp.Finish() - hashes, from, to, proof, err := s.SetNextBatch(f, t) + hashes, from, to, proof, err := s.setNextBatch(f, t) if err != nil { return err } @@ -214,10 +231,15 @@ func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) { return nil, ErrMaxPeerServers } + sessionIndex, err := o.SessionIndex() + if err != nil { + return nil, err + } os := &server{ - Server: o, - stream: s, - priority: priority, + Server: o, + stream: s, + priority: priority, + sessionIndex: sessionIndex, } p.servers[s] = os return os, nil diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go index 1eda06c6a..32e107823 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go +++ 
b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/stream.go @@ -18,8 +18,10 @@ package stream import ( "context" + "errors" "fmt" "math" + "reflect" "sync" "time" @@ -46,6 +48,31 @@ const ( HashSize = 32 ) +//Enumerate options for syncing and retrieval +type SyncingOption int +type RetrievalOption int + +//Syncing options +const ( + //Syncing disabled + SyncingDisabled SyncingOption = iota + //Register the client and the server but not subscribe + SyncingRegisterOnly + //Both client and server funcs are registered, subscribe sent automatically + SyncingAutoSubscribe +) + +const ( + //Retrieval disabled. Used mostly for tests to isolate syncing features (i.e. syncing only) + RetrievalDisabled RetrievalOption = iota + //Only the client side of the retrieve request is registered. + //(light nodes do not serve retrieve requests) + //once the client is registered, subscription to retrieve request stream is always sent + RetrievalClientOnly + //Both client and server funcs are registered, subscribe sent automatically + RetrievalEnabled +) + // Registry registry for outgoing and incoming streamer constructors type Registry struct { addr enode.ID @@ -59,27 +86,33 @@ type Registry struct { peers map[enode.ID]*Peer delivery *Delivery intervalsStore state.Store - doRetrieve bool + autoRetrieval bool //automatically subscribe to retrieve request stream maxPeerServers int + spec *protocols.Spec //this protocol's spec + balance protocols.Balance //implements protocols.Balance, for accounting + prices protocols.Prices //implements protocols.Prices, provides prices to accounting } // RegistryOptions holds optional values for NewRegistry constructor. type RegistryOptions struct { SkipCheck bool - DoSync bool - DoRetrieve bool + Syncing SyncingOption //Defines syncing behavior + Retrieval RetrievalOption //Defines retrieval behavior SyncUpdateDelay time.Duration MaxPeerServers int // The limit of servers for each peer in registry } // NewRegistry is Streamer constructor -func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions) *Registry { +func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.SyncChunkStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry { if options == nil { options = &RegistryOptions{} } if options.SyncUpdateDelay <= 0 { options.SyncUpdateDelay = 15 * time.Second } + //check if retrieval has been disabled + retrieval := options.Retrieval != RetrievalDisabled + streamer := &Registry{ addr: localID, skipCheck: options.SkipCheck, @@ -88,21 +121,40 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy peers: make(map[enode.ID]*Peer), delivery: delivery, intervalsStore: intervalsStore, - doRetrieve: options.DoRetrieve, + autoRetrieval: retrieval, maxPeerServers: options.MaxPeerServers, + balance: balance, } + streamer.setupSpec() + streamer.api = NewAPI(streamer) delivery.getPeer = streamer.getPeer - streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, _ bool) (Server, error) { - return NewSwarmChunkServer(delivery.chunkStore), nil - }) - streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) { - return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live)) - }) - RegisterSwarmSyncerServer(streamer, syncChunkStore) - RegisterSwarmSyncerClient(streamer, syncChunkStore) - if
options.DoSync { + //if retrieval is enabled, register the server func, so that retrieve requests will be served (non-light nodes only) + if options.Retrieval == RetrievalEnabled { + streamer.RegisterServerFunc(swarmChunkServerStreamName, func(_ *Peer, _ string, live bool) (Server, error) { + if !live { + return nil, errors.New("only live retrieval requests supported") + } + return NewSwarmChunkServer(delivery.chunkStore), nil + }) + } + + //if retrieval is not disabled, register the client func (both light nodes and normal nodes can issue retrieve requests) + if options.Retrieval != RetrievalDisabled { + streamer.RegisterClientFunc(swarmChunkServerStreamName, func(p *Peer, t string, live bool) (Client, error) { + return NewSwarmSyncerClient(p, syncChunkStore, NewStream(swarmChunkServerStreamName, t, live)) + }) + } + + //If syncing is not disabled, the syncing functions are registered (both client and server) + if options.Syncing != SyncingDisabled { + RegisterSwarmSyncerServer(streamer, syncChunkStore) + RegisterSwarmSyncerClient(streamer, syncChunkStore) + } + + //if syncing is set to automatically subscribe to the syncing stream, start the subscription process + if options.Syncing == SyncingAutoSubscribe { // latestIntC function ensures that // - receiving from the in chan is not blocked by processing inside the for loop // - the latest int value is delivered to the loop after the processing is done @@ -183,6 +235,17 @@ func NewRegistry(localID enode.ID, delivery *Delivery, syncChunkStore storage.Sy return streamer } +//we need to construct a spec instance per node instance +func (r *Registry) setupSpec() { + //first create the "bare" spec + r.createSpec() + //if balance is nil, this node has been started without swap support (swapEnabled flag is false) + if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() { + //swap is enabled, so setup the hook + r.spec.Hook = protocols.NewAccounting(r.balance, r.prices) + } +} + // RegisterClient registers an incoming streamer constructor func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) { r.clientMu.Lock() @@ -271,7 +334,6 @@ func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8 if err != nil { return err } - if s.Live && h != nil { if err := peer.setClientParams( getHistoryStream(s), @@ -374,8 +436,8 @@ func (r *Registry) Run(p *network.BzzPeer) error { defer close(sp.quit) defer sp.close() - if r.doRetrieve { - err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", false), nil, Top) + if r.autoRetrieval && !p.LightNode { + err := r.Subscribe(p.ID(), NewStream(swarmChunkServerStreamName, "", true), nil, Top) if err != nil { return err } @@ -448,7 +510,7 @@ func (r *Registry) updateSyncing() { } func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := protocols.NewPeer(p, rw, Spec) + peer := protocols.NewPeer(p, rw, r.spec) bp := network.NewBzzPeer(peer) np := network.NewPeer(bp, r.delivery.kad) r.delivery.kad.On(np) @@ -478,8 +540,13 @@ func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error { case *WantedHashesMsg: return p.handleWantedHashesMsg(ctx, msg) - case *ChunkDeliveryMsg: - return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg) + case *ChunkDeliveryMsgRetrieval: + //handling chunk delivery is the same for retrieval and syncing, so let's cast the msg + return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg))) + + case *ChunkDeliveryMsgSyncing: + //handling chunk 
delivery is the same for retrieval and syncing, so let's cast the msg + return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, ((*ChunkDeliveryMsg)(msg))) case *RetrieveRequestMsg: return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg) @@ -500,10 +567,38 @@ type server struct { stream Stream priority uint8 currentBatch []byte + sessionIndex uint64 +} + +// setNextBatch adjusts passed interval based on session index and whether +// stream is live or history. It calls Server SetNextBatch with adjusted +// interval and returns batch hashes and their interval. +func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { + if s.stream.Live { + if from == 0 { + from = s.sessionIndex + } + if to <= from || from >= s.sessionIndex { + to = math.MaxUint64 + } + } else { + if (to < from && to != 0) || from > s.sessionIndex { + return nil, 0, 0, nil, nil + } + if to == 0 || to > s.sessionIndex { + to = s.sessionIndex + } + } + return s.SetNextBatch(from, to) } // Server interface for outgoing peer Streamer type Server interface { + // SessionIndex is called when a server is initialized + // to get the current cursor state of the stream data. + // Based on this index, live and history stream intervals + // will be adjusted before calling SetNextBatch. + SessionIndex() (uint64, error) SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) GetData(context.Context, []byte) ([]byte, error) Close() @@ -639,34 +734,43 @@ func (c *clientParams) clientCreated() { close(c.clientCreatedC) } -// Spec is the spec of the streamer protocol -var Spec = &protocols.Spec{ - Name: "stream", - Version: 7, - MaxMsgSize: 10 * 1024 * 1024, - Messages: []interface{}{ - UnsubscribeMsg{}, - OfferedHashesMsg{}, - WantedHashesMsg{}, - TakeoverProofMsg{}, - SubscribeMsg{}, - RetrieveRequestMsg{}, - ChunkDeliveryMsg{}, - SubscribeErrorMsg{}, - RequestSubscriptionMsg{}, - QuitMsg{}, - }, +//GetSpec returns the streamer spec to callers +//This used to be a global variable but for simulations with +//multiple nodes its fields (notably the Hook) would be overwritten +func (r *Registry) GetSpec() *protocols.Spec { + return r.spec +} + +func (r *Registry) createSpec() { + // Spec is the spec of the streamer protocol + var spec = &protocols.Spec{ + Name: "stream", + Version: 8, + MaxMsgSize: 10 * 1024 * 1024, + Messages: []interface{}{ + UnsubscribeMsg{}, + OfferedHashesMsg{}, + WantedHashesMsg{}, + TakeoverProofMsg{}, + SubscribeMsg{}, + RetrieveRequestMsg{}, + ChunkDeliveryMsgRetrieval{}, + SubscribeErrorMsg{}, + RequestSubscriptionMsg{}, + QuitMsg{}, + ChunkDeliveryMsgSyncing{}, + }, + } + r.spec = spec } func (r *Registry) Protocols() []p2p.Protocol { return []p2p.Protocol{ { - Name: Spec.Name, - Version: Spec.Version, - Length: Spec.Length(), + Name: r.spec.Name, + Version: r.spec.Version, + Length: r.spec.Length(), Run: r.runProtocol, - // NodeInfo: , - // PeerInfo: , }, } } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go index 38b3078d2..4bfbac8b0 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/network/stream/syncer.go @@ -18,7 +18,6 @@ package stream import ( "context" - "math" "strconv" "time" @@ -36,38 +35,27 @@ const ( // * live request delivery with or without checkback // * (live/non-live historical) chunk syncing per proximity bin type 
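The live/history interval adjustment that SwarmSyncerServer previously performed inside SetNextBatch now lives in the generic server.setNextBatch above. A stand-alone mirror of the clamping rules, with the session index as an explicit parameter; illustrative only:

package main

import (
	"fmt"
	"math"
)

// clamp mirrors server.setNextBatch's interval adjustment. The boolean
// result is false when a history request is already exhausted and no
// batch should be served (the nil return in the vendored code).
func clamp(live bool, from, to, sessionIndex uint64) (uint64, uint64, bool) {
	if live {
		if from == 0 {
			from = sessionIndex // live streams start at the session cursor
		}
		if to <= from || from >= sessionIndex {
			to = math.MaxUint64 // open-ended live interval
		}
		return from, to, true
	}
	if (to < from && to != 0) || from > sessionIndex {
		return 0, 0, false // nothing left to serve from history
	}
	if to == 0 || to > sessionIndex {
		to = sessionIndex // history never crosses the session cursor
	}
	return from, to, true
}

func main() {
	fmt.Println(clamp(true, 0, 0, 100))  // 100 18446744073709551615 true
	fmt.Println(clamp(false, 0, 0, 100)) // 0 100 true
}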
SwarmSyncerServer struct { - po uint8 - store storage.SyncChunkStore - sessionAt uint64 - start uint64 - live bool - quit chan struct{} + po uint8 + store storage.SyncChunkStore + quit chan struct{} } -// NewSwarmSyncerServer is contructor for SwarmSyncerServer -func NewSwarmSyncerServer(live bool, po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) { - sessionAt := syncChunkStore.BinIndex(po) - var start uint64 - if live { - start = sessionAt - } +// NewSwarmSyncerServer is constructor for SwarmSyncerServer +func NewSwarmSyncerServer(po uint8, syncChunkStore storage.SyncChunkStore) (*SwarmSyncerServer, error) { return &SwarmSyncerServer{ - po: po, - store: syncChunkStore, - sessionAt: sessionAt, - start: start, - live: live, - quit: make(chan struct{}), + po: po, + store: syncChunkStore, + quit: make(chan struct{}), }, nil } func RegisterSwarmSyncerServer(streamer *Registry, syncChunkStore storage.SyncChunkStore) { - streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, live bool) (Server, error) { + streamer.RegisterServerFunc("SYNC", func(_ *Peer, t string, _ bool) (Server, error) { po, err := ParseSyncBinKey(t) if err != nil { return nil, err } - return NewSwarmSyncerServer(live, po, syncChunkStore) + return NewSwarmSyncerServer(po, syncChunkStore) }) // streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) { // return NewOutgoingProvableSwarmSyncer(po, db) @@ -88,25 +76,15 @@ func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, er return chunk.Data(), nil } +// SessionIndex returns current storage bin (po) index. +func (s *SwarmSyncerServer) SessionIndex() (uint64, error) { + return s.store.BinIndex(s.po), nil +} + // GetBatch retrieves the next batch of hashes from the dbstore func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { var batch []byte i := 0 - if s.live { - if from == 0 { - from = s.start - } - if to <= from || from >= s.sessionAt { - to = math.MaxUint64 - } - } else { - if (to < from && to != 0) || from > s.sessionAt { - return nil, 0, 0, nil, nil - } - if to == 0 || to > s.sessionAt { - to = s.sessionAt - } - } var ticker *time.Ticker defer func() { diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go b/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go index 3974ebcaa..728dac14e 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pot/address.go @@ -79,46 +79,6 @@ func (a Address) Bytes() []byte { return a[:] } -/* -Proximity(x, y) returns the proximity order of the MSB distance between x and y - -The distance metric MSB(x, y) of two equal length byte sequences x an y is the -value of the binary integer cast of the x^y, ie., x and y bitwise xor-ed. -the binary cast is big endian: most significant bit first (=MSB). - -Proximity(x, y) is a discrete logarithmic scaling of the MSB distance. -It is defined as the reverse rank of the integer part of the base 2 -logarithm of the distance. -It is calculated by counting the number of common leading zeros in the (MSB) -binary representation of the x^y. 
- -(0 farthest, 255 closest, 256 self) -*/ -func proximity(one, other Address) (ret int, eq bool) { - return posProximity(one, other, 0) -} - -// posProximity(a, b, pos) returns proximity order of b wrt a (symmetric) pretending -// the first pos bits match, checking only bits index >= pos -func posProximity(one, other Address, pos int) (ret int, eq bool) { - for i := pos / 8; i < len(one); i++ { - if one[i] == other[i] { - continue - } - oxo := one[i] ^ other[i] - start := 0 - if i == pos/8 { - start = pos % 8 - } - for j := start; j < 8; j++ { - if (oxo>>uint8(7-j))&0x01 != 0 { - return i*8 + j, false - } - } - } - return len(one) * 8, true -} - // ProxCmp compares the distances a->target and b->target. // Returns -1 if a is closer to target, 1 if b is closer to target // and 0 if they are equal. diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go index eba7bb722..587382d72 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/api.go @@ -51,7 +51,7 @@ func NewAPI(ps *Pss) *API { // // All incoming messages to the node matching this topic will be encapsulated in the APIMsg // struct and sent to the subscriber -func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, error) { +func (pssapi *API) Receive(ctx context.Context, topic Topic, raw bool, prox bool) (*rpc.Subscription, error) { notifier, supported := rpc.NotifierFromContext(ctx) if !supported { return nil, fmt.Errorf("Subscribe not supported") @@ -59,7 +59,7 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, psssub := notifier.CreateSubscription() - handler := func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error { + hndlr := NewHandler(func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error { apimsg := &APIMsg{ Msg: hexutil.Bytes(msg), Asymmetric: asymmetric, @@ -69,9 +69,15 @@ func (pssapi *API) Receive(ctx context.Context, topic Topic) (*rpc.Subscription, log.Warn(fmt.Sprintf("notification on pss sub topic rpc (sub %v) msg %v failed!", psssub.ID, msg)) } return nil + }) + if raw { + hndlr.caps.raw = true + } + if prox { + hndlr.caps.prox = true } - deregf := pssapi.Register(&topic, handler) + deregf := pssapi.Register(&topic, hndlr) go func() { defer deregf() select { @@ -158,6 +164,10 @@ func (pssapi *API) SendSym(symkeyhex string, topic Topic, msg hexutil.Bytes) err return pssapi.Pss.SendSym(symkeyhex, topic, msg[:]) } +func (pssapi *API) SendRaw(addr hexutil.Bytes, topic Topic, msg hexutil.Bytes) error { + return pssapi.Pss.SendRaw(PssAddress(addr), topic, msg[:]) +} + func (pssapi *API) GetPeerTopics(pubkeyhex string) ([]Topic, error) { topics, _, err := pssapi.Pss.GetPublickeyPeers(pubkeyhex) return topics, err diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go index d541081d3..5ee387aa7 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/client/client.go @@ -236,7 +236,7 @@ func (c *Client) RunProtocol(ctx context.Context, proto *p2p.Protocol) error { topichex := topicobj.String() msgC := make(chan pss.APIMsg) c.peerPool[topicobj] = make(map[string]*pssRPCRW) - sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex) + sub, err := c.rpc.Subscribe(ctx, "pss", msgC, "receive", topichex, false, false) if err != nil { 
return fmt.Errorf("pss event subscription failed: %v", err) } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go index e3ead77d0..5486abafa 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/handshake.go @@ -486,7 +486,7 @@ func (api *HandshakeAPI) Handshake(pubkeyid string, topic Topic, sync bool, flus // Activate handshake functionality on a topic func (api *HandshakeAPI) AddHandshake(topic Topic) error { - api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, api.ctrl.handler) + api.ctrl.deregisterFuncs[topic] = api.ctrl.pss.Register(&topic, NewHandler(api.ctrl.handler)) return nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go index 3731fb9db..d3c89058b 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/notify/notify.go @@ -113,7 +113,7 @@ func NewController(ps *pss.Pss) *Controller { notifiers: make(map[string]*notifier), subscriptions: make(map[string]*subscription), } - ctrl.pss.Register(&controlTopic, ctrl.Handler) + ctrl.pss.Register(&controlTopic, pss.NewHandler(ctrl.Handler)) return ctrl } @@ -336,7 +336,7 @@ func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error { // \TODO keep track of and add actual address updaterAddr := pss.PssAddress([]byte{}) c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true) - c.pss.Register(&topic, c.Handler) + c.pss.Register(&topic, pss.NewHandler(c.Handler)) return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength]) } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go index e1e24e1f5..d0986d280 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/pss.go @@ -23,11 +23,13 @@ import ( "crypto/rand" "errors" "fmt" + "hash" "sync" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" @@ -136,10 +138,10 @@ type Pss struct { symKeyDecryptCacheCapacity int // max amount of symkeys to keep. // message handling - handlers map[Topic]map[*Handler]bool // topic and version based pss payload handlers. See pss.Handle() - handlersMu sync.RWMutex - allowRaw bool - hashPool sync.Pool + handlers map[Topic]map[*handler]bool // topic and version based pss payload handlers. 
See pss.Handle() + handlersMu sync.RWMutex + hashPool sync.Pool + topicHandlerCaps map[Topic]*handlerCaps // caches capabilities of each topic's handlers (see handlerCap* consts in types.go) // process quitC chan struct{} @@ -180,11 +182,12 @@ func NewPss(k *network.Kademlia, params *PssParams) (*Pss, error) { symKeyDecryptCache: make([]*string, params.SymKeyCacheCapacity), symKeyDecryptCacheCapacity: params.SymKeyCacheCapacity, - handlers: make(map[Topic]map[*Handler]bool), - allowRaw: params.AllowRaw, + handlers: make(map[Topic]map[*handler]bool), + topicHandlerCaps: make(map[Topic]*handlerCaps), + hashPool: sync.Pool{ New: func() interface{} { - return storage.MakeHashFunc(storage.DefaultHash)() + return sha3.NewKeccak256() }, }, } @@ -313,30 +316,54 @@ func (p *Pss) PublicKey() *ecdsa.PublicKey { // // Returns a deregister function which needs to be called to // deregister the handler, -func (p *Pss) Register(topic *Topic, handler Handler) func() { +func (p *Pss) Register(topic *Topic, hndlr *handler) func() { p.handlersMu.Lock() defer p.handlersMu.Unlock() handlers := p.handlers[*topic] if handlers == nil { - handlers = make(map[*Handler]bool) + handlers = make(map[*handler]bool) p.handlers[*topic] = handlers + log.Debug("registered handler", "caps", hndlr.caps) } - handlers[&handler] = true - return func() { p.deregister(topic, &handler) } + if hndlr.caps == nil { + hndlr.caps = &handlerCaps{} + } + handlers[hndlr] = true + if _, ok := p.topicHandlerCaps[*topic]; !ok { + p.topicHandlerCaps[*topic] = &handlerCaps{} + } + if hndlr.caps.raw { + p.topicHandlerCaps[*topic].raw = true + } + if hndlr.caps.prox { + p.topicHandlerCaps[*topic].prox = true + } + return func() { p.deregister(topic, hndlr) } } -func (p *Pss) deregister(topic *Topic, h *Handler) { +func (p *Pss) deregister(topic *Topic, hndlr *handler) { p.handlersMu.Lock() defer p.handlersMu.Unlock() handlers := p.handlers[*topic] - if len(handlers) == 1 { + if len(handlers) > 1 { delete(p.handlers, *topic) + // topic caps might have changed now that a handler is gone + caps := &handlerCaps{} + for h := range handlers { + if h.caps.raw { + caps.raw = true + } + if h.caps.prox { + caps.prox = true + } + } + p.topicHandlerCaps[*topic] = caps return } - delete(handlers, h) + delete(handlers, hndlr) } // get all registered handlers for respective topics -func (p *Pss) getHandlers(topic Topic) map[*Handler]bool { +func (p *Pss) getHandlers(topic Topic) map[*handler]bool { p.handlersMu.RLock() defer p.handlersMu.RUnlock() return p.handlers[topic] @@ -348,12 +375,11 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool { // Only passes error to pss protocol handler if payload is not valid pssmsg func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1) - pssmsg, ok := msg.(*PssMsg) - if !ok { return fmt.Errorf("invalid message type. Expected *PssMsg, got %T ", msg) } + log.Trace("handler", "self", label(p.Kademlia.BaseAddr()), "topic", label(pssmsg.Payload.Topic[:])) if int64(pssmsg.Expire) < time.Now().Unix() { metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1) log.Warn("pss filtered expired message", "from", common.ToHex(p.Kademlia.BaseAddr()), "to", common.ToHex(pssmsg.To)) @@ -365,13 +391,34 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { } p.addFwdCache(pssmsg) - if !p.isSelfPossibleRecipient(pssmsg) { - log.Trace("pss was for someone else :'( ... 
forwarding", "pss", common.ToHex(p.BaseAddr())) + psstopic := Topic(pssmsg.Payload.Topic) + + // raw is simplest handler contingency to check, so check that first + var isRaw bool + if pssmsg.isRaw() { + if !p.topicHandlerCaps[psstopic].raw { + log.Debug("No handler for raw message", "topic", psstopic) + return nil + } + isRaw = true + } + + // check if we can be recipient: + // - no prox handler on message and partial address matches + // - prox handler on message and we are in prox regardless of partial address match + // store this result so we don't calculate again on every handler + var isProx bool + if _, ok := p.topicHandlerCaps[psstopic]; ok { + isProx = p.topicHandlerCaps[psstopic].prox + } + isRecipient := p.isSelfPossibleRecipient(pssmsg, isProx) + if !isRecipient { + log.Trace("pss was for someone else :'( ... forwarding", "pss", common.ToHex(p.BaseAddr()), "prox", isProx) return p.enqueue(pssmsg) } - log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr())) - if err := p.process(pssmsg); err != nil { + log.Trace("pss for us, yay! ... let's process!", "pss", common.ToHex(p.BaseAddr()), "prox", isProx, "raw", isRaw, "topic", label(pssmsg.Payload.Topic[:])) + if err := p.process(pssmsg, isRaw, isProx); err != nil { qerr := p.enqueue(pssmsg) if qerr != nil { return fmt.Errorf("process fail: processerr %v, queueerr: %v", err, qerr) @@ -384,7 +431,7 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { // Entry point to processing a message for which the current node can be the intended recipient. // Attempts symmetric and asymmetric decryption with stored keys. // Dispatches message to all handlers matching the message topic -func (p *Pss) process(pssmsg *PssMsg) error { +func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error { metrics.GetOrRegisterCounter("pss.process", nil).Inc(1) var err error @@ -397,10 +444,8 @@ func (p *Pss) process(pssmsg *PssMsg) error { envelope := pssmsg.Payload psstopic := Topic(envelope.Topic) - if pssmsg.isRaw() { - if !p.allowRaw { - return errors.New("raw message support disabled") - } + + if raw { payload = pssmsg.Payload.Data } else { if pssmsg.isSym() { @@ -422,19 +467,27 @@ func (p *Pss) process(pssmsg *PssMsg) error { return err } } - p.executeHandlers(psstopic, payload, from, asymmetric, keyid) + p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid) return nil } -func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, asymmetric bool, keyid string) { +func (p *Pss) executeHandlers(topic Topic, payload []byte, from *PssAddress, raw bool, prox bool, asymmetric bool, keyid string) { handlers := p.getHandlers(topic) peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{}) - for f := range handlers { - err := (*f)(payload, peer, asymmetric, keyid) + for h := range handlers { + if !h.caps.raw && raw { + log.Warn("norawhandler") + continue + } + if !h.caps.prox && prox { + log.Warn("noproxhandler") + continue + } + err := (h.f)(payload, peer, asymmetric, keyid) if err != nil { - log.Warn("Pss handler %p failed: %v", f, err) + log.Warn("Pss handler failed", "err", err) } } } @@ -445,9 +498,23 @@ func (p *Pss) isSelfRecipient(msg *PssMsg) bool { } // test match of leftmost bytes in given message to node's Kademlia address -func (p *Pss) isSelfPossibleRecipient(msg *PssMsg) bool { +func (p *Pss) isSelfPossibleRecipient(msg *PssMsg, prox bool) bool { local := p.Kademlia.BaseAddr() - return bytes.Equal(msg.To, local[:len(msg.To)]) + + // if a 
partial address matches we are possible recipient regardless of prox + // if not and prox is not set, we are surely not + if bytes.Equal(msg.To, local[:len(msg.To)]) { + + return true + } else if !prox { + return false + } + + depth := p.Kademlia.NeighbourhoodDepth() + po, _ := p.Kademlia.Pof(p.Kademlia.BaseAddr(), msg.To, 0) + log.Trace("selfpossible", "po", po, "depth", depth) + + return depth <= po } ///////////////////////////////////////////////////////////////////// @@ -684,9 +751,6 @@ func (p *Pss) enqueue(msg *PssMsg) error { // // Will fail if raw messages are disallowed func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error { - if !p.allowRaw { - return errors.New("Raw messages not enabled") - } pssMsgParams := &msgParams{ raw: true, } @@ -699,7 +763,17 @@ func (p *Pss) SendRaw(address PssAddress, topic Topic, msg []byte) error { pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix()) pssMsg.Payload = payload p.addFwdCache(pssMsg) - return p.enqueue(pssMsg) + err := p.enqueue(pssMsg) + if err != nil { + return err + } + + // if we have a proxhandler on this topic + // also deliver message to ourselves + if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox { + return p.process(pssMsg, true, true) + } + return nil } // Send a message using symmetric encryption @@ -800,7 +874,16 @@ func (p *Pss) send(to []byte, topic Topic, msg []byte, asymmetric bool, key []by pssMsg.To = to pssMsg.Expire = uint32(time.Now().Add(p.msgTTL).Unix()) pssMsg.Payload = envelope - return p.enqueue(pssMsg) + err = p.enqueue(pssMsg) + if err != nil { + return err + } + if _, ok := p.topicHandlerCaps[topic]; ok { + if p.isSelfPossibleRecipient(pssMsg, true) && p.topicHandlerCaps[topic].prox { + return p.process(pssMsg, true, true) + } + } + return nil } // Forwards a pss message to the peer(s) closest to the to recipient address in the PssMsg struct @@ -895,6 +978,10 @@ func (p *Pss) cleanFwdCache() { } } +func label(b []byte) string { + return fmt.Sprintf("%04x", b[:2]) +} + // add a message to the cache func (p *Pss) addFwdCache(msg *PssMsg) error { metrics.GetOrRegisterCounter("pss.addfwdcache", nil).Inc(1) @@ -934,10 +1021,14 @@ func (p *Pss) checkFwdCache(msg *PssMsg) bool { // Digest of message func (p *Pss) digest(msg *PssMsg) pssDigest { - hasher := p.hashPool.Get().(storage.SwarmHash) + return p.digestBytes(msg.serialize()) +} + +func (p *Pss) digestBytes(msg []byte) pssDigest { + hasher := p.hashPool.Get().(hash.Hash) defer p.hashPool.Put(hasher) hasher.Reset() - hasher.Write(msg.serialize()) + hasher.Write(msg) digest := pssDigest{} key := hasher.Sum(nil) copy(digest[:], key[:digestLength]) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go b/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go index 1e33ecdca..ba963067c 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/pss/types.go @@ -159,9 +159,39 @@ func (msg *PssMsg) String() string { } // Signature for a message handler function for a PssMsg -// // Implementations of this type are passed to Pss.Register together with a topic, -type Handler func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error +type HandlerFunc func(msg []byte, p *p2p.Peer, asymmetric bool, keyid string) error + +type handlerCaps struct { + raw bool + prox bool +} + +// Handler defines code to be executed upon reception of content. 
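+// Aside (illustrative sketch, not part of the upstream change): how a caller
+// is expected to compose the capability flags carried by the handler type
+// defined below. Assumes a *Pss instance and a Topic in scope, and that the
+// p2p and log packages are imported in this file.
+func exampleRegisterRawProxHandler(ps *Pss, topic Topic) (deregister func()) {
+	h := NewHandler(func(msg []byte, peer *p2p.Peer, asymmetric bool, keyid string) error {
+		log.Info("pss message", "len", len(msg), "asymmetric", asymmetric)
+		return nil
+	}).WithRaw().WithProxBin() // opt in to raw and neighbourhood (prox) delivery
+	return ps.Register(&topic, h) // the returned function deregisters the handler
+}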
+type handler struct { + f HandlerFunc + caps *handlerCaps +} + +// NewHandler returns a new message handler +func NewHandler(f HandlerFunc) *handler { + return &handler{ + f: f, + caps: &handlerCaps{}, + } +} + +// WithRaw is a chainable method that allows raw messages to be handled. +func (h *handler) WithRaw() *handler { + h.caps.raw = true + return h +} + +// WithProxBin is a chainable method that allows sending messages with full addresses to neighbourhoods, using the Kademlia depth as reference. +func (h *handler) WithProxBin() *handler { + h.caps.prox = true + return h +} // the stateStore handles saving and loading PSS peers and their corresponding keys // it is currently unimplemented @@ -169,10 +199,6 @@ type stateStore struct { values map[string][]byte } -func newStateStore() *stateStore { - return &stateStore{values: make(map[string][]byte)} -} - func (store *stateStore) Load(key string) ([]byte, error) { return nil, nil } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/sctx/sctx.go b/vendor/github.com/ethereum/go-ethereum/swarm/sctx/sctx.go index bed2b1145..fb7d35b00 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/sctx/sctx.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/sctx/sctx.go @@ -2,19 +2,17 @@ package sctx import "context" -type ContextKey int - -const ( - HTTPRequestIDKey ContextKey = iota - requestHostKey +type ( + HTTPRequestIDKey struct{} + requestHostKey struct{} ) func SetHost(ctx context.Context, domain string) context.Context { - return context.WithValue(ctx, requestHostKey, domain) + return context.WithValue(ctx, requestHostKey{}, domain) } func GetHost(ctx context.Context) string { - v, ok := ctx.Value(requestHostKey).(string) + v, ok := ctx.Value(requestHostKey{}).(string) if ok { return v } diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go new file mode 100644 index 000000000..e128b8cbc --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/db.go @@ -0,0 +1,130 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// Package shed provides simple abstraction components to compose +// more complex operations on storage data organized in fields and indexes. +// +// The only type which holds logical information about swarm storage chunk data +// and metadata is IndexItem. This part is not generalized, mostly for +// performance reasons. +package shed + +import ( + "github.com/ethereum/go-ethereum/metrics" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +// The limit for LevelDB OpenFilesCacheCapacity.
+const openFileLimit = 128 + +// DB provides abstractions over LevelDB in order to +// implement complex structures using fields and ordered indexes. +// It provides a schema functionality to store fields and indexes +// information about naming and types. +type DB struct { + ldb *leveldb.DB +} + +// NewDB constructs a new DB and validates the schema +// if it exists in database on the given path. +func NewDB(path string) (db *DB, err error) { + ldb, err := leveldb.OpenFile(path, &opt.Options{ + OpenFilesCacheCapacity: openFileLimit, + }) + if err != nil { + return nil, err + } + db = &DB{ + ldb: ldb, + } + + if _, err = db.getSchema(); err != nil { + if err == leveldb.ErrNotFound { + // save schema with initialized default fields + if err = db.putSchema(schema{ + Fields: make(map[string]fieldSpec), + Indexes: make(map[byte]indexSpec), + }); err != nil { + return nil, err + } + } else { + return nil, err + } + } + return db, nil +} + +// Put wraps LevelDB Put method to increment metrics counter. +func (db *DB) Put(key []byte, value []byte) (err error) { + err = db.ldb.Put(key, value, nil) + if err != nil { + metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1) + return err + } + metrics.GetOrRegisterCounter("DB.put", nil).Inc(1) + return nil +} + +// Get wraps LevelDB Get method to increment metrics counter. +func (db *DB) Get(key []byte) (value []byte, err error) { + value, err = db.ldb.Get(key, nil) + if err != nil { + if err == leveldb.ErrNotFound { + metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1) + } else { + metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1) + } + return nil, err + } + metrics.GetOrRegisterCounter("DB.get", nil).Inc(1) + return value, nil +} + +// Delete wraps LevelDB Delete method to increment metrics counter. +func (db *DB) Delete(key []byte) (err error) { + err = db.ldb.Delete(key, nil) + if err != nil { + metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1) + return err + } + metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1) + return nil +} + +// NewIterator wraps LevelDB NewIterator method to increment metrics counter. +func (db *DB) NewIterator() iterator.Iterator { + metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1) + + return db.ldb.NewIterator(nil, nil) +} + +// WriteBatch wraps LevelDB Write method to increment metrics counter. +func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) { + err = db.ldb.Write(batch, nil) + if err != nil { + metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1) + return err + } + metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1) + return nil +} + +// Close closes LevelDB database. +func (db *DB) Close() (err error) { + return db.ldb.Close() +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go new file mode 100644 index 000000000..a7e8f0c75 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_string.go @@ -0,0 +1,66 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package shed + +import ( + "github.com/syndtr/goleveldb/leveldb" +) + +// StringField is the simplest field implementation +// that stores an arbitrary string under a specific LevelDB key. +type StringField struct { + db *DB + key []byte +} + +// NewStringField returns a new instance of StringField. +// It validates its name and type against the database schema. +func (db *DB) NewStringField(name string) (f StringField, err error) { + key, err := db.schemaFieldKey(name, "string") + if err != nil { + return f, err + } + return StringField{ + db: db, + key: key, + }, nil +} + +// Get returns a string value from the database. +// If the value is not found, an empty string is returned +// and no error. +func (f StringField) Get() (val string, err error) { + b, err := f.db.Get(f.key) + if err != nil { + if err == leveldb.ErrNotFound { + return "", nil + } + return "", err + } + return string(b), nil +} + +// Put stores a string in the database. +func (f StringField) Put(val string) (err error) { + return f.db.Put(f.key, []byte(val)) +} + +// PutInBatch stores a string in a batch that can be +// saved later in the database. +func (f StringField) PutInBatch(batch *leveldb.Batch, val string) { + batch.Put(f.key, []byte(val)) +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go new file mode 100644 index 000000000..90daee7fc --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_struct.go @@ -0,0 +1,71 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package shed + +import ( + "github.com/ethereum/go-ethereum/rlp" + "github.com/syndtr/goleveldb/leveldb" +) + +// StructField is a helper to store a complex structure by +// encoding it in RLP format. +type StructField struct { + db *DB + key []byte +} + +// NewStructField returns a new StructField. +// It validates its name and type against the database schema. +func (db *DB) NewStructField(name string) (f StructField, err error) { + key, err := db.schemaFieldKey(name, "struct-rlp") + if err != nil { + return f, err + } + return StructField{ + db: db, + key: key, + }, nil +} + +// Get unmarshals data from the database to a provided val. +// If the data is not found leveldb.ErrNotFound is returned.
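+// For example (an illustrative sketch, not part of the patch; assumes a DB
+// opened with NewDB is in scope, and uses a hypothetical config type):
+//
+//	type config struct {
+//		Port uint
+//	}
+//	field, err := db.NewStructField("config")
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := field.Put(config{Port: 8500}); err != nil {
+//		// handle error
+//	}
+//	var c config
+//	err = field.Get(&c) // c.Port == 8500; leveldb.ErrNotFound if never stored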
+func (f StructField) Get(val interface{}) (err error) { + b, err := f.db.Get(f.key) + if err != nil { + return err + } + return rlp.DecodeBytes(b, val) +} + +// Put marshals provided val and saves it to the database. +func (f StructField) Put(val interface{}) (err error) { + b, err := rlp.EncodeToBytes(val) + if err != nil { + return err + } + return f.db.Put(f.key, b) +} + +// PutInBatch marshals provided val and puts it into the batch. +func (f StructField) PutInBatch(batch *leveldb.Batch, val interface{}) (err error) { + b, err := rlp.EncodeToBytes(val) + if err != nil { + return err + } + batch.Put(f.key, b) + return nil +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go new file mode 100644 index 000000000..80e0069ae --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/field_uint64.go @@ -0,0 +1,108 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package shed + +import ( + "encoding/binary" + + "github.com/syndtr/goleveldb/leveldb" +) + +// Uint64Field provides a way to have a simple counter in the database. +// It transparently encodes a uint64 type value to bytes. +type Uint64Field struct { + db *DB + key []byte +} + +// NewUint64Field returns a new Uint64Field. +// It validates its name and type against the database schema. +func (db *DB) NewUint64Field(name string) (f Uint64Field, err error) { + key, err := db.schemaFieldKey(name, "uint64") + if err != nil { + return f, err + } + return Uint64Field{ + db: db, + key: key, + }, nil +} + +// Get retrieves a uint64 value from the database. +// If the value is not found in the database a 0 value +// is returned and no error. +func (f Uint64Field) Get() (val uint64, err error) { + b, err := f.db.Get(f.key) + if err != nil { + if err == leveldb.ErrNotFound { + return 0, nil + } + return 0, err + } + return binary.BigEndian.Uint64(b), nil +} + +// Put encodes a uint64 value and stores it in the database. +func (f Uint64Field) Put(val uint64) (err error) { + return f.db.Put(f.key, encodeUint64(val)) +} + +// PutInBatch stores a uint64 value in a batch +// that can be saved later in the database. +func (f Uint64Field) PutInBatch(batch *leveldb.Batch, val uint64) { + batch.Put(f.key, encodeUint64(val)) +} + +// Inc increments a uint64 value in the database. +// This operation is not goroutine safe. +func (f Uint64Field) Inc() (val uint64, err error) { + val, err = f.Get() + if err != nil { + if err == leveldb.ErrNotFound { + val = 0 + } else { + return 0, err + } + } + val++ + return val, f.Put(val) +} + +// IncInBatch increments a uint64 value in the batch +// by retrieving a value from the database, not the same batch. +// This operation is not goroutine safe.
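+// A sketch of the intended counter flow (illustrative only; "entries" is a
+// hypothetical field name, and db is an open shed DB):
+//
+//	entries, err := db.NewUint64Field("entries")
+//	if err != nil {
+//		// handle error
+//	}
+//	batch := new(leveldb.Batch)
+//	n, err := entries.IncInBatch(batch) // n is the value after increment
+//	if err != nil {
+//		// handle error
+//	}
+//	err = db.WriteBatch(batch) // visible to Get only after the batch is written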
+func (f Uint64Field) IncInBatch(batch *leveldb.Batch) (val uint64, err error) { + val, err = f.Get() + if err != nil { + if err == leveldb.ErrNotFound { + val = 0 + } else { + return 0, err + } + } + val++ + f.PutInBatch(batch, val) + return val, nil +} + +// encodeUint64 transforms a uint64 to an 8 byte long +// slice in big endian encoding, so that byte-wise comparison +// of encoded values agrees with numeric order. +func encodeUint64(val uint64) (b []byte) { + b = make([]byte, 8) + binary.BigEndian.PutUint64(b, val) + return b +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go new file mode 100644 index 000000000..ba803e3c2 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/index.go @@ -0,0 +1,264 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package shed + +import ( + "github.com/syndtr/goleveldb/leveldb" +) + +// IndexItem holds fields relevant to Swarm Chunk data and metadata. +// All information required for swarm storage and operations +// on that storage must be defined here. +// This structure is logically connected to swarm storage, +// the only part of this package that is not generalized, +// mostly for performance reasons. +// +// IndexItem is a type that is used for retrieving, storing and encoding +// chunk data and metadata. It is passed as an argument to Index encoding +// functions, the get function and the put function. +// It is also returned with additional data from a get function call, +// and is the argument in the iterator function definition. +type IndexItem struct { + Address []byte + Data []byte + AccessTimestamp int64 + StoreTimestamp int64 + // UseMockStore is a pointer to identify + // an unset state of the field in Join function. + UseMockStore *bool +} + +// Merge is a helper method to construct a new +// IndexItem by filling in fields that hold default values +// in a particular IndexItem with values from another one. +func (i IndexItem) Merge(i2 IndexItem) (new IndexItem) { + if i.Address == nil { + i.Address = i2.Address + } + if i.Data == nil { + i.Data = i2.Data + } + if i.AccessTimestamp == 0 { + i.AccessTimestamp = i2.AccessTimestamp + } + if i.StoreTimestamp == 0 { + i.StoreTimestamp = i2.StoreTimestamp + } + if i.UseMockStore == nil { + i.UseMockStore = i2.UseMockStore + } + return i +} + +// Index represents a set of LevelDB key value pairs that have a common +// prefix. It holds functions for encoding and decoding keys and values +// to provide transparent actions on saved data which include: +// - getting a particular IndexItem +// - saving a particular IndexItem +// - iterating over sorted LevelDB keys +// It implements IndexIteratorInterface interface.
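+// To illustrate how an Index is wired together (a sketch, not part of the
+// patch; the index name and field layout are hypothetical, and encoding/binary
+// is assumed to be imported):
+//
+//	idx, err := db.NewIndex("Address->StoreTimestamp", IndexFuncs{
+//		EncodeKey: func(fields IndexItem) ([]byte, error) {
+//			return fields.Address, nil // the chunk address is the key
+//		},
+//		DecodeKey: func(key []byte) (IndexItem, error) {
+//			return IndexItem{Address: key}, nil
+//		},
+//		EncodeValue: func(fields IndexItem) ([]byte, error) {
+//			b := make([]byte, 8)
+//			binary.BigEndian.PutUint64(b, uint64(fields.StoreTimestamp))
+//			return b, nil
+//		},
+//		DecodeValue: func(value []byte) (IndexItem, error) {
+//			return IndexItem{StoreTimestamp: int64(binary.BigEndian.Uint64(value))}, nil
+//		},
+//	})
+//	// idx.Put, idx.Get and idx.Delete then operate on whole IndexItems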
+type Index struct { + db *DB + prefix []byte + encodeKeyFunc func(fields IndexItem) (key []byte, err error) + decodeKeyFunc func(key []byte) (e IndexItem, err error) + encodeValueFunc func(fields IndexItem) (value []byte, err error) + decodeValueFunc func(value []byte) (e IndexItem, err error) +} + +// IndexFuncs structure defines functions for encoding and decoding +// LevelDB keys and values for a specific index. +type IndexFuncs struct { + EncodeKey func(fields IndexItem) (key []byte, err error) + DecodeKey func(key []byte) (e IndexItem, err error) + EncodeValue func(fields IndexItem) (value []byte, err error) + DecodeValue func(value []byte) (e IndexItem, err error) +} + +// NewIndex returns a new Index instance with defined name and +// encoding functions. The name must be unique and will be validated +// against the database schema for a key prefix byte. +func (db *DB) NewIndex(name string, funcs IndexFuncs) (f Index, err error) { + id, err := db.schemaIndexPrefix(name) + if err != nil { + return f, err + } + prefix := []byte{id} + return Index{ + db: db, + prefix: prefix, + // This function adjusts Index LevelDB key + // by prepending the provided index id byte. + // This is needed to avoid collisions between keys of different + // indexes as all index ids are unique. + encodeKeyFunc: func(e IndexItem) (key []byte, err error) { + key, err = funcs.EncodeKey(e) + if err != nil { + return nil, err + } + return append(append(make([]byte, 0, len(key)+1), prefix...), key...), nil + }, + // This function reverses the encodeKeyFunc constructed key + // to transparently work with index keys without their index ids. + // It assumes that index keys are prefixed with only one byte. + decodeKeyFunc: func(key []byte) (e IndexItem, err error) { + return funcs.DecodeKey(key[1:]) + }, + encodeValueFunc: funcs.EncodeValue, + decodeValueFunc: funcs.DecodeValue, + }, nil +} + +// Get accepts key fields represented as IndexItem to retrieve a +// value from the index and returns the maximum available information +// from the index represented as another IndexItem. +func (f Index) Get(keyFields IndexItem) (out IndexItem, err error) { + key, err := f.encodeKeyFunc(keyFields) + if err != nil { + return out, err + } + value, err := f.db.Get(key) + if err != nil { + return out, err + } + out, err = f.decodeValueFunc(value) + if err != nil { + return out, err + } + return out.Merge(keyFields), nil +} + +// Put accepts IndexItem to encode information from it +// and save it to the database. +func (f Index) Put(i IndexItem) (err error) { + key, err := f.encodeKeyFunc(i) + if err != nil { + return err + } + value, err := f.encodeValueFunc(i) + if err != nil { + return err + } + return f.db.Put(key, value) +} + +// PutInBatch is the same as Put method, but it just +// saves the key/value pair to the batch instead of +// directly to the database. +func (f Index) PutInBatch(batch *leveldb.Batch, i IndexItem) (err error) { + key, err := f.encodeKeyFunc(i) + if err != nil { + return err + } + value, err := f.encodeValueFunc(i) + if err != nil { + return err + } + batch.Put(key, value) + return nil +} + +// Delete accepts IndexItem to remove a key/value pair +// from the database based on its fields. +func (f Index) Delete(keyFields IndexItem) (err error) { + key, err := f.encodeKeyFunc(keyFields) + if err != nil { + return err + } + return f.db.Delete(key) +} + +// DeleteInBatch is the same as Delete, but the operation +// is performed on the batch instead of the database.
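+// A combined sketch (illustrative, not part of the patch; idx, db, addr,
+// data and oldAddr are assumed to be in scope): several index mutations can
+// be staged on one batch and committed atomically via DB.WriteBatch:
+//
+//	batch := new(leveldb.Batch)
+//	_ = idx.PutInBatch(batch, IndexItem{Address: addr, Data: data})
+//	_ = idx.DeleteInBatch(batch, IndexItem{Address: oldAddr})
+//	err := db.WriteBatch(batch) // both operations land atomically, or neither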
+func (f Index) DeleteInBatch(batch *leveldb.Batch, keyFields IndexItem) (err error) { + key, err := f.encodeKeyFunc(keyFields) + if err != nil { + return err + } + batch.Delete(key) + return nil +} + +// IndexIterFunc is a callback on every IndexItem that is decoded +// by iterating over Index keys. +// By returning true for the stop variable, iteration will +// stop, and by returning an error, that error will be +// propagated to the calling iterator method on Index. +type IndexIterFunc func(item IndexItem) (stop bool, err error) + +// IterateAll iterates over all keys of the Index. +func (f Index) IterateAll(fn IndexIterFunc) (err error) { + it := f.db.NewIterator() + defer it.Release() + + for ok := it.Seek(f.prefix); ok; ok = it.Next() { + key := it.Key() + if key[0] != f.prefix[0] { + break + } + keyIndexItem, err := f.decodeKeyFunc(key) + if err != nil { + return err + } + valueIndexItem, err := f.decodeValueFunc(it.Value()) + if err != nil { + return err + } + stop, err := fn(keyIndexItem.Merge(valueIndexItem)) + if err != nil { + return err + } + if stop { + break + } + } + return it.Error() +} + +// IterateFrom iterates over Index keys starting from the key +// encoded from the provided IndexItem. +func (f Index) IterateFrom(start IndexItem, fn IndexIterFunc) (err error) { + startKey, err := f.encodeKeyFunc(start) + if err != nil { + return err + } + it := f.db.NewIterator() + defer it.Release() + + for ok := it.Seek(startKey); ok; ok = it.Next() { + key := it.Key() + if key[0] != f.prefix[0] { + break + } + keyIndexItem, err := f.decodeKeyFunc(key) + if err != nil { + return err + } + valueIndexItem, err := f.decodeValueFunc(it.Value()) + if err != nil { + return err + } + stop, err := fn(keyIndexItem.Merge(valueIndexItem)) + if err != nil { + return err + } + if stop { + break + } + } + return it.Error() +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go new file mode 100644 index 000000000..cfb7c6d64 --- /dev/null +++ b/vendor/github.com/ethereum/go-ethereum/swarm/shed/schema.go @@ -0,0 +1,134 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package shed + +import ( + "encoding/json" + "errors" + "fmt" +) + +var ( + // LevelDB key value for storing the schema. + keySchema = []byte{0} + // LevelDB key prefix for all field types. + // LevelDB keys will be constructed by appending name values to this prefix. + keyPrefixFields byte = 1 + // LevelDB key prefix from which indexing keys start. + // Every index has its own key prefix and this value defines the first one. + keyPrefixIndexStart byte = 2 // Q: or maybe a higher number like 7, to have more space for potential specific prefixes +) + +// schema is used to serialize known database structure information.
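+// For orientation (an assumed example, not taken from the patch), a schema
+// serialized with one field and one index would marshal to JSON roughly as:
+//
+//	{
+//		"fields": {"entries": {"type": "uint64"}},
+//		"indexes": {"2": {"name": "Address->StoreTimestamp"}}
+//	}
+//
+// where "2" is the first index prefix byte, per keyPrefixIndexStart above.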
+type schema struct { + Fields map[string]fieldSpec `json:"fields"` // keys are field names + Indexes map[byte]indexSpec `json:"indexes"` // keys are index prefix bytes +} + +// fieldSpec holds information about a particular field. +// It does not need a Name field as it is contained in the +// schema.Fields map key. +type fieldSpec struct { + Type string `json:"type"` +} + +// indexSpec holds information about a particular index. +// It does not contain an index type, as indexes do not have types. +type indexSpec struct { + Name string `json:"name"` +} + +// schemaFieldKey retrieves the complete LevelDB key for +// a particular field from the schema definition. +func (db *DB) schemaFieldKey(name, fieldType string) (key []byte, err error) { + if name == "" { + return nil, errors.New("field name can not be blank") + } + if fieldType == "" { + return nil, errors.New("field type can not be blank") + } + s, err := db.getSchema() + if err != nil { + return nil, err + } + var found bool + for n, f := range s.Fields { + if n == name { + if f.Type != fieldType { + return nil, fmt.Errorf("field %q of type %q stored as %q in db", name, fieldType, f.Type) + } + found = true + break + } + } + if !found { + s.Fields[name] = fieldSpec{ + Type: fieldType, + } + err := db.putSchema(s) + if err != nil { + return nil, err + } + } + return append([]byte{keyPrefixFields}, []byte(name)...), nil +} + +// schemaIndexPrefix retrieves the complete LevelDB prefix for +// a particular index. +func (db *DB) schemaIndexPrefix(name string) (id byte, err error) { + if name == "" { + return 0, errors.New("index name can not be blank") + } + s, err := db.getSchema() + if err != nil { + return 0, err + } + nextID := keyPrefixIndexStart + for i, f := range s.Indexes { + if i >= nextID { + nextID = i + 1 + } + if f.Name == name { + return i, nil + } + } + id = nextID + s.Indexes[id] = indexSpec{ + Name: name, + } + return id, db.putSchema(s) +} + +// getSchema retrieves the complete schema from +// the database. +func (db *DB) getSchema() (s schema, err error) { + b, err := db.Get(keySchema) + if err != nil { + return s, err + } + err = json.Unmarshal(b, &s) + return s, err +} + +// putSchema stores the complete schema to +// the database. +func (db *DB) putSchema(s schema) (err error) { + b, err := json.Marshal(s) + if err != nil { + return err + } + return db.Put(keySchema, b) +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go index 5e5c172b2..fc5dd8f7c 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/state/dbstore.go @@ -22,6 +22,7 @@ import ( "errors" "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" ) // ErrNotFound is returned when no results are returned from the database @@ -30,6 +31,15 @@ var ErrNotFound = errors.New("ErrorNotFound") // ErrInvalidArgument is returned when the argument type does not match the expected type var ErrInvalidArgument = errors.New("ErrorInvalidArgument") +// Store defines methods required to get, set, delete values for different keys +// and close the underlying resources. +type Store interface { + Get(key string, i interface{}) (err error) + Put(key string, i interface{}) (err error) + Delete(key string) (err error) + Close() error +} + +// DBStore uses LevelDB to store values.
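+// DBStore is the LevelDB-backed implementation of the Store interface above.
+// A compile-time assertion of that contract (illustrative, not part of the
+// patch) could be placed anywhere in this package:
+//
+//	var _ Store = (*DBStore)(nil)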
type DBStore struct { db *leveldb.DB @@ -46,6 +56,17 @@ func NewDBStore(path string) (s *DBStore, err error) { }, nil } +// NewInmemoryStore returns a new instance of DBStore. To be used only in tests and simulations. +func NewInmemoryStore() *DBStore { + db, err := leveldb.Open(storage.NewMemStorage(), nil) + if err != nil { + panic(err) + } + return &DBStore{ + db: db, + } +} + // Get retrieves a persisted value for a specific key. If there is no results // ErrNotFound is returned. The provided parameter should be either a byte slice or // a struct that implements the encoding.BinaryUnmarshaler interface @@ -69,7 +90,7 @@ func (s *DBStore) Get(key string, i interface{}) (err error) { // Put stores an object that implements Binary for a specific key. func (s *DBStore) Put(key string, i interface{}) (err error) { - bytes := []byte{} + var bytes []byte marshaler, ok := i.(encoding.BinaryMarshaler) if !ok { diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go b/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go deleted file mode 100644 index 1ca25404a..000000000 --- a/vendor/github.com/ethereum/go-ethereum/swarm/state/inmemorystore.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package state - -import ( - "encoding" - "encoding/json" - "sync" -) - -// InmemoryStore is the reference implementation of Store interface that is supposed -// to be used in tests. -type InmemoryStore struct { - db map[string][]byte - mu sync.RWMutex -} - -// NewInmemoryStore returns a new instance of InmemoryStore. -func NewInmemoryStore() *InmemoryStore { - return &InmemoryStore{ - db: make(map[string][]byte), - } -} - -// Get retrieves a value stored for a specific key. If there is no value found, -// ErrNotFound is returned. -func (s *InmemoryStore) Get(key string, i interface{}) (err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - bytes, ok := s.db[key] - if !ok { - return ErrNotFound - } - - unmarshaler, ok := i.(encoding.BinaryUnmarshaler) - if !ok { - return json.Unmarshal(bytes, i) - } - - return unmarshaler.UnmarshalBinary(bytes) -} - -// Put stores a value for a specific key. -func (s *InmemoryStore) Put(key string, i interface{}) (err error) { - s.mu.Lock() - defer s.mu.Unlock() - bytes := []byte{} - - marshaler, ok := i.(encoding.BinaryMarshaler) - if !ok { - if bytes, err = json.Marshal(i); err != nil { - return err - } - } else { - if bytes, err = marshaler.MarshalBinary(); err != nil { - return err - } - } - - s.db[key] = bytes - return nil -} - -// Delete removes value stored under a specific key. 
-func (s *InmemoryStore) Delete(key string) (err error) { - s.mu.Lock() - defer s.mu.Unlock() - - if _, ok := s.db[key]; !ok { - return ErrNotFound - } - delete(s.db, key) - return nil -} - -// Close does not do anything. -func (s *InmemoryStore) Close() error { - return nil -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go index 40292e88f..cbe65372a 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/chunker.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "sync" + "time" "github.com/ethereum/go-ethereum/metrics" ch "github.com/ethereum/go-ethereum/swarm/chunk" @@ -410,10 +411,14 @@ func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, e log.Debug("lazychunkreader.size", "addr", r.addr) if r.chunkData == nil { + + startTime := time.Now() chunkData, err := r.getter.Get(cctx, Reference(r.addr)) if err != nil { + metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime) return 0, err } + metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime) r.chunkData = chunkData s := r.chunkData.Size() log.Debug("lazychunkreader.size", "key", r.addr, "size", s) @@ -542,8 +547,10 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS wg.Add(1) go func(j int64) { childAddress := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize] + startTime := time.Now() chunkData, err := r.getter.Get(r.ctx, Reference(childAddress)) if err != nil { + metrics.GetOrRegisterResettingTimer("lcr.getter.get.err", nil).UpdateSince(startTime) log.Debug("lazychunkreader.join", "key", fmt.Sprintf("%x", childAddress), "err", err) select { case errC <- fmt.Errorf("chunk %v-%v not found; key: %s", off, off+treeSize, fmt.Sprintf("%x", childAddress)): @@ -551,6 +558,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS } return } + metrics.GetOrRegisterResettingTimer("lcr.getter.get", nil).UpdateSince(startTime) if l := len(chunkData); l < 9 { select { case errC <- fmt.Errorf("chunk %v-%v incomplete; key: %s, data length %v", off, off+treeSize, fmt.Sprintf("%x", childAddress), l): diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go index 2a7f51cb3..bd4f6b916 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/ldbstore.go @@ -32,7 +32,6 @@ import ( "fmt" "io" "io/ioutil" - "sort" "sync" "github.com/ethereum/go-ethereum/metrics" @@ -40,12 +39,16 @@ import ( "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage/mock" "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" ) const ( - gcArrayFreeRatio = 0.1 - maxGCitems = 5000 // max number of items to be gc'd per call to collectGarbage() + defaultGCRatio = 10 + defaultMaxGCRound = 10000 + defaultMaxGCBatch = 5000 + + wEntryCnt = 1 << 0 + wIndexCnt = 1 << 1 + wAccessCnt = 1 << 2 ) var ( @@ -54,26 +57,19 @@ var ( var ( keyIndex = byte(0) - keyOldData = byte(1) keyAccessCnt = []byte{2} keyEntryCnt = []byte{3} keyDataIdx = []byte{4} keyData = byte(6) keyDistanceCnt = byte(7) keySchema = []byte{8} + keyGCIdx = byte(9) // access to chunk data index, used by garbage collection in ascending order from first entry ) var ( ErrDBClosed = errors.New("LDBStore 
closed") ) -type gcItem struct { - idx uint64 - value uint64 - idxKey []byte - po uint8 -} - type LDBStoreParams struct { *StoreParams Path string @@ -89,6 +85,16 @@ func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams { } } +type garbage struct { + maxRound int // maximum number of chunks to delete in one garbage collection round + maxBatch int // maximum number of chunks to delete in one db request batch + ratio int // 1/x ratio to calculate the number of chunks to gc on a low capacity db + count int // number of chunks deleted in running round + target int // number of chunks to delete in running round + batch *dbBatch // the delete batch + runC chan struct{} // struct in chan means gc is NOT running +} + type LDBStore struct { db *LDBDatabase @@ -102,12 +108,12 @@ type LDBStore struct { hashfunc SwarmHasher po func(Address) uint8 - batchC chan bool batchesC chan struct{} closed bool batch *dbBatch lock sync.RWMutex quit chan struct{} + gc *garbage // Functions encodeDataFunc is used to bypass // the default functionality of DbStore with @@ -166,9 +172,47 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) { data, _ = s.db.Get(keyDataIdx) s.dataIdx = BytesToU64(data) + // set up garbage collection + s.gc = &garbage{ + maxBatch: defaultMaxGCBatch, + maxRound: defaultMaxGCRound, + ratio: defaultGCRatio, + } + + s.gc.runC = make(chan struct{}, 1) + s.gc.runC <- struct{}{} + return s, nil } +// MarkAccessed increments the access counter as a best effort for a chunk, so +// the chunk won't get garbage collected. +func (s *LDBStore) MarkAccessed(addr Address) { + s.lock.Lock() + defer s.lock.Unlock() + + if s.closed { + return + } + + proximity := s.po(addr) + s.tryAccessIdx(addr, proximity) +} + +// initialize and set values for processing of gc round +func (s *LDBStore) startGC(c int) { + + s.gc.count = 0 + // calculate the target number of deletions + if c >= s.gc.maxRound { + s.gc.target = s.gc.maxRound + } else { + s.gc.target = c / s.gc.ratio + } + s.gc.batch = newBatch() + log.Debug("startgc", "requested", c, "target", s.gc.target) +} + // NewMockDbStore creates a new instance of DbStore with // mockStore set to a provided value. If mockStore argument is nil, // this function behaves exactly as NewDbStore. 
@@ -225,6 +269,35 @@ func getDataKey(idx uint64, po uint8) []byte { return key } +func getGCIdxKey(index *dpaDBIndex) []byte { + key := make([]byte, 9) + key[0] = keyGCIdx + binary.BigEndian.PutUint64(key[1:], index.Access) + return key +} + +func getGCIdxValue(index *dpaDBIndex, po uint8, addr Address) []byte { + val := make([]byte, 41) // po = 1, index.Index = 8, Address = 32 + val[0] = po + binary.BigEndian.PutUint64(val[1:], index.Idx) + copy(val[9:], addr) + return val +} + +func parseIdxKey(key []byte) (byte, []byte) { + return key[0], key[1:] +} + +func parseGCIdxEntry(accessCnt []byte, val []byte) (index *dpaDBIndex, po uint8, addr Address) { + index = &dpaDBIndex{ + Idx: binary.BigEndian.Uint64(val[1:]), + Access: binary.BigEndian.Uint64(accessCnt), + } + po = val[0] + addr = val[9:] + return +} + func encodeIndex(index *dpaDBIndex) []byte { data, _ := rlp.EncodeToBytes(index) return data @@ -247,55 +320,71 @@ func decodeData(addr Address, data []byte) (*chunk, error) { return NewChunk(addr, data[32:]), nil } -func (s *LDBStore) collectGarbage(ratio float32) { - log.Trace("collectGarbage", "ratio", ratio) +func (s *LDBStore) collectGarbage() error { + + // prevent duplicate gc from starting when one is already running + select { + case <-s.gc.runC: + default: + return nil + } + + s.lock.Lock() + entryCnt := s.entryCnt + s.lock.Unlock() metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1) - it := s.db.NewIterator() - defer it.Release() + // calculate the amount of chunks to collect and reset counter + s.startGC(int(entryCnt)) + log.Debug("collectGarbage", "target", s.gc.target, "entryCnt", entryCnt) - garbage := []*gcItem{} - gcnt := 0 + var totalDeleted int + for s.gc.count < s.gc.target { + it := s.db.NewIterator() + ok := it.Seek([]byte{keyGCIdx}) + var singleIterationCount int - for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < maxGCitems) && (uint64(gcnt) < s.entryCnt); ok = it.Next() { - itkey := it.Key() + // every batch needs a lock so we avoid entries changing accessidx in the meantime + s.lock.Lock() + for ; ok && (singleIterationCount < s.gc.maxBatch); ok = it.Next() { - if (itkey == nil) || (itkey[0] != keyIndex) { - break + // quit if no more access index keys + itkey := it.Key() + if (itkey == nil) || (itkey[0] != keyGCIdx) { + break + } + + // get chunk data entry from access index + val := it.Value() + index, po, hash := parseGCIdxEntry(itkey[1:], val) + keyIdx := make([]byte, 33) + keyIdx[0] = keyIndex + copy(keyIdx[1:], hash) + + // add delete operation to batch + s.delete(s.gc.batch.Batch, index, keyIdx, po) + singleIterationCount++ + s.gc.count++ + log.Trace("garbage collect enqueued chunk for deletion", "key", hash) + + // break if target is not on max garbage batch boundary + if s.gc.count >= s.gc.target { + break + } } - // it.Key() contents change on next call to it.Next(), so we must copy it - key := make([]byte, len(it.Key())) - copy(key, it.Key()) - - val := it.Value() - - var index dpaDBIndex - - hash := key[1:] - decodeIndex(val, &index) - po := s.po(hash) - - gci := &gcItem{ - idxKey: key, - idx: index.Idx, - value: index.Access, // the smaller, the more likely to be gc'd. see sort comparator below. 
- po: po, - } - - garbage = append(garbage, gci) - gcnt++ + s.writeBatch(s.gc.batch, wEntryCnt) + s.lock.Unlock() + it.Release() + log.Trace("garbage collect batch done", "batch", singleIterationCount, "total", s.gc.count) } - sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value }) + s.gc.runC <- struct{}{} + log.Debug("garbage collect done", "c", s.gc.count) - cutoff := int(float32(gcnt) * ratio) - metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(cutoff)) - - for i := 0; i < cutoff; i++ { - s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po) - } + metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(totalDeleted)) + return nil } // Export writes all chunks from the store to a tar archive, returning the @@ -418,7 +507,7 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) { } } -//Cleanup iterates over the database and deletes chunks if they pass the `f` condition +// Cleanup iterates over the database and deletes chunks if they pass the `f` condition func (s *LDBStore) Cleanup(f func(*chunk) bool) { var errorsFound, removed, total int @@ -474,7 +563,7 @@ func (s *LDBStore) Cleanup(f func(*chunk) bool) { // if chunk is to be removed if f(c) { log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.sdata), "size", cs) - s.delete(index.Idx, getIndexKey(key[1:]), po) + s.deleteNow(&index, getIndexKey(key[1:]), po) removed++ errorsFound++ } @@ -483,67 +572,196 @@ func (s *LDBStore) Cleanup(f func(*chunk) bool) { log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed)) } -func (s *LDBStore) ReIndex() { - //Iterates over the database and checks that there are no faulty chunks +// CleanGCIndex rebuilds the garbage collector index from scratch, while +// removing inconsistent elements, e.g., indices with missing data chunks. +// WARN: it's a pretty heavy, long running function. +func (s *LDBStore) CleanGCIndex() error { + s.lock.Lock() + defer s.lock.Unlock() + + batch := leveldb.Batch{} + + var okEntryCount uint64 + var totalEntryCount uint64 + + // throw out all gc indices, we will rebuild from cleaned index it := s.db.NewIterator() - startPosition := []byte{keyOldData} - it.Seek(startPosition) - var key []byte - var errorsFound, total int + it.Seek([]byte{keyGCIdx}) + var gcDeletes int for it.Valid() { - key = it.Key() - if (key == nil) || (key[0] != keyOldData) { + rowType, _ := parseIdxKey(it.Key()) + if rowType != keyGCIdx { break } - data := it.Value() - hasher := s.hashfunc() - hasher.Write(data) - hash := hasher.Sum(nil) - - newKey := make([]byte, 10) - oldCntKey := make([]byte, 2) - newCntKey := make([]byte, 2) - oldCntKey[0] = keyDistanceCnt - newCntKey[0] = keyDistanceCnt - key[0] = keyData - key[1] = s.po(Address(key[1:])) - oldCntKey[1] = key[1] - newCntKey[1] = s.po(Address(newKey[1:])) - copy(newKey[2:], key[1:]) - newValue := append(hash, data...) 
- - batch := new(leveldb.Batch) - batch.Delete(key) - s.bucketCnt[oldCntKey[1]]-- - batch.Put(oldCntKey, U64ToBytes(s.bucketCnt[oldCntKey[1]])) - batch.Put(newKey, newValue) - s.bucketCnt[newCntKey[1]]++ - batch.Put(newCntKey, U64ToBytes(s.bucketCnt[newCntKey[1]])) - s.db.Write(batch) + batch.Delete(it.Key()) + gcDeletes++ it.Next() } + log.Debug("gc", "deletes", gcDeletes) + if err := s.db.Write(&batch); err != nil { + return err + } + batch.Reset() + it.Release() - log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total)) + + // corrected po index pointer values + var poPtrs [256]uint64 + + // set to true if chunk count not on 4096 iteration boundary + var doneIterating bool + + // last key index in previous iteration + lastIdxKey := []byte{keyIndex} + + // counter for debug output + var cleanBatchCount int + + // go through all key index entries + for !doneIterating { + cleanBatchCount++ + var idxs []dpaDBIndex + var chunkHashes [][]byte + var pos []uint8 + it := s.db.NewIterator() + + it.Seek(lastIdxKey) + + // 4096 is just a nice number, don't look for any hidden meaning here... + var i int + for i = 0; i < 4096; i++ { + + // this really shouldn't happen unless database is empty + // but let's keep it to be safe + if !it.Valid() { + doneIterating = true + break + } + + // if it's not keyindex anymore we're done iterating + rowType, chunkHash := parseIdxKey(it.Key()) + if rowType != keyIndex { + doneIterating = true + break + } + + // decode the retrieved index + var idx dpaDBIndex + err := decodeIndex(it.Value(), &idx) + if err != nil { + return fmt.Errorf("corrupt index: %v", err) + } + po := s.po(chunkHash) + lastIdxKey = it.Key() + + // if we don't find the data key, remove the entry + // if we find it, add to the array of new gc indices to create + dataKey := getDataKey(idx.Idx, po) + _, err = s.db.Get(dataKey) + if err != nil { + log.Warn("deleting inconsistent index (missing data)", "key", chunkHash) + batch.Delete(it.Key()) + } else { + idxs = append(idxs, idx) + chunkHashes = append(chunkHashes, chunkHash) + pos = append(pos, po) + okEntryCount++ + if idx.Idx > poPtrs[po] { + poPtrs[po] = idx.Idx + } + } + totalEntryCount++ + it.Next() + } + it.Release() + + // flush the key index corrections + err := s.db.Write(&batch) + if err != nil { + return err + } + batch.Reset() + + // add correct gc indices + for i, okIdx := range idxs { + gcIdxKey := getGCIdxKey(&okIdx) + gcIdxData := getGCIdxValue(&okIdx, pos[i], chunkHashes[i]) + batch.Put(gcIdxKey, gcIdxData) + log.Trace("clean ok", "key", chunkHashes[i], "gcKey", gcIdxKey, "gcData", gcIdxData) + } + + // flush them + err = s.db.Write(&batch) + if err != nil { + return err + } + batch.Reset() + + log.Debug("clean gc index pass", "batch", cleanBatchCount, "checked", i, "kept", len(idxs)) + } + + log.Debug("gc cleanup entries", "ok", okEntryCount, "total", totalEntryCount, "batchlen", batch.Len()) + + // lastly add updated entry count + var entryCount [8]byte + binary.BigEndian.PutUint64(entryCount[:], okEntryCount) + batch.Put(keyEntryCnt, entryCount[:]) + + // and add the new po index pointers + var poKey [2]byte + poKey[0] = keyDistanceCnt + for i, poPtr := range poPtrs { + poKey[1] = uint8(i) + if poPtr == 0 { + batch.Delete(poKey[:]) + } else { + var idxCount [8]byte + binary.BigEndian.PutUint64(idxCount[:], poPtr) + batch.Put(poKey[:], idxCount[:]) + } + } + + // if you made it this far your harddisk has survived. 
Congratulations
+	return s.db.Write(&batch)
 }
 
-func (s *LDBStore) Delete(addr Address) {
+// Delete removes a chunk and updates indices.
+// It is thread safe.
+func (s *LDBStore) Delete(addr Address) error {
 	s.lock.Lock()
 	defer s.lock.Unlock()
 
 	ikey := getIndexKey(addr)
 
-	var indx dpaDBIndex
-	s.tryAccessIdx(ikey, &indx)
+	idata, err := s.db.Get(ikey)
+	if err != nil {
+		return err
+	}
 
-	s.delete(indx.Idx, ikey, s.po(addr))
+	var idx dpaDBIndex
+	decodeIndex(idata, &idx)
+	proximity := s.po(addr)
+	return s.deleteNow(&idx, ikey, proximity)
 }
 
-func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
+// deleteNow executes one delete operation immediately; see *LDBStore.delete
+func (s *LDBStore) deleteNow(idx *dpaDBIndex, idxKey []byte, po uint8) error {
+	batch := new(leveldb.Batch)
+	s.delete(batch, idx, idxKey, po)
+	return s.db.Write(batch)
+}
+
+// delete adds a chunk-deletion operation to the provided batch.
+// If called directly, it decrements the entry count regardless of whether the
+// chunk exists upon deletion, risking an underflow wrap to max uint64.
+func (s *LDBStore) delete(batch *leveldb.Batch, idx *dpaDBIndex, idxKey []byte, po uint8) {
 	metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)
 
-	batch := new(leveldb.Batch)
+	gcIdxKey := getGCIdxKey(idx)
+	batch.Delete(gcIdxKey)
+	dataKey := getDataKey(idx.Idx, po)
+	batch.Delete(dataKey)
 	batch.Delete(idxKey)
-	batch.Delete(getDataKey(idx, po))
 	s.entryCnt--
 	dbEntryCount.Dec(1)
 	cntKey := make([]byte, 2)
@@ -551,7 +769,6 @@
 	cntKey[1] = po
 	batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
 	batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
-	s.db.Write(batch)
 }
 
 func (s *LDBStore) BinIndex(po uint8) uint64 {
@@ -572,6 +789,9 @@ func (s *LDBStore) CurrentStorageIndex() uint64 {
 	return s.dataIdx
 }
 
+// Put adds a chunk to the database, adding indices and incrementing global counters.
+// If it already exists, it merely increments the access count of the existing entry.
+// It is thread safe.
 func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
 	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
 	log.Trace("ldbstore.put", "key", chunk.Address())
@@ -593,15 +813,14 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
 	idata, err := s.db.Get(ikey)
 	if err != nil {
 		s.doPut(chunk, &index, po)
-	} else {
-		log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Address)
-		decodeIndex(idata, &index)
 	}
-	index.Access = s.accessCnt
-	s.accessCnt++
 	idata = encodeIndex(&index)
 	s.batch.Put(ikey, idata)
 
+	// add the access-chunkindex index for garbage collection
+	gcIdxKey := getGCIdxKey(&index)
+	gcIdxData := getGCIdxValue(&index, po, chunk.Address())
+	s.batch.Put(gcIdxKey, gcIdxData)
 	s.lock.Unlock()
 
 	select {
@@ -617,7 +836,7 @@ func (s *LDBStore) Put(ctx context.Context, chunk Chunk) error {
 	}
 }
 
-// force putting into db, does not check access index
+// force putting into db, does not check or update necessary indices
 func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
 	data := s.encodeDataFunc(chunk)
 	dkey := getDataKey(s.dataIdx, po)
@@ -627,7 +846,8 @@ func (s *LDBStore) doPut(chunk Chunk, index *dpaDBIndex, po uint8) {
 	s.entryCnt++
 	dbEntryCount.Inc(1)
 	s.dataIdx++
-
+	index.Access = s.accessCnt
+	s.accessCnt++
 	cntKey := make([]byte, 2)
 	cntKey[0] = keyDistanceCnt
 	cntKey[1] = po
@@ -659,38 +879,26 @@ func (s *LDBStore) writeCurrentBatch() error {
 	if l == 0 {
 		return nil
 	}
-	e := s.entryCnt
-	d := s.dataIdx
-	a := s.accessCnt
 	s.batch = newBatch()
-	b.err = s.writeBatch(b, e, d, a)
+	b.err = s.writeBatch(b, wEntryCnt|wAccessCnt|wIndexCnt)
 	close(b.c)
-	for e > s.capacity {
-		log.Trace("for >", "e", e, "s.capacity", s.capacity)
-		// Collect garbage in a separate goroutine
-		// to be able to interrupt this loop by s.quit.
-		done := make(chan struct{})
-		go func() {
-			s.collectGarbage(gcArrayFreeRatio)
-			log.Trace("collectGarbage closing done")
-			close(done)
-		}()
-
-		select {
-		case <-s.quit:
-			return errors.New("CollectGarbage terminated due to quit")
-		case <-done:
-		}
-		e = s.entryCnt
+	if s.entryCnt >= s.capacity {
+		go s.collectGarbage()
 	}
 	return nil
 }
 
 // must be called non concurrently
-func (s *LDBStore) writeBatch(b *dbBatch, entryCnt, dataIdx, accessCnt uint64) error {
-	b.Put(keyEntryCnt, U64ToBytes(entryCnt))
-	b.Put(keyDataIdx, U64ToBytes(dataIdx))
-	b.Put(keyAccessCnt, U64ToBytes(accessCnt))
+func (s *LDBStore) writeBatch(b *dbBatch, wFlag uint8) error {
+	if wFlag&wEntryCnt > 0 {
+		b.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
+	}
+	if wFlag&wIndexCnt > 0 {
+		b.Put(keyDataIdx, U64ToBytes(s.dataIdx))
+	}
+	if wFlag&wAccessCnt > 0 {
+		b.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
+	}
 	l := b.Len()
 	if err := s.db.Write(b.Batch); err != nil {
 		return fmt.Errorf("unable to write batch: %v", err)
@@ -712,23 +920,33 @@ func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk Chunk) []byte {
 	}
 }
 
-// try to find index; if found, update access cnt and return true
-func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
+// tryAccessIdx tries to find the index entry. If found, it increments the
+// access count for garbage collection and returns the index entry and true,
+// otherwise it returns nil and false.
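Because the GC index is keyed by the access counter, bumping the counter on a read must also relocate the chunk's GC entry, which is the extra job the rewritten tryAccessIdx below takes on: it deletes the entry at the old access slot and writes one at the new slot, both queued on the shared write batch. The move in isolation, as a hedged sketch reusing the illustrative prefix from above:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

const keyGCIdx byte = 4 // illustrative prefix, as above

func gcKey(access uint64) []byte {
	k := make([]byte, 9)
	k[0] = keyGCIdx
	binary.BigEndian.PutUint64(k[1:], access)
	return k
}

// touch moves a chunk's GC index entry from its old access slot to the new
// one; the value (po + data index + address) is unchanged, only the key moves.
func touch(batch *leveldb.Batch, oldAccess, newAccess uint64, val []byte) {
	batch.Delete(gcKey(oldAccess))
	batch.Put(gcKey(newAccess), val)
}

func main() {
	batch := new(leveldb.Batch)
	touch(batch, 7, 1001, []byte("po+idx+addr"))
	fmt.Println(batch.Len()) // 2: one delete, one put
}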
+func (s *LDBStore) tryAccessIdx(addr Address, po uint8) (*dpaDBIndex, bool) { + ikey := getIndexKey(addr) idata, err := s.db.Get(ikey) if err != nil { - return false + return nil, false } + + index := new(dpaDBIndex) decodeIndex(idata, index) + oldGCIdxKey := getGCIdxKey(index) s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt)) - s.accessCnt++ index.Access = s.accessCnt idata = encodeIndex(index) + s.accessCnt++ s.batch.Put(ikey, idata) + newGCIdxKey := getGCIdxKey(index) + newGCIdxData := getGCIdxValue(index, po, ikey[1:]) + s.batch.Delete(oldGCIdxKey) + s.batch.Put(newGCIdxKey, newGCIdxData) select { case s.batchesC <- struct{}{}: default: } - return true + return index, true } // GetSchema is returning the current named schema of the datastore as read from LevelDB @@ -739,7 +957,7 @@ func (s *LDBStore) GetSchema() (string, error) { data, err := s.db.Get(keySchema) if err != nil { if err == leveldb.ErrNotFound { - return "", nil + return DbSchemaNone, nil } return "", err } @@ -755,6 +973,9 @@ func (s *LDBStore) PutSchema(schema string) error { return s.db.Put(keySchema, []byte(schema)) } +// Get retrieves the chunk matching the provided key from the database. +// If the chunk entry does not exist, it returns an error +// Updates access count and is thread safe func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) { metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1) log.Trace("ldbstore.get", "key", addr) @@ -764,12 +985,14 @@ func (s *LDBStore) Get(_ context.Context, addr Address) (chunk Chunk, err error) return s.get(addr) } +// TODO: To conform with other private methods of this object indices should not be updated func (s *LDBStore) get(addr Address) (chunk *chunk, err error) { - var indx dpaDBIndex if s.closed { return nil, ErrDBClosed } - if s.tryAccessIdx(getIndexKey(addr), &indx) { + proximity := s.po(addr) + index, found := s.tryAccessIdx(addr, proximity) + if found { var data []byte if s.getDataFunc != nil { // if getDataFunc is defined, use it to retrieve the chunk data @@ -780,13 +1003,12 @@ func (s *LDBStore) get(addr Address) (chunk *chunk, err error) { } } else { // default DbStore functionality to retrieve chunk data - proximity := s.po(addr) - datakey := getDataKey(indx.Idx, proximity) + datakey := getDataKey(index.Idx, proximity) data, err = s.db.Get(datakey) - log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", indx.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity) + log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", index.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity) if err != nil { log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err) - s.delete(indx.Idx, getIndexKey(addr), s.po(addr)) + s.deleteNow(index, getIndexKey(addr), s.po(addr)) return } } @@ -813,33 +1035,14 @@ func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []by } } -func (s *LDBStore) updateAccessCnt(addr Address) { - - s.lock.Lock() - defer s.lock.Unlock() - - var index dpaDBIndex - s.tryAccessIdx(getIndexKey(addr), &index) // result_chn == nil, only update access cnt - -} - func (s *LDBStore) setCapacity(c uint64) { s.lock.Lock() defer s.lock.Unlock() s.capacity = c - if s.entryCnt > c { - ratio := float32(1.01) - float32(c)/float32(s.entryCnt) - if ratio < gcArrayFreeRatio { - ratio = gcArrayFreeRatio - } - if ratio > 1 { - ratio = 1 - } - for s.entryCnt > c { - s.collectGarbage(ratio) - } + for s.entryCnt > c { + s.collectGarbage() } } @@ -879,15 
+1082,3 @@ func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Add } return it.Error() } - -func databaseExists(path string) bool { - o := &opt.Options{ - ErrorIfMissing: true, - } - tdb, err := leveldb.OpenFile(path, o) - if err != nil { - return false - } - defer tdb.Close() - return true -} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go index 4fa6fb2f6..111821ff6 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/localstore.go @@ -153,6 +153,7 @@ func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk Chunk, err e if err == nil { metrics.GetOrRegisterCounter("localstore.get.cachehit", nil).Inc(1) + go ls.DbStore.MarkAccessed(addr) return chunk, nil } @@ -193,33 +194,51 @@ func (ls *LocalStore) Close() { ls.DbStore.Close() } -// Migrate checks the datastore schema vs the runtime schema, and runs migrations if they don't match +// Migrate checks the datastore schema vs the runtime schema and runs +// migrations if they don't match func (ls *LocalStore) Migrate() error { - schema, err := ls.DbStore.GetSchema() + actualDbSchema, err := ls.DbStore.GetSchema() if err != nil { log.Error(err.Error()) return err } - log.Debug("found schema", "schema", schema, "runtime-schema", CurrentDbSchema) - if schema != CurrentDbSchema { - // run migrations - - if schema == "" { - log.Debug("running migrations for", "schema", schema, "runtime-schema", CurrentDbSchema) - - // delete chunks that are not valid, i.e. chunks that do not pass any of the ls.Validators - ls.DbStore.Cleanup(func(c *chunk) bool { - return !ls.isValid(c) - }) - - err := ls.DbStore.PutSchema(DbSchemaPurity) - if err != nil { - log.Error(err.Error()) - return err - } - } + if actualDbSchema == CurrentDbSchema { + return nil } + log.Debug("running migrations for", "schema", actualDbSchema, "runtime-schema", CurrentDbSchema) + + if actualDbSchema == DbSchemaNone { + ls.migrateFromNoneToPurity() + actualDbSchema = DbSchemaPurity + } + + if err := ls.DbStore.PutSchema(actualDbSchema); err != nil { + return err + } + + if actualDbSchema == DbSchemaPurity { + if err := ls.migrateFromPurityToHalloween(); err != nil { + return err + } + actualDbSchema = DbSchemaHalloween + } + + if err := ls.DbStore.PutSchema(actualDbSchema); err != nil { + return err + } return nil } + +func (ls *LocalStore) migrateFromNoneToPurity() { + // delete chunks that are not valid, i.e. chunks that do not pass + // any of the ls.Validators + ls.DbStore.Cleanup(func(c *chunk) bool { + return !ls.isValid(c) + }) +} + +func (ls *LocalStore) migrateFromPurityToHalloween() error { + return ls.DbStore.CleanGCIndex() +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go index 43bfa24f0..73ae199e8 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/db/db.go @@ -86,6 +86,13 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error { return s.db.Write(batch, nil) } +// Delete removes the chunk reference to node with address addr. 
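Stepping back to the Migrate rewrite above: it advances the schema one named step at a time and persists the schema name after every hop, so an interrupted migration resumes from the last completed step rather than starting over. A miniature of that chain pattern; the schema names are the ones from schema.go further below, and the step bodies are placeholders:

package main

import "fmt"

const (
	schemaNone      = ""
	schemaPurity    = "purity"
	schemaHalloween = "halloween"
)

// migrate walks the schema chain one step at a time, persisting the new name
// after every hop, in the spirit of LocalStore.Migrate.
func migrate(current string, persist func(string) error) error {
	steps := map[string]struct {
		next string
		run  func() error
	}{
		schemaNone:   {schemaPurity, func() error { fmt.Println("cleanup invalid chunks"); return nil }},
		schemaPurity: {schemaHalloween, func() error { fmt.Println("rebuild GC index"); return nil }},
	}
	for current != schemaHalloween {
		step, ok := steps[current]
		if !ok {
			return fmt.Errorf("unknown schema %q", current)
		}
		if err := step.run(); err != nil {
			return err
		}
		current = step.next
		if err := persist(current); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	migrate(schemaNone, func(s string) error { fmt.Println("persist", s); return nil })
}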
+func (s *GlobalStore) Delete(addr common.Address, key []byte) error { + batch := new(leveldb.Batch) + batch.Delete(nodeDBKey(addr, key)) + return s.db.Write(batch, nil) +} + // HasKey returns whether a node with addr contains the key. func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool { has, err := s.db.Has(nodeDBKey(addr, key), nil) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go index 8878309d0..3a0a2beb8 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mem/mem.go @@ -83,6 +83,22 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error { return nil } +// Delete removes the chunk data for node with address addr. +func (s *GlobalStore) Delete(addr common.Address, key []byte) error { + s.mu.Lock() + defer s.mu.Unlock() + + var count int + if _, ok := s.nodes[string(key)]; ok { + delete(s.nodes[string(key)], addr) + count = len(s.nodes[string(key)]) + } + if count == 0 { + delete(s.data, string(key)) + } + return nil +} + // HasKey returns whether a node with addr contains the key. func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool { s.mu.Lock() diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go index 81340f927..1fb71b70a 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/mock.go @@ -70,6 +70,12 @@ func (n *NodeStore) Put(key []byte, data []byte) error { return n.store.Put(n.addr, key, data) } +// Delete removes chunk data for a key for a node that has the address +// provided on NodeStore initialization. +func (n *NodeStore) Delete(key []byte) error { + return n.store.Delete(n.addr, key) +} + // GlobalStorer defines methods for mock db store // that stores chunk data for all swarm nodes. // It is used in tests to construct mock NodeStores @@ -77,6 +83,7 @@ func (n *NodeStore) Put(key []byte, data []byte) error { type GlobalStorer interface { Get(addr common.Address, key []byte) (data []byte, err error) Put(addr common.Address, key []byte, data []byte) error + Delete(addr common.Address, key []byte) error HasKey(addr common.Address, key []byte) bool // NewNodeStore creates an instance of NodeStore // to be used by a single swarm node with diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go index 6e735f698..8cd6c83a7 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/rpc/rpc.go @@ -73,6 +73,12 @@ func (s *GlobalStore) Put(addr common.Address, key []byte, data []byte) error { return err } +// Delete calls a Delete method to RPC server. +func (s *GlobalStore) Delete(addr common.Address, key []byte) error { + err := s.client.Call(nil, "mockStore_delete", addr, key) + return err +} + // HasKey calls a HasKey method to RPC server. 
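With the LevelDB, in-memory and RPC implementations above, all three mock backends now satisfy the extended GlobalStorer interface shown in this hunk series. A toy usage sketch against the in-memory implementation, with import paths as in this vendored tree:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
)

func main() {
	// mem.NewGlobalStore returns a *mem.GlobalStore, which satisfies
	// mock.GlobalStorer now that Delete is part of the interface.
	var store mock.GlobalStorer = mem.NewGlobalStore()

	addr := common.HexToAddress("0x01")
	key := []byte("chunk-key")

	_ = store.Put(addr, key, []byte("data"))
	fmt.Println(store.HasKey(addr, key)) // true

	_ = store.Delete(addr, key)
	fmt.Println(store.HasKey(addr, key)) // false
}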
func (s *GlobalStore) HasKey(addr common.Address, key []byte) bool {
 	var has bool
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
index 02da3af55..10180985f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/mock/test/test.go
@@ -72,6 +72,31 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
 				}
 			}
 		}
+		t.Run("delete", func(t *testing.T) {
+			chunkAddr := storage.Address([]byte("1234567890abcd"))
+			for _, addr := range addrs {
+				err := globalStore.Put(addr, chunkAddr, []byte("data"))
+				if err != nil {
+					t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+				}
+			}
+			firstNodeAddr := addrs[0]
+			if err := globalStore.Delete(firstNodeAddr, chunkAddr); err != nil {
+				t.Fatalf("delete from store %s key %s: %v", firstNodeAddr.Hex(), chunkAddr.Hex(), err)
+			}
+			for i, addr := range addrs {
+				_, err := globalStore.Get(addr, chunkAddr)
+				if i == 0 {
+					if err != mock.ErrNotFound {
+						t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+					}
+				} else {
+					if err != nil {
+						t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+					}
+				}
+			}
+		})
 	})
 
 	t.Run("NodeStore", func(t *testing.T) {
@@ -114,6 +139,34 @@ func MockStore(t *testing.T, globalStore mock.GlobalStorer, n int) {
 				}
 			}
 		}
+		t.Run("delete", func(t *testing.T) {
+			chunkAddr := storage.Address([]byte("1234567890abcd"))
+			var chosenStore *mock.NodeStore
+			for addr, store := range nodes {
+				if chosenStore == nil {
+					chosenStore = store
+				}
+				err := store.Put(chunkAddr, []byte("data"))
+				if err != nil {
+					t.Fatalf("put data to store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+				}
+			}
+			if err := chosenStore.Delete(chunkAddr); err != nil {
+				t.Fatalf("delete key %s: %v", chunkAddr.Hex(), err)
+			}
+			for addr, store := range nodes {
+				_, err := store.Get(chunkAddr)
+				if store == chosenStore {
+					if err != mock.ErrNotFound {
+						t.Errorf("get data from store %s key %s: expected mock.ErrNotFound error, got %v", addr.Hex(), chunkAddr.Hex(), err)
+					}
+				} else {
+					if err != nil {
+						t.Errorf("get data from store %s key %s: %v", addr.Hex(), chunkAddr.Hex(), err)
+					}
+				}
+			}
+		})
 	})
 }
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/schema.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/schema.go
index fb8498a29..91847ca0f 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/schema.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/schema.go
@@ -1,6 +1,17 @@
 package storage
 
+// The DB schema we want to use. The actual/current DB schema might differ
+// until migrations are run.
+const CurrentDbSchema = DbSchemaHalloween
+
+// There was a time when we had no schema at all.
+const DbSchemaNone = ""
+
 // "purity" is the first formal schema of LevelDB we released together with Swarm 0.3.5
 const DbSchemaPurity = "purity"
 
-const CurrentDbSchema = DbSchemaPurity
+// "halloween" is here because we had a screw-up in the garbage collector index.
+// Because of that we had to rebuild the GC index to get rid of erroneous
+// entries, and that takes a long time. This schema is used for bookkeeping,
+// so the index rebuild will run just once.
+const DbSchemaHalloween = "halloween"
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
index 8c70f4584..42557766e 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/storage/types.go
@@ -25,7 +25,6 @@ import (
 	"fmt"
 	"hash"
 	"io"
-	"io/ioutil"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto/sha3"
@@ -80,6 +79,19 @@ func (a Address) bits(i, j uint) uint {
 	return res
 }
 
+// Proximity(x, y) returns the proximity order of the MSB distance between x and y.
+//
+// The distance metric MSB(x, y) of two equal-length byte sequences x and y is
+// the value of the binary integer cast of x^y, i.e., x and y bitwise xor-ed.
+// The binary cast is big-endian: most significant bit first (=MSB).
+//
+// Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
+// It is defined as the reverse rank of the integer part of the base-2
+// logarithm of the distance.
+// It is calculated by counting the number of common leading zeros in the (MSB)
+// binary representation of x^y.
+//
+// (0 farthest, 255 closest, 256 self)
 func Proximity(one, other []byte) (ret int) {
 	b := (MaxPO-1)/8 + 1
 	if b > len(one) {
@@ -231,26 +243,13 @@ func GenerateRandomChunk(dataSize int64) Chunk {
 }
 
 func GenerateRandomChunks(dataSize int64, count int) (chunks []Chunk) {
-	if dataSize > ch.DefaultSize {
-		dataSize = ch.DefaultSize
-	}
 	for i := 0; i < count; i++ {
-		ch := GenerateRandomChunk(ch.DefaultSize)
+		ch := GenerateRandomChunk(dataSize)
 		chunks = append(chunks, ch)
 	}
 	return chunks
 }
 
-func GenerateRandomData(l int) (r io.Reader, slice []byte) {
-	slice, err := ioutil.ReadAll(io.LimitReader(rand.Reader, int64(l)))
-	if err != nil {
-		panic("rand error")
-	}
-	// log.Warn("generate random data", "len", len(slice), "data", common.Bytes2Hex(slice))
-	r = io.LimitReader(bytes.NewReader(slice), int64(l))
-	return r, slice
-}
-
 // Size, Seek, Read, ReadAt
 type LazySectionReader interface {
 	Context() context.Context
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go
new file mode 100644
index 000000000..5d636dc20
--- /dev/null
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/swap/swap.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package swap + +import ( + "errors" + "fmt" + "strconv" + "sync" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/protocols" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/state" +) + +// SwAP Swarm Accounting Protocol +// a peer to peer micropayment system +// A node maintains an individual balance with every peer +// Only messages which have a price will be accounted for +type Swap struct { + stateStore state.Store //stateStore is needed in order to keep balances across sessions + lock sync.RWMutex //lock the balances + balances map[enode.ID]int64 //map of balances for each peer +} + +// New - swap constructor +func New(stateStore state.Store) (swap *Swap) { + swap = &Swap{ + stateStore: stateStore, + balances: make(map[enode.ID]int64), + } + return +} + +//Swap implements the protocols.Balance interface +//Add is the (sole) accounting function +func (s *Swap) Add(amount int64, peer *protocols.Peer) (err error) { + s.lock.Lock() + defer s.lock.Unlock() + + //load existing balances from the state store + err = s.loadState(peer) + if err != nil && err != state.ErrNotFound { + return + } + //adjust the balance + //if amount is negative, it will decrease, otherwise increase + s.balances[peer.ID()] += amount + //save the new balance to the state store + peerBalance := s.balances[peer.ID()] + err = s.stateStore.Put(peer.ID().String(), &peerBalance) + + log.Debug(fmt.Sprintf("balance for peer %s: %s", peer.ID().String(), strconv.FormatInt(peerBalance, 10))) + return err +} + +//GetPeerBalance returns the balance for a given peer +func (swap *Swap) GetPeerBalance(peer enode.ID) (int64, error) { + swap.lock.RLock() + defer swap.lock.RUnlock() + if p, ok := swap.balances[peer]; ok { + return p, nil + } + return 0, errors.New("Peer not found") +} + +//load balances from the state store (persisted) +func (s *Swap) loadState(peer *protocols.Peer) (err error) { + var peerBalance int64 + peerID := peer.ID() + //only load if the current instance doesn't already have this peer's + //balance in memory + if _, ok := s.balances[peerID]; !ok { + err = s.stateStore.Get(peerID.String(), &peerBalance) + s.balances[peerID] = peerBalance + } + return +} + +//Clean up Swap +func (swap *Swap) Close() { + swap.stateStore.Close() +} diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go index aea0989a1..a4ff94051 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/swarm.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2018 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -51,6 +51,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/ethereum/go-ethereum/swarm/storage/mock" + "github.com/ethereum/go-ethereum/swarm/swap" "github.com/ethereum/go-ethereum/swarm/tracing" ) @@ -65,19 +66,22 @@ var ( // the swarm stack type Swarm struct { - config *api.Config // swarm configuration - api *api.API // high level api layer (fs/manifest) - dns api.Resolver // DNS registrar - fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support - streamer *stream.Registry - bzz *network.Bzz // the logistic manager - backend chequebook.Backend // simple blockchain Backend - privateKey *ecdsa.PrivateKey - corsString string - swapEnabled bool - netStore *storage.NetStore - sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit - ps *pss.Pss + config *api.Config // swarm configuration + api *api.API // high level api layer (fs/manifest) + dns api.Resolver // DNS registrar + fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support + streamer *stream.Registry + bzz *network.Bzz // the logistic manager + backend chequebook.Backend // simple blockchain Backend + privateKey *ecdsa.PrivateKey + corsString string + swapEnabled bool + netStore *storage.NetStore + sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit + ps *pss.Pss + swap *swap.Swap + stateStore *state.DBStore + accountingMetrics *protocols.AccountingMetrics tracerClose io.Closer } @@ -132,7 +136,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e LightNode: config.LightNodeEnabled, } - stateStore, err := state.NewDBStore(filepath.Join(config.Path, "state-store.db")) + self.stateStore, err = state.NewDBStore(filepath.Join(config.Path, "state-store.db")) if err != nil { return } @@ -171,17 +175,38 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e delivery := stream.NewDelivery(to, self.netStore) self.netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, config.DeliverySkipCheck).New + if config.SwapEnabled { + balancesStore, err := state.NewDBStore(filepath.Join(config.Path, "balances.db")) + if err != nil { + return nil, err + } + self.swap = swap.New(balancesStore) + self.accountingMetrics = protocols.SetupAccountingMetrics(10*time.Second, filepath.Join(config.Path, "metrics.db")) + } + var nodeID enode.ID if err := nodeID.UnmarshalText([]byte(config.NodeID)); err != nil { return nil, err } - self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, stateStore, &stream.RegistryOptions{ + + syncing := stream.SyncingAutoSubscribe + if !config.SyncEnabled || config.LightNodeEnabled { + syncing = stream.SyncingDisabled + } + + retrieval := stream.RetrievalEnabled + if config.LightNodeEnabled { + retrieval = stream.RetrievalClientOnly + } + + registryOptions := &stream.RegistryOptions{ SkipCheck: config.DeliverySkipCheck, - DoSync: config.SyncEnabled, - DoRetrieve: true, + Syncing: syncing, + Retrieval: retrieval, SyncUpdateDelay: config.SyncUpdateDelay, MaxPeerServers: config.MaxStreamPeerServers, - }) + } + self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap) // Swarm Hash Merklised Chunking for 
Arbitrary-length Document/File storage
 	self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams)
 
@@ -204,7 +229,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
 
 	log.Debug("Setup local storage")
 
-	self.bzz = network.NewBzz(bzzconfig, to, stateStore, stream.Spec, self.streamer.Run)
+	self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
 
 	// Pss = postal service over swarm (devp2p over bzz)
 	self.ps, err = pss.NewPss(to, config.Pss)
@@ -341,7 +366,9 @@ func (self *Swarm) Start(srv *p2p.Server) error {
 	newaddr := self.bzz.UpdateLocalAddr([]byte(srv.Self().String()))
 	log.Info("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr))
 	// set chequebook
-	if self.config.SwapEnabled {
+	// TODO: currently, if swap is enabled and no chequebook contract (or a nonexistent one) is provided, the node will crash.
+	// Once we integrate the contracts back, this check MUST be revisited.
+	if self.config.SwapEnabled && self.config.SwapAPI != "" {
 		ctx := context.Background() // The initial setup has no deadline.
 		err := self.SetChequebook(ctx)
 		if err != nil {
@@ -422,14 +449,24 @@ func (self *Swarm) Stop() error {
 		ch.Stop()
 		ch.Save()
 	}
-
+	if self.swap != nil {
+		self.swap.Close()
+	}
+	if self.accountingMetrics != nil {
+		self.accountingMetrics.Close()
+	}
 	if self.netStore != nil {
 		self.netStore.Close()
 	}
 	self.sfs.Stop()
 	stopCounter.Inc(1)
 	self.streamer.Stop()
-	return self.bzz.Stop()
+
+	err := self.bzz.Stop()
+	if self.stateStore != nil {
+		self.stateStore.Close()
+	}
+	return err
 }
 
 // implements the node.Service interface
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go
index ecb0d971e..70732aa92 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/testutil/file.go
@@ -17,8 +17,10 @@
 package testutil
 
 import (
+	"bytes"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"strings"
 	"testing"
@@ -42,3 +44,22 @@ func TempFileWithContent(t *testing.T, content string) string {
 	}
 	return tempFile.Name()
 }
+
+// RandomBytes returns a pseudo-random yet deterministic result,
+// because test failures must be reproducible
+func RandomBytes(seed, length int) []byte {
+	b := make([]byte, length)
+	reader := rand.New(rand.NewSource(int64(seed)))
+	for n := 0; n < length; {
+		read, err := reader.Read(b[n:])
+		if err != nil {
+			panic(err)
+		}
+		n += read
+	}
+	return b
+}
+
+func RandomReader(seed, length int) *bytes.Reader {
+	return bytes.NewReader(RandomBytes(seed, length))
+}
diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/tracing/tracing.go b/vendor/github.com/ethereum/go-ethereum/swarm/tracing/tracing.go
index b84cfb310..f95fa41b8 100644
--- a/vendor/github.com/ethereum/go-ethereum/swarm/tracing/tracing.go
+++ b/vendor/github.com/ethereum/go-ethereum/swarm/tracing/tracing.go
@@ -9,7 +9,6 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	jaeger "github.com/uber/jaeger-client-go"
 	jaegercfg "github.com/uber/jaeger-client-go/config"
-	jaegerlog "github.com/uber/jaeger-client-go/log"
 	cli "gopkg.in/urfave/cli.v1"
 )
@@ -85,13 +84,13 @@ func initTracer(endpoint, svc string) (closer io.Closer) {
 	// Example logger and metrics factory. Use github.com/uber/jaeger-client-go/log
 	// and github.com/uber/jaeger-lib/metrics respectively to bind to real logging and metrics
 	// frameworks.
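The new testutil helpers above deliberately draw from a seeded math/rand source rather than crypto/rand, so a failing test can be re-run and reproduce the exact same bytes. The principle in isolation:

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// randomBytes returns deterministic pseudo-random data: the same seed always
// yields the same bytes. math/rand's Read fills the whole slice and its error
// is always nil, so a single call suffices.
func randomBytes(seed, length int) []byte {
	b := make([]byte, length)
	rand.New(rand.NewSource(int64(seed))).Read(b)
	return b
}

func main() {
	a := randomBytes(42, 8)
	b := randomBytes(42, 8)
	fmt.Println(bytes.Equal(a, b)) // true: reproducible
}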
- jLogger := jaegerlog.StdLogger + //jLogger := jaegerlog.StdLogger //jMetricsFactory := metrics.NullFactory // Initialize tracer with a logger and a metrics factory closer, err := cfg.InitGlobalTracer( svc, - jaegercfg.Logger(jLogger), + //jaegercfg.Logger(jLogger), //jaegercfg.Metrics(jMetricsFactory), //jaegercfg.Observer(rpcmetrics.NewObserver(jMetricsFactory, rpcmetrics.DefaultNameNormalizer)), ) diff --git a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go index 9fa1daad1..c34810901 100644 --- a/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go +++ b/vendor/github.com/ethereum/go-ethereum/swarm/version/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 5 // Patch version component of the current release + VersionPatch = 8 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go index 427a94958..9fa69bf4e 100644 --- a/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go +++ b/vendor/github.com/ethereum/go-ethereum/tests/block_test_util.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" @@ -48,12 +49,13 @@ func (t *BlockTest) UnmarshalJSON(in []byte) error { } type btJSON struct { - Blocks []btBlock `json:"blocks"` - Genesis btHeader `json:"genesisBlockHeader"` - Pre core.GenesisAlloc `json:"pre"` - Post core.GenesisAlloc `json:"postState"` - BestBlock common.UnprefixedHash `json:"lastblockhash"` - Network string `json:"network"` + Blocks []btBlock `json:"blocks"` + Genesis btHeader `json:"genesisBlockHeader"` + Pre core.GenesisAlloc `json:"pre"` + Post core.GenesisAlloc `json:"postState"` + BestBlock common.UnprefixedHash `json:"lastblockhash"` + Network string `json:"network"` + SealEngine string `json:"sealEngine"` } type btBlock struct { @@ -110,8 +112,13 @@ func (t *BlockTest) Run() error { if gblock.Root() != t.json.Genesis.StateRoot { return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6]) } - - chain, err := core.NewBlockChain(db, nil, config, ethash.NewShared(), vm.Config{}, nil) + var engine consensus.Engine + if t.json.SealEngine == "NoProof" { + engine = ethash.NewFaker() + } else { + engine = ethash.NewShared() + } + chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanLimit: 0}, config, engine, vm.Config{}, nil) if err != nil { return err } diff --git a/vendor/github.com/ethereum/go-ethereum/tests/init.go b/vendor/github.com/ethereum/go-ethereum/tests/init.go index f0a4943c1..db0457b6d 100644 --- a/vendor/github.com/ethereum/go-ethereum/tests/init.go +++ b/vendor/github.com/ethereum/go-ethereum/tests/init.go @@ -86,6 +86,15 @@ var Forks = map[string]*params.ChainConfig{ EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(5), }, + "ByzantiumToConstantinopleAt5": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + 
EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(5), + }, } // UnsupportedForkError is returned when a test requests a fork that isn't implemented. diff --git a/vendor/github.com/ethereum/go-ethereum/trie/database.go b/vendor/github.com/ethereum/go-ethereum/trie/database.go index d0691b637..739a98add 100644 --- a/vendor/github.com/ethereum/go-ethereum/trie/database.go +++ b/vendor/github.com/ethereum/go-ethereum/trie/database.go @@ -22,6 +22,7 @@ import ( "sync" "time" + "github.com/allegro/bigcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -30,6 +31,11 @@ import ( ) var ( + memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) + memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) + memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) + memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) + memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) @@ -64,9 +70,10 @@ type DatabaseReader interface { type Database struct { diskdb ethdb.Database // Persistent storage for matured trie nodes - nodes map[common.Hash]*cachedNode // Data and references relationships of a node - oldest common.Hash // Oldest tracked node, flush-list head - newest common.Hash // Newest tracked node, flush-list tail + cleans *bigcache.BigCache // GC friendly memory cache of clean node RLPs + dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes + oldest common.Hash // Oldest tracked node, flush-list head + newest common.Hash // Newest tracked node, flush-list tail preimages map[common.Hash][]byte // Preimages of nodes from the secure trie seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys @@ -79,7 +86,7 @@ type Database struct { flushnodes uint64 // Nodes flushed since last commit flushsize common.StorageSize // Data storage flushed since last commit - nodesSize common.StorageSize // Storage size of the nodes cache (exc. flushlist) + dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. flushlist) preimagesSize common.StorageSize // Storage size of the preimages cache lock sync.RWMutex @@ -134,7 +141,7 @@ type cachedNode struct { node node // Cached collapsed trie node, or raw rlp data size uint16 // Byte size of the useful cached data - parents uint16 // Number of live nodes referencing this one + parents uint32 // Number of live nodes referencing this one children map[common.Hash]uint16 // External children referenced by this node flushPrev common.Hash // Previous node in the flush-list @@ -262,11 +269,30 @@ func expandNode(hash hashNode, n node, cachegen uint16) node { } // NewDatabase creates a new trie database to store ephemeral trie content before -// its written out to disk or garbage collected. +// its written out to disk or garbage collected. No read cache is created, so all +// data retrievals will hit the underlying disk database. 
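NewDatabaseWithCache below sizes its clean-node read cache with the vendored allegro/bigcache package, treating the cache argument as a megabyte budget. A standalone sketch that builds and exercises a cache with the same configuration shape:

package main

import (
	"fmt"
	"time"

	"github.com/allegro/bigcache"
)

func main() {
	cache := 16 // megabytes, like the cache parameter below
	cleans, err := bigcache.NewBigCache(bigcache.Config{
		Shards:             1024,
		LifeWindow:         time.Hour,
		MaxEntriesInWindow: cache * 1024,
		MaxEntrySize:       512,
		HardMaxCacheSize:   cache,
	})
	if err != nil {
		panic(err)
	}
	// Keys are stringified hashes and values raw RLP in the trie database.
	_ = cleans.Set("node-hash", []byte("rlp-encoded node"))
	if enc, err := cleans.Get("node-hash"); err == nil {
		fmt.Printf("%s\n", enc)
	}
}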
func NewDatabase(diskdb ethdb.Database) *Database { + return NewDatabaseWithCache(diskdb, 0) +} + +// NewDatabaseWithCache creates a new trie database to store ephemeral trie content +// before its written out to disk or garbage collected. It also acts as a read cache +// for nodes loaded from disk. +func NewDatabaseWithCache(diskdb ethdb.Database, cache int) *Database { + var cleans *bigcache.BigCache + if cache > 0 { + cleans, _ = bigcache.NewBigCache(bigcache.Config{ + Shards: 1024, + LifeWindow: time.Hour, + MaxEntriesInWindow: cache * 1024, + MaxEntrySize: 512, + HardMaxCacheSize: cache, + }) + } return &Database{ diskdb: diskdb, - nodes: map[common.Hash]*cachedNode{{}: {}}, + cleans: cleans, + dirties: map[common.Hash]*cachedNode{{}: {}}, preimages: make(map[common.Hash][]byte), } } @@ -293,7 +319,7 @@ func (db *Database) InsertBlob(hash common.Hash, blob []byte) { // size tracking. func (db *Database) insert(hash common.Hash, blob []byte, node node) { // If the node's already cached, skip - if _, ok := db.nodes[hash]; ok { + if _, ok := db.dirties[hash]; ok { return } // Create the cached entry for this node @@ -303,19 +329,19 @@ func (db *Database) insert(hash common.Hash, blob []byte, node node) { flushPrev: db.newest, } for _, child := range entry.childs() { - if c := db.nodes[child]; c != nil { + if c := db.dirties[child]; c != nil { c.parents++ } } - db.nodes[hash] = entry + db.dirties[hash] = entry // Update the flush-list endpoints if db.oldest == (common.Hash{}) { db.oldest, db.newest = hash, hash } else { - db.nodes[db.newest].flushNext, db.newest = hash, hash + db.dirties[db.newest].flushNext, db.newest = hash, hash } - db.nodesSize += common.StorageSize(common.HashLength + entry.size) + db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) } // insertPreimage writes a new trie node pre-image to the memory database if it's @@ -333,35 +359,64 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { // node retrieves a cached trie node from memory, or returns nil if none can be // found in the memory cache. func (db *Database) node(hash common.Hash, cachegen uint16) node { - // Retrieve the node from cache if available + // Retrieve the node from the clean cache if available + if db.cleans != nil { + if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { + memcacheCleanHitMeter.Mark(1) + memcacheCleanReadMeter.Mark(int64(len(enc))) + return mustDecodeNode(hash[:], enc, cachegen) + } + } + // Retrieve the node from the dirty cache if available db.lock.RLock() - node := db.nodes[hash] + dirty := db.dirties[hash] db.lock.RUnlock() - if node != nil { - return node.obj(hash, cachegen) + if dirty != nil { + return dirty.obj(hash, cachegen) } // Content unavailable in memory, attempt to retrieve from disk enc, err := db.diskdb.Get(hash[:]) if err != nil || enc == nil { return nil } + if db.cleans != nil { + db.cleans.Set(string(hash[:]), enc) + memcacheCleanMissMeter.Mark(1) + memcacheCleanWriteMeter.Mark(int64(len(enc))) + } return mustDecodeNode(hash[:], enc, cachegen) } // Node retrieves an encoded cached trie node from memory. If it cannot be found // cached, the method queries the persistent database for the content. 
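Node below, like node above it, now performs a three-tier lookup: the clean read cache first, then the dirty in-memory set, then disk, warming the clean cache on a disk hit so the next read is cheap. The tier order in miniature, with plain maps standing in for bigcache, the dirty node map and LevelDB:

package main

import (
	"errors"
	"fmt"
)

// get checks the clean cache, then the dirty set, then disk, and refills the
// clean cache on a disk hit. Single-goroutine sketch: no locking shown.
func get(cleans, dirties, disk map[string][]byte, key string) ([]byte, error) {
	if v, ok := cleans[key]; ok {
		return v, nil // clean cache hit
	}
	if v, ok := dirties[key]; ok {
		return v, nil // dirty hit: not yet persisted
	}
	v, ok := disk[key]
	if !ok {
		return nil, errors.New("not found")
	}
	cleans[key] = v // miss in memory: warm the read cache
	return v, nil
}

func main() {
	cleans := map[string][]byte{}
	dirties := map[string][]byte{}
	disk := map[string][]byte{"h": []byte("node-rlp")}

	v, _ := get(cleans, dirties, disk, "h")
	fmt.Printf("%s cached=%v\n", v, cleans["h"] != nil) // node-rlp cached=true
}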
func (db *Database) Node(hash common.Hash) ([]byte, error) { - // Retrieve the node from cache if available + // Retrieve the node from the clean cache if available + if db.cleans != nil { + if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { + memcacheCleanHitMeter.Mark(1) + memcacheCleanReadMeter.Mark(int64(len(enc))) + return enc, nil + } + } + // Retrieve the node from the dirty cache if available db.lock.RLock() - node := db.nodes[hash] + dirty := db.dirties[hash] db.lock.RUnlock() - if node != nil { - return node.rlp(), nil + if dirty != nil { + return dirty.rlp(), nil } // Content unavailable in memory, attempt to retrieve from disk - return db.diskdb.Get(hash[:]) + enc, err := db.diskdb.Get(hash[:]) + if err == nil && enc != nil { + if db.cleans != nil { + db.cleans.Set(string(hash[:]), enc) + memcacheCleanMissMeter.Mark(1) + memcacheCleanWriteMeter.Mark(int64(len(enc))) + } + } + return enc, err } // preimage retrieves a cached trie node pre-image from memory. If it cannot be @@ -395,8 +450,8 @@ func (db *Database) Nodes() []common.Hash { db.lock.RLock() defer db.lock.RUnlock() - var hashes = make([]common.Hash, 0, len(db.nodes)) - for hash := range db.nodes { + var hashes = make([]common.Hash, 0, len(db.dirties)) + for hash := range db.dirties { if hash != (common.Hash{}) { // Special case for "root" references/nodes hashes = append(hashes, hash) } @@ -415,18 +470,18 @@ func (db *Database) Reference(child common.Hash, parent common.Hash) { // reference is the private locked version of Reference. func (db *Database) reference(child common.Hash, parent common.Hash) { // If the node does not exist, it's a node pulled from disk, skip - node, ok := db.nodes[child] + node, ok := db.dirties[child] if !ok { return } // If the reference already exists, only duplicate for roots - if db.nodes[parent].children == nil { - db.nodes[parent].children = make(map[common.Hash]uint16) - } else if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) { + if db.dirties[parent].children == nil { + db.dirties[parent].children = make(map[common.Hash]uint16) + } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { return } node.parents++ - db.nodes[parent].children[child]++ + db.dirties[parent].children[child]++ } // Dereference removes an existing reference from a root node. 
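The Dereference hunk that follows is mostly the nodes-to-dirties rename, but the refcounting scheme underneath is worth seeing in miniature: dropping the last reference to a node recursively releases its entire subtree from the dirty set. A compact sketch of that shape:

package main

import "fmt"

type node struct {
	parents  int
	children []string
}

// deref decrements a node's reference count and, once nothing references it,
// recursively releases its children and removes it, like Database.dereference.
func deref(dirties map[string]*node, hash string) {
	n, ok := dirties[hash]
	if !ok {
		return // already committed or evicted
	}
	n.parents--
	if n.parents > 0 {
		return
	}
	for _, child := range n.children {
		deref(dirties, child)
	}
	delete(dirties, hash)
}

func main() {
	dirties := map[string]*node{
		"root": {parents: 1, children: []string{"leaf"}},
		"leaf": {parents: 1},
	}
	deref(dirties, "root")
	fmt.Println(len(dirties)) // 0: the whole subtree was collected
}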
@@ -439,25 +494,25 @@ func (db *Database) Dereference(root common.Hash) { db.lock.Lock() defer db.lock.Unlock() - nodes, storage, start := len(db.nodes), db.nodesSize, time.Now() + nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() db.dereference(root, common.Hash{}) - db.gcnodes += uint64(nodes - len(db.nodes)) - db.gcsize += storage - db.nodesSize + db.gcnodes += uint64(nodes - len(db.dirties)) + db.gcsize += storage - db.dirtiesSize db.gctime += time.Since(start) memcacheGCTimeTimer.Update(time.Since(start)) - memcacheGCSizeMeter.Mark(int64(storage - db.nodesSize)) - memcacheGCNodesMeter.Mark(int64(nodes - len(db.nodes))) + memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) - log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start), - "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize) + log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), + "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) } // dereference is the private locked version of Dereference. func (db *Database) dereference(child common.Hash, parent common.Hash) { // Dereference the parent-child - node := db.nodes[parent] + node := db.dirties[parent] if node.children != nil && node.children[child] > 0 { node.children[child]-- @@ -466,7 +521,7 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { } } // If the child does not exist, it's a previously committed node. - node, ok := db.nodes[child] + node, ok := db.dirties[child] if !ok { return } @@ -483,20 +538,20 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { switch child { case db.oldest: db.oldest = node.flushNext - db.nodes[node.flushNext].flushPrev = common.Hash{} + db.dirties[node.flushNext].flushPrev = common.Hash{} case db.newest: db.newest = node.flushPrev - db.nodes[node.flushPrev].flushNext = common.Hash{} + db.dirties[node.flushPrev].flushNext = common.Hash{} default: - db.nodes[node.flushPrev].flushNext = node.flushNext - db.nodes[node.flushNext].flushPrev = node.flushPrev + db.dirties[node.flushPrev].flushNext = node.flushNext + db.dirties[node.flushNext].flushPrev = node.flushPrev } // Dereference all children and delete the node for _, hash := range node.childs() { db.dereference(hash, child) } - delete(db.nodes, child) - db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) + delete(db.dirties, child) + db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) } } @@ -509,13 +564,13 @@ func (db *Database) Cap(limit common.StorageSize) error { // by only uncaching existing data when the database write finalizes. db.lock.RLock() - nodes, storage, start := len(db.nodes), db.nodesSize, time.Now() + nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() batch := db.diskdb.NewBatch() - // db.nodesSize only contains the useful data in the cache, but when reporting + // db.dirtiesSize only contains the useful data in the cache, but when reporting // the total memory consumption, the maintenance metadata is also needed to be // counted. For every useful node, we track 2 extra hashes as the flushlist. 
- size := db.nodesSize + common.StorageSize((len(db.nodes)-1)*2*common.HashLength) + size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*2*common.HashLength) // If the preimage cache got large enough, push to disk. If it's still small // leave for later to deduplicate writes. @@ -540,7 +595,7 @@ func (db *Database) Cap(limit common.StorageSize) error { oldest := db.oldest for size > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch - node := db.nodes[oldest] + node := db.dirties[oldest] if err := batch.Put(oldest[:], node.rlp()); err != nil { db.lock.RUnlock() return err @@ -578,25 +633,25 @@ func (db *Database) Cap(limit common.StorageSize) error { db.preimagesSize = 0 } for db.oldest != oldest { - node := db.nodes[db.oldest] - delete(db.nodes, db.oldest) + node := db.dirties[db.oldest] + delete(db.dirties, db.oldest) db.oldest = node.flushNext - db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) + db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) } if db.oldest != (common.Hash{}) { - db.nodes[db.oldest].flushPrev = common.Hash{} + db.dirties[db.oldest].flushPrev = common.Hash{} } - db.flushnodes += uint64(nodes - len(db.nodes)) - db.flushsize += storage - db.nodesSize + db.flushnodes += uint64(nodes - len(db.dirties)) + db.flushsize += storage - db.dirtiesSize db.flushtime += time.Since(start) memcacheFlushTimeTimer.Update(time.Since(start)) - memcacheFlushSizeMeter.Mark(int64(storage - db.nodesSize)) - memcacheFlushNodesMeter.Mark(int64(nodes - len(db.nodes))) + memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) - log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start), - "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.nodes), "livesize", db.nodesSize) + log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), + "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) return nil } @@ -630,7 +685,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { } } // Move the trie itself into the batch, flushing if enough data is accumulated - nodes, storage := len(db.nodes), db.nodesSize + nodes, storage := len(db.dirties), db.dirtiesSize if err := db.commit(node, batch); err != nil { log.Error("Failed to commit trie from trie database", "err", err) db.lock.RUnlock() @@ -654,15 +709,15 @@ func (db *Database) Commit(node common.Hash, report bool) error { db.uncache(node) memcacheCommitTimeTimer.Update(time.Since(start)) - memcacheCommitSizeMeter.Mark(int64(storage - db.nodesSize)) - memcacheCommitNodesMeter.Mark(int64(nodes - len(db.nodes))) + memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) logger := log.Info if !report { logger = log.Debug } - logger("Persisted trie from memory database", "nodes", nodes-len(db.nodes)+int(db.flushnodes), "size", storage-db.nodesSize+db.flushsize, "time", time.Since(start)+db.flushtime, - "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize) + logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), 
"size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime, + "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) // Reset the garbage collection statistics db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 @@ -674,7 +729,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { // commit is the private locked version of Commit. func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error { // If the node does not exist, it's a previously committed node - node, ok := db.nodes[hash] + node, ok := db.dirties[hash] if !ok { return nil } @@ -702,7 +757,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error { // to disk. func (db *Database) uncache(hash common.Hash) { // If the node does not exist, we're done on this path - node, ok := db.nodes[hash] + node, ok := db.dirties[hash] if !ok { return } @@ -710,20 +765,20 @@ func (db *Database) uncache(hash common.Hash) { switch hash { case db.oldest: db.oldest = node.flushNext - db.nodes[node.flushNext].flushPrev = common.Hash{} + db.dirties[node.flushNext].flushPrev = common.Hash{} case db.newest: db.newest = node.flushPrev - db.nodes[node.flushPrev].flushNext = common.Hash{} + db.dirties[node.flushPrev].flushNext = common.Hash{} default: - db.nodes[node.flushPrev].flushNext = node.flushNext - db.nodes[node.flushNext].flushPrev = node.flushPrev + db.dirties[node.flushPrev].flushNext = node.flushNext + db.dirties[node.flushNext].flushPrev = node.flushPrev } // Uncache the node's subtries and remove the node itself too for _, child := range node.childs() { db.uncache(child) } - delete(db.nodes, hash) - db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) + delete(db.dirties, hash) + db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) } // Size returns the current storage size of the memory cache in front of the @@ -732,11 +787,11 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) { db.lock.RLock() defer db.lock.RUnlock() - // db.nodesSize only contains the useful data in the cache, but when reporting + // db.dirtiesSize only contains the useful data in the cache, but when reporting // the total memory consumption, the maintenance metadata is also needed to be // counted. For every useful node, we track 2 extra hashes as the flushlist. - var flushlistSize = common.StorageSize((len(db.nodes) - 1) * 2 * common.HashLength) - return db.nodesSize + flushlistSize, db.preimagesSize + var flushlistSize = common.StorageSize((len(db.dirties) - 1) * 2 * common.HashLength) + return db.dirtiesSize + flushlistSize, db.preimagesSize } // verifyIntegrity is a debug method to iterate over the entire trie stored in @@ -749,12 +804,12 @@ func (db *Database) verifyIntegrity() { // Iterate over all the cached nodes and accumulate them into a set reachable := map[common.Hash]struct{}{{}: {}} - for child := range db.nodes[common.Hash{}].children { + for child := range db.dirties[common.Hash{}].children { db.accumulate(child, reachable) } // Find any unreachable but cached nodes unreachable := []string{} - for hash, node := range db.nodes { + for hash, node := range db.dirties { if _, ok := reachable[hash]; !ok { unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}", hash, node.node, node.parents, node.flushPrev, node.flushNext)) @@ -769,7 +824,7 @@ func (db *Database) verifyIntegrity() { // cached children found in memory. 
@@ -769,7 +824,7 @@ func (db *Database) verifyIntegrity() {
 // cached children found in memory.
 func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) {
     // Mark the node reachable if present in the memory cache
-    node, ok := db.nodes[hash]
+    node, ok := db.dirties[hash]
     if !ok {
         return
     }
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go b/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
index 00b890eb8..77f168166 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/iterator.go
@@ -181,6 +181,8 @@ func (it *nodeIterator) LeafProof() [][]byte {
     if len(it.stack) > 0 {
         if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
             hasher := newHasher(0, 0, nil)
+            defer returnHasherToPool(hasher)
+
             proofs := make([][]byte, 0, len(it.stack))
 
             for i, item := range it.stack[:len(it.stack)-1] {
diff --git a/vendor/github.com/ethereum/go-ethereum/trie/proof.go b/vendor/github.com/ethereum/go-ethereum/trie/proof.go
index 6cb8f4d5f..f90ecd7d8 100644
--- a/vendor/github.com/ethereum/go-ethereum/trie/proof.go
+++ b/vendor/github.com/ethereum/go-ethereum/trie/proof.go
@@ -66,6 +66,8 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Putter) error {
         }
     }
     hasher := newHasher(0, 0, nil)
+    defer returnHasherToPool(hasher)
+
     for i, n := range nodes {
         // Don't bother checking for errors here since hasher panics
         // if encoding doesn't work and we're not writing to any database.
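Both trie hunks above pair newHasher with a deferred returnHasherToPool, so short-lived hashers are recycled through a free pool instead of being left to the garbage collector. A rough sketch of the same pattern with a plain sync.Pool (the hashBytes helper is hypothetical; geth's hasher carries more state than a bare hash.Hash):

package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"sync"
)

// hasherPool recycles hash.Hash instances across calls, the same idea as
// newHasher / returnHasherToPool in the trie package.
var hasherPool = sync.Pool{
	New: func() interface{} { return sha256.New() },
}

func hashBytes(data []byte) []byte {
	h := hasherPool.Get().(hash.Hash)
	defer hasherPool.Put(h) // mirrors: defer returnHasherToPool(hasher)
	h.Reset()               // a pooled hasher keeps state from its previous use
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", hashBytes([]byte("hello")))
}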
diff --git a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
index af9418d9f..d7af4baae 100644
--- a/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
+++ b/vendor/github.com/ethereum/go-ethereum/whisper/mailserver/mailserver.go
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
+// Package mailserver provides a naive, example mailserver implementation
 package mailserver
 
 import (
@@ -26,9 +27,11 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
     "github.com/syndtr/goleveldb/leveldb"
+    "github.com/syndtr/goleveldb/leveldb/opt"
     "github.com/syndtr/goleveldb/leveldb/util"
 )
 
+// WMailServer represents the state data of the mailserver.
 type WMailServer struct {
     db *leveldb.DB
     w  *whisper.Whisper
@@ -42,6 +45,8 @@ type DBKey struct {
     raw []byte
 }
 
+// NewDbKey is a helper function that creates a levelDB
+// key from a hash and an integer.
 func NewDbKey(t uint32, h common.Hash) *DBKey {
     const sz = common.HashLength + 4
     var k DBKey
@@ -53,6 +58,7 @@ func NewDbKey(t uint32, h common.Hash) *DBKey {
     return &k
 }
 
+// Init initializes the mail server.
 func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) error {
     var err error
     if len(path) == 0 {
@@ -63,7 +69,7 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
         return fmt.Errorf("password is not specified")
     }
 
-    s.db, err = leveldb.OpenFile(path, nil)
+    s.db, err = leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 32})
     if err != nil {
         return fmt.Errorf("open DB file: %s", err)
     }
@@ -82,12 +88,14 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
     return nil
 }
 
+// Close cleans up before shutdown.
 func (s *WMailServer) Close() {
     if s.db != nil {
         s.db.Close()
     }
 }
 
+// Archive stores the envelope in the mail server's database.
 func (s *WMailServer) Archive(env *whisper.Envelope) {
     key := NewDbKey(env.Expiry-env.TTL, env.Hash())
     rawEnvelope, err := rlp.EncodeToBytes(env)
@@ -101,6 +109,8 @@ func (s *WMailServer) Archive(env *whisper.Envelope) {
     }
 }
 
+// DeliverMail responds with saved messages upon request by the
+// messages' owner.
 func (s *WMailServer) DeliverMail(peer *whisper.Peer, request *whisper.Envelope) {
     if peer == nil {
         log.Error("Whisper peer is nil")
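On the mailserver changes above: NewDbKey prefixes each envelope hash with a 4-byte big-endian timestamp (Archive passes env.Expiry-env.TTL, the send time), so envelopes sort chronologically in LevelDB and DeliverMail can serve a time-bounded request as a key-range scan. A standalone sketch of that key layout, with a hypothetical makeDBKey in place of the real whisper types:

package main

import (
	"encoding/binary"
	"fmt"
)

const hashLength = 32 // same size as common.HashLength

// makeDBKey builds a 36-byte LevelDB key: a 4-byte big-endian send time
// followed by the envelope hash, so keys compare (and sort) by time first.
func makeDBKey(t uint32, hash [hashLength]byte) []byte {
	key := make([]byte, 4+hashLength)
	binary.BigEndian.PutUint32(key[:4], t)
	copy(key[4:], hash[:])
	return key
}

func main() {
	var h [hashLength]byte
	h[0] = 0xab
	key := makeDBKey(1545216000, h) // hypothetical send time
	fmt.Printf("% x\n", key[:8])
	// A time-bounded query then iterates the range
	// [makeDBKey(lower, zeroHash), makeDBKey(upper, zeroHash)),
	// e.g. via goleveldb's &util.Range{Start: ..., Limit: ...}.
}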