## Nim-Codex
## Copyright (c) 2021 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [].}

import std/os
import std/options
import std/strutils
import std/terminal
import std/typetraits

import pkg/chronos
import pkg/chronicles/helpers
import pkg/chronicles/topics_registry
import pkg/confutils/defs
import pkg/confutils/std/net
import pkg/ethers
import pkg/libp2p
import pkg/metrics
import pkg/metrics/chronos_httpserver
import pkg/questionable
import pkg/questionable/results
import pkg/stew/byteutils
import pkg/stew/shims/net as stewnet
import pkg/stew/shims/parseutils
import pkg/toml_serialization

import ./codextypes
import ./discovery
import ./logutils
import ./stores
import ./units
import ./utils

export units, net, codextypes, logutils

# Re-export the repo-store defaults so downstream modules only need this config
# module to reference them.
export
  DefaultQuotaBytes,
  DefaultBlockTtl,
  DefaultBlockMaintenanceInterval,
  DefaultNumberOfBlocksToMaintainPerInterval
proc defaultDataDir*(): string =
  ## Returns the platform-specific default directory where Codex keeps its
  ## configuration and data, rooted under the current user's home directory:
  ## - Windows: `<home>/AppData/Roaming/Codex`
  ## - macOS:   `<home>/Library/Application Support/Codex`
  ## - other:   `<home>/.cache/codex`
  let dataDir = when defined(windows):
    "AppData" / "Roaming" / "Codex"
  elif defined(macosx):
    "Library" / "Application Support" / "Codex"
  else:
    ".cache" / "codex"

  getHomeDir() / dataDir
const
  # Compile-time feature flags; override with `-d:codex_enable_...=true`.
  codex_enable_api_debug_peers* {.booldefine.} = false
  codex_enable_proof_failures* {.booldefine.} = false
  codex_enable_log_counter* {.booldefine.} = false

  # Default locations for node data and proof circuit artifacts.
  DefaultDataDir* = defaultDataDir()
  DefaultCircuitDir* = defaultDataDir() / "circuits"
type
  StartUpCmd* {.pure.} = enum
    ## Top-level CLI subcommands.
    noCmd
    persistence

  PersistenceCmd* {.pure.} = enum
    ## Subcommands available under `persistence`.
    noCmd
    prover

  LogKind* {.pure.} = enum
    ## Stdout log formatting modes (see `setupLogging`).
    Auto = "auto"
    Colors = "colors"
    NoColors = "nocolors"
    Json = "json"
    None = "none"

  RepoKind* = enum
    ## Backend used for the main repo store.
    repoFS = "fs"
    repoSQLite = "sqlite"
    repoLevelDb = "leveldb"

  CodexConf* = object
    ## Full node configuration, populated by confutils from CLI flags
    ## and/or a TOML config file.
    configFile* {.
      desc: "Loads the configuration from a TOML file"
      defaultValueDesc: "none"
      defaultValue: InputFile.none
      name: "config-file" }: Option[InputFile]

    logLevel* {.
      defaultValue: "info"
      desc: "Sets the log level",
      name: "log-level" }: string

    logFormat* {.
      hidden
      desc: "Specifies what kind of logs should be written to stdout (auto, colors, nocolors, json)"
      defaultValueDesc: "auto"
      defaultValue: LogKind.Auto
      name: "log-format" }: LogKind

    metricsEnabled* {.
      desc: "Enable the metrics server"
      defaultValue: false
      name: "metrics" }: bool

    metricsAddress* {.
      desc: "Listening address of the metrics server"
      defaultValue: ValidIpAddress.init("127.0.0.1")
      defaultValueDesc: "127.0.0.1"
      name: "metrics-address" }: ValidIpAddress

    metricsPort* {.
      desc: "Listening HTTP port of the metrics server"
      defaultValue: 8008
      name: "metrics-port" }: Port

    dataDir* {.
      desc: "The directory where codex will store configuration and data"
      defaultValue: DefaultDataDir
      defaultValueDesc: $DefaultDataDir
      abbr: "d"
      name: "data-dir" }: OutDir

    circuitDir* {.
      desc: "Directory where Codex will store proof circuit data"
      defaultValue: DefaultCircuitDir
      defaultValueDesc: $DefaultCircuitDir
      abbr: "cd"
      name: "circuit-dir" }: OutDir

    listenAddrs* {.
      desc: "Multi Addresses to listen on"
      defaultValue: @[
        MultiAddress.init("/ip4/0.0.0.0/tcp/0")
        .expect("Should init multiaddress")]
      defaultValueDesc: "/ip4/0.0.0.0/tcp/0"
      abbr: "i"
      name: "listen-addrs" }: seq[MultiAddress]

    # TODO: change this once we integrate nat support
    nat* {.
      desc: "IP Addresses to announce behind a NAT"
      defaultValue: ValidIpAddress.init("127.0.0.1")
      defaultValueDesc: "127.0.0.1"
      abbr: "a"
      name: "nat" }: ValidIpAddress

    discoveryIp* {.
      desc: "Discovery listen address"
      defaultValue: ValidIpAddress.init(IPv4_any())
      defaultValueDesc: "0.0.0.0"
      abbr: "e"
      name: "disc-ip" }: ValidIpAddress

    discoveryPort* {.
      desc: "Discovery (UDP) port"
      defaultValue: 8090.Port
      defaultValueDesc: "8090"
      abbr: "u"
      name: "disc-port" }: Port

    netPrivKeyFile* {.
      desc: "Source of network (secp256k1) private key file path or name"
      defaultValue: "key"
      name: "net-privkey" }: string

    bootstrapNodes* {.
      desc: "Specifies one or more bootstrap nodes to use when connecting to the network"
      abbr: "b"
      name: "bootstrap-node" }: seq[SignedPeerRecord]

    maxPeers* {.
      desc: "The maximum number of peers to connect to"
      defaultValue: 160
      name: "max-peers" }: int

    agentString* {.
      defaultValue: "Codex"
      desc: "Node agent string which is used as identifier in network"
      name: "agent-string" }: string

    apiBindAddress* {.
      desc: "The REST API bind address"
      defaultValue: "127.0.0.1"
      name: "api-bindaddr"
    }: string

    apiPort* {.
      desc: "The REST Api port",
      defaultValue: 8080.Port
      defaultValueDesc: "8080"
      name: "api-port"
      abbr: "p" }: Port

    apiCorsAllowedOrigin* {.
      desc: "The REST Api CORS allowed origin for downloading data. '*' will allow all origins, '' will allow none.",
      defaultValue: string.none
      defaultValueDesc: "Disallow all cross origin requests to download data"
      name: "api-cors-origin" }: Option[string]

    repoKind* {.
      desc: "Backend for main repo store (fs, sqlite, leveldb)"
      defaultValueDesc: "fs"
      defaultValue: repoFS
      name: "repo-kind" }: RepoKind

    storageQuota* {.
      desc: "The size of the total storage quota dedicated to the node"
      defaultValue: DefaultQuotaBytes
      defaultValueDesc: $DefaultQuotaBytes
      name: "storage-quota"
      abbr: "q" }: NBytes

    blockTtl* {.
      desc: "Default block timeout in seconds - 0 disables the ttl"
      defaultValue: DefaultBlockTtl
      defaultValueDesc: $DefaultBlockTtl
      name: "block-ttl"
      abbr: "t" }: Duration

    blockMaintenanceInterval* {.
      desc: "Time interval in seconds - determines frequency of block maintenance cycle: how often blocks are checked for expiration and cleanup"
      defaultValue: DefaultBlockMaintenanceInterval
      defaultValueDesc: $DefaultBlockMaintenanceInterval
      name: "block-mi" }: Duration

    blockMaintenanceNumberOfBlocks* {.
      desc: "Number of blocks to check every maintenance cycle"
      defaultValue: DefaultNumberOfBlocksToMaintainPerInterval
      defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval
      name: "block-mn" }: int

    cacheSize* {.
      desc: "The size of the block cache, 0 disables the cache - might help on slow hardrives"
      defaultValue: 0
      defaultValueDesc: "0"
      name: "cache-size"
      abbr: "c" }: NBytes

    logFile* {.
      desc: "Logs to file"
      defaultValue: string.none
      name: "log-file"
      hidden
    .}: Option[string]

    case cmd* {.
      defaultValue: noCmd
      command }: StartUpCmd
    of persistence:
      ethProvider* {.
        desc: "The URL of the JSON-RPC API of the Ethereum node"
        defaultValue: "ws://localhost:8545"
        name: "eth-provider"
      .}: string

      ethAccount* {.
        desc: "The Ethereum account that is used for storage contracts"
        defaultValue: EthAddress.none
        defaultValueDesc: ""
        name: "eth-account"
      .}: Option[EthAddress]

      ethPrivateKey* {.
        desc: "File containing Ethereum private key for storage contracts"
        defaultValue: string.none
        defaultValueDesc: ""
        name: "eth-private-key"
      .}: Option[string]

      marketplaceAddress* {.
        desc: "Address of deployed Marketplace contract"
        defaultValue: EthAddress.none
        defaultValueDesc: ""
        name: "marketplace-address"
      .}: Option[EthAddress]

      # TODO: should go behind a feature flag
      simulateProofFailures* {.
        desc: "Simulates proof failures once every N proofs. 0 = disabled."
        defaultValue: 0
        name: "simulate-proof-failures"
        hidden
      .}: int

      validator* {.
        desc: "Enables validator, requires an Ethereum node"
        defaultValue: false
        name: "validator"
      .}: bool

      validatorMaxSlots* {.
        desc: "Maximum number of slots that the validator monitors"
        defaultValue: 1000
        name: "validator-max-slots"
      .}: int

      case persistenceCmd* {.
        defaultValue: noCmd
        command }: PersistenceCmd

      of PersistenceCmd.prover:
        circomR1cs* {.
          desc: "The r1cs file for the storage circuit"
          defaultValue: $DefaultDataDir / "circuits" / "proof_main.r1cs"
          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.r1cs"
          name: "circom-r1cs"
        .}: InputFile

        circomWasm* {.
          desc: "The wasm file for the storage circuit"
          defaultValue: $DefaultDataDir / "circuits" / "proof_main.wasm"
          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm"
          name: "circom-wasm"
        .}: InputFile

        circomZkey* {.
          desc: "The zkey file for the storage circuit"
          defaultValue: $DefaultDataDir / "circuits" / "proof_main.zkey"
          defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey"
          name: "circom-zkey"
        .}: InputFile

        # TODO: should probably be hidden and behind a feature flag
        circomNoZkey* {.
          desc: "Ignore the zkey file - use only for testing!"
          defaultValue: false
          name: "circom-no-zkey"
        .}: bool

        numProofSamples* {.
          desc: "Number of samples to prove"
          defaultValue: DefaultSamplesNum
          defaultValueDesc: $DefaultSamplesNum
          name: "proof-samples" }: int

        maxSlotDepth* {.
          desc: "The maximum depth of the slot tree"
          defaultValue: DefaultMaxSlotDepth
          defaultValueDesc: $DefaultMaxSlotDepth
          name: "max-slot-depth" }: int

        maxDatasetDepth* {.
          desc: "The maximum depth of the dataset tree"
          defaultValue: DefaultMaxDatasetDepth
          defaultValueDesc: $DefaultMaxDatasetDepth
          name: "max-dataset-depth" }: int

        maxBlockDepth* {.
          desc: "The maximum depth of the network block merkle tree"
          defaultValue: DefaultBlockDepth
          defaultValueDesc: $DefaultBlockDepth
          name: "max-block-depth" }: int

        maxCellElms* {.
          desc: "The maximum number of elements in a cell"
          defaultValue: DefaultCellElms
          defaultValueDesc: $DefaultCellElms
          name: "max-cell-elements" }: int
      of PersistenceCmd.noCmd:
        discard

    of StartUpCmd.noCmd:
      discard # end of persistence

  EthAddress* = ethers.Address
# Log rendering for Ethereum addresses: short hex in textlines sinks,
# full JSON value in json sinks.
logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog
logutils.formatIt(LogFormat.json, EthAddress): %it
func persistence*(self: CodexConf): bool =
  ## True when the node was started with the `persistence` subcommand.
  self.cmd == StartUpCmd.persistence

func prover*(self: CodexConf): bool =
  ## True when the node runs the prover (`persistence prover` subcommand).
  self.persistence and self.persistenceCmd == PersistenceCmd.prover
proc getCodexVersion(): string =
  ## Compile-time helper: derives the version string from `git tag` output,
  ## falling back to a placeholder when the repo has no tags.
  let tag = strip(staticExec("git tag"))
  if tag.isEmptyOrWhitespace:
    return "untagged build"
  return tag
proc getCodexRevision(): string =
  ## Compile-time helper: short git commit hash of the build.
  # using a slice in a static context breaks nimsuggest for some reason
  let res = strip(staticExec("git rev-parse --short HEAD"))
  res
proc getNimBanner(): string =
  ## Compile-time helper: the "Version ..." line of the Nim compiler banner.
  ## NOTE(review): relies on a POSIX `grep` in the build environment.
  staticExec("nim --version | grep Version")
|
|
|
const
|
|
|
|
codexVersion* = getCodexVersion()
|
|
|
|
codexRevision* = getCodexRevision()
|
|
|
|
nimBanner* = getNimBanner()
|
2022-04-14 10:49:03 +00:00
|
|
|
|
2022-05-19 19:56:03 +00:00
|
|
|
codexFullVersion* =
|
2023-03-08 11:45:55 +00:00
|
|
|
"Codex version: " & codexVersion & "\p" &
|
|
|
|
"Codex revision: " & codexRevision & "\p" &
|
2022-04-14 10:49:03 +00:00
|
|
|
nimBanner
|
|
|
|
|
proc parseCmdArg*(T: typedesc[MultiAddress],
                  input: string): MultiAddress
                 {.upraises: [ValueError, LPError].} =
  ## confutils hook: parses a CLI argument into a `MultiAddress`.
  ## Logs and exits the process on invalid input.
  var ma: MultiAddress
  let res = MultiAddress.init(input)
  if res.isOk:
    ma = res.get()
  else:
    warn "Invalid MultiAddress", input=input, error = res.error()
    quit QuitFailure
  ma
proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T =
  ## confutils hook: parses a CLI argument into a `SignedPeerRecord`.
  ## Logs and exits the process on invalid input.
  var res: SignedPeerRecord
  try:
    if not res.fromURI(uri):
      warn "Invalid SignedPeerRecord uri", uri = uri
      quit QuitFailure
  except CatchableError as exc:
    warn "Invalid SignedPeerRecord uri", uri = uri, error = exc.msg
    quit QuitFailure
  res
proc parseCmdArg*(T: type EthAddress, address: string): T =
  ## confutils hook: parses a CLI argument into an `EthAddress`.
  ## NOTE(review): `.get()` raises on invalid input instead of the
  ## warn-and-quit pattern used by the sibling hooks — confirm intended.
  EthAddress.init($address).get()
proc parseCmdArg*(T: type NBytes, val: string): T =
  ## confutils hook: parses a byte-size CLI argument (binary units) into
  ## `NBytes`. Logs and exits the process on invalid input.
  var num = 0'i64
  let count = parseSize(val, num, alwaysBin = true)
  if count == 0:
    warn "Invalid number of bytes", nbytes = val
    quit QuitFailure
  NBytes(num)
|
|
|
proc parseCmdArg*(T: type Duration, val: string): T =
|
|
|
|
var dur: Duration
|
|
|
|
let count = parseDuration(val, dur)
|
|
|
|
if count == 0:
|
2024-03-12 09:57:13 +00:00
|
|
|
warn "Cannot parse duration", dur = dur
|
2023-07-06 23:23:27 +00:00
|
|
|
quit QuitFailure
|
|
|
|
dur
|
|
|
|
|
proc readValue*(r: var TomlReader, val: var EthAddress)
               {.upraises: [SerializationError, IOError].} =
  ## TOML deserialization hook for `EthAddress`.
  ## NOTE(review): `.get()` raises on malformed addresses rather than
  ## quitting like the other readValue hooks — confirm intended.
  val = EthAddress.init(r.readValue(string)).get()
proc readValue*(r: var TomlReader, val: var SignedPeerRecord) =
  ## TOML deserialization hook for `SignedPeerRecord`; delegates to the
  ## CLI parser so config-file and CLI values behave identically.
  without uri =? r.readValue(string).catch, err:
    error "invalid SignedPeerRecord configuration value", error = err.msg
    quit QuitFailure

  val = SignedPeerRecord.parseCmdArg(uri)
proc readValue*(r: var TomlReader, val: var MultiAddress) =
  ## TOML deserialization hook for `MultiAddress`.
  ## Logs and exits the process on invalid input.
  without input =? r.readValue(string).catch, err:
    error "invalid MultiAddress configuration value", error = err.msg
    quit QuitFailure

  let res = MultiAddress.init(input)
  if res.isOk:
    val = res.get()
  else:
    warn "Invalid MultiAddress", input=input, error=res.error()
    quit QuitFailure
proc readValue*(r: var TomlReader, val: var NBytes)
               {.upraises: [SerializationError, IOError].} =
  ## TOML deserialization hook for `NBytes` (binary size units).
  ## Logs and exits the process on invalid input.
  var value = 0'i64
  let str = r.readValue(string)
  let count = parseSize(str, value, alwaysBin = true)
  if count == 0:
    error "invalid number of bytes for configuration value", value = str
    quit QuitFailure
  val = NBytes(value)
proc readValue*(r: var TomlReader, val: var Duration)
               {.upraises: [SerializationError, IOError].} =
  ## TOML deserialization hook for `Duration`.
  ## Logs and exits the process on invalid input.
  let str = r.readValue(string)
  var dur: Duration
  let count = parseDuration(str, dur)
  if count == 0:
    error "Invalid duration parse", value = str
    quit QuitFailure
  val = dur
# no idea why confutils needs this:
# confutils requires completeCmdArg overloads for every custom-parsed type;
# shell completion is not supported for these, so they return nothing.
proc completeCmdArg*(T: type EthAddress; val: string): seq[string] =
  discard

proc completeCmdArg*(T: type NBytes; val: string): seq[string] =
  discard

proc completeCmdArg*(T: type Duration; val: string): seq[string] =
  discard
|
2022-04-12 13:21:07 +00:00
|
|
|
# silly chronicles, colors is a compile-time property
|
feat: create logging proxy (#663)
* implement a logging proxy
The logging proxy:
- prevents the need to import chronicles (as well as export except toJson),
- prevents the need to override `writeValue` or use or import nim-json-seralization elsewhere in the codebase, allowing for sole use of utils/json for de/serialization,
- and handles json formatting correctly in chronicles json sinks
* Rename logging -> logutils to avoid ambiguity with common names
* clean up
* add setProperty for JsonRecord, remove nim-json-serialization conflict
* Allow specifying textlines and json format separately
Not specifying a LogFormat will apply the formatting to both textlines and json sinks.
Specifying a LogFormat will apply the formatting to only that sink.
* remove unneeded usages of std/json
We only need to import utils/json instead of std/json
* move serialization from rest/json to utils/json so it can be shared
* fix NoColors ambiguity
Was causing unit tests to fail on Windows.
* Remove nre usage to fix Windows error
Windows was erroring with `could not load: pcre64.dll`. Instead of fixing that error, remove the pcre usage :)
* Add logutils module doc
* Shorten logutils.formatIt for `NBytes`
Both json and textlines formatIt were not needed, and could be combined into one formatIt
* remove debug integration test config
debug output and logformat of json for integration test logs
* Use ## module doc to support docgen
* bump nim-poseidon2 to export fromBytes
Before the changes in this branch, fromBytes was likely being resolved by nim-stew, or other dependency. With the changes in this branch, that dependency was removed and fromBytes could no longer be resolved. By exporting fromBytes from nim-poseidon, the correct resolution is now happening.
* fixes to get compiling after rebasing master
* Add support for Result types being logged using formatIt
2024-01-23 07:35:03 +00:00
|
|
|
proc stripAnsi*(v: string): string =
|
2022-04-12 13:21:07 +00:00
|
|
|
var
|
|
|
|
res = newStringOfCap(v.len)
|
|
|
|
i: int
|
|
|
|
|
|
|
|
while i < v.len:
|
|
|
|
let c = v[i]
|
|
|
|
if c == '\x1b':
|
|
|
|
var
|
|
|
|
x = i + 1
|
|
|
|
found = false
|
|
|
|
|
|
|
|
while x < v.len: # look for [..m
|
|
|
|
let c2 = v[x]
|
|
|
|
if x == i + 1:
|
|
|
|
if c2 != '[':
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
if c2 in {'0'..'9'} + {';'}:
|
|
|
|
discard # keep looking
|
|
|
|
elif c2 == 'm':
|
|
|
|
i = x + 1
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
inc x
|
|
|
|
|
|
|
|
if found: # skip adding c
|
|
|
|
continue
|
|
|
|
res.add c
|
|
|
|
inc i
|
|
|
|
|
|
|
|
res
|
|
|
|
|
proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} =
  ## Updates log levels (without clearing old ones).
  ## `logLevel` is "LEVEL" optionally followed by ";topic=LEVEL" directives.
  ## Raises ValueError when the global level is not a valid LogLevel name.
  let directives = logLevel.split(";")
  try:
    setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii))
  except ValueError:
    raise (ref ValueError)(msg: "Please specify one of: trace, debug, info, notice, warn, error or fatal")

  if directives.len > 1:
    # Remaining directives set per-topic levels; unknown topics are only warned.
    for topicName, settings in parseTopicDirectives(directives[1..^1]):
      if not setTopicState(topicName, settings.state, settings.logLevel):
        warn "Unrecognized logging topic", topic = topicName
proc setupLogging*(conf: CodexConf) =
  ## Wires chronicles sink writers according to `conf.logFormat`,
  ## `conf.logFile` and `conf.logLevel`. Expects a 3-output chronicles
  ## stream: [0] = textlines, [1] = json, [2] = file.
  when defaultChroniclesStream.outputs.type.arity != 3:
    warn "Logging configuration options not enabled in the current build"
  else:
    var logFile: ?IoHandle

    proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard
    proc writeAndFlush(f: File, msg: LogOutputStr) =
      try:
        f.write(msg)
        f.flushFile()
      except IOError as err:
        logLoggingFailure(cstring(msg), err)

    proc stdoutFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, msg)

    proc noColorsFlush(logLevel: LogLevel, msg: LogOutputStr) =
      writeAndFlush(stdout, stripAnsi(msg))

    proc fileFlush(logLevel: LogLevel, msg: LogOutputStr) =
      if file =? logFile:
        if error =? file.writeFile(stripAnsi(msg).toBytes).errorOption:
          error "failed to write to log file", errorCode = $error

    # File sink [2]: disabled unless --log-file was given and opens cleanly.
    defaultChroniclesStream.outputs[2].writer = noOutput
    if logFilePath =? conf.logFile and logFilePath.len > 0:
      let logFileHandle = openFile(
        logFilePath,
        {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}
      )
      if logFileHandle.isErr:
        error "failed to open log file",
          path = logFilePath,
          errorCode = $logFileHandle.error
      else:
        logFile = logFileHandle.option
        defaultChroniclesStream.outputs[2].writer = fileFlush

    # Json sink [1]: off by default; enabled only for LogKind.Json below.
    defaultChroniclesStream.outputs[1].writer = noOutput

    let writer =
      case conf.logFormat:
      of LogKind.Auto:
        if isatty(stdout):
          stdoutFlush
        else:
          noColorsFlush
      of LogKind.Colors: stdoutFlush
      of LogKind.NoColors: noColorsFlush
      of LogKind.Json:
        defaultChroniclesStream.outputs[1].writer = stdoutFlush
        noOutput
      of LogKind.None:
        noOutput

    when codex_enable_log_counter:
      var counter = 0.uint64
      proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) =
        inc(counter)
        let withoutNewLine = msg[0..^2]
        writer(logLevel, withoutNewLine & " count=" & $counter & "\n")
      defaultChroniclesStream.outputs[0].writer = numberedWriter
    else:
      defaultChroniclesStream.outputs[0].writer = writer

  try:
    updateLogLevel(conf.logLevel)
  except ValueError as err:
    try:
      stderr.write "Invalid value for --log-level. " & err.msg & "\n"
    except IOError:
      echo "Invalid value for --log-level. " & err.msg
    quit QuitFailure
proc setupMetrics*(config: CodexConf) =
  ## Starts the Prometheus metrics HTTP server when --metrics is enabled;
  ## a failure to start is fatal (raises via raiseAssert).
  if config.metricsEnabled:
    let metricsAddress = config.metricsAddress
    notice "Starting metrics HTTP server",
      url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
    try:
      startMetricsHttpServer($metricsAddress, config.metricsPort)
    except CatchableError as exc:
      raiseAssert exc.msg
    except Exception as exc:
      raiseAssert exc.msg # TODO fix metrics