import strutils
const nimCachePathOverride {.strdefine.} = ""
when nimCachePathOverride == "":
when defined(release):
let nimCachePath = "nimcache/release/" & projectName()
else:
let nimCachePath = "nimcache/debug/" & projectName()
else:
let nimCachePath = nimCachePathOverride
switch("nimcache", nimCachePath)
# `-flto` gives a significant improvement in processing speed, especially for hash tree and state transition code (basically any CPU-bound code implemented in nim)
# With LTO enabled, optimization flags should be passed to both compiler and linker!
if defined(release) and not defined(disableLTO):
# "-w" is not passed to the compiler during linking, so we need to disable
# some warnings by hand.
switch("passL", "-Wno-stringop-overflow -Wno-stringop-overread")
if defined(macosx): # Clang
switch("passC", "-flto=thin")
switch("passL", "-flto=thin -Wl,-object_path_lto," & nimCachePath & "/lto")
elif defined(linux):
switch("passC", "-flto=jobserver")
switch("passL", "-flto=jobserver")
switch("passC", "-finline-limit=100000")
switch("passL", "-finline-limit=100000")
else:
# On Windows, LTO needs more love and attention: "gcc-ar" and "gcc-ranlib" are
# used for static libraries, so it is skipped here for now.
discard
# show C compiler warnings
if defined(cwarnings):
let common_gcc_options = "-Wno-discarded-qualifiers -Wno-incompatible-pointer-types"
if defined(windows):
put("gcc.options.always", "-mno-ms-bitfields " & common_gcc_options)
put("clang.options.always", "-mno-ms-bitfields " & common_gcc_options)
else:
put("gcc.options.always", common_gcc_options)
put("clang.options.always", common_gcc_options)
if defined(limitStackUsage):
# This limits stack usage of each individual function to 1MB - the option is
# available on some GCC versions but not all - run with `-d:limitStackUsage`
# and look for .su files in "./build/", "./nimcache/" or $TMPDIR that list the
# stack size of each function.
switch("passC", "-fstack-usage -Werror=stack-usage=1048576")
switch("passL", "-fstack-usage -Werror=stack-usage=1048576")
if defined(windows):
# disable timestamps in Windows PE headers - https://wiki.debian.org/ReproducibleBuilds/TimestampsInPEBinaries
switch("passL", "-Wl,--no-insert-timestamp")
# increase stack size
switch("passL", "-Wl,--stack,8388608")
# https://github.com/nim-lang/Nim/issues/4057
--tlsEmulation:off
if defined(i386):
# set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
switch("passL", "-Wl,--large-address-aware")
# The dynamic Chronicles output currently prevents us from using colors on Windows
# because these require direct manipulations of the stdout File object.
switch("define", "chronicles_colors=off")
# This helps especially on 32-bit x86 which, without SSE2 and newer instructions,
# requires quite roundabout code generation for cryptography and other use cases
# involving 64-bit and larger arithmetic, and also suffers from register
# starvation. When engineering a more portable binary release, this should be
# tweaked but still use at least -msse2 or -msse3.
#
# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#ssse3-supplemental-sse3
# suggests that SHA256 hashing with SSSE3 is 20% faster than without SSSE3, so
# given its near-ubiquity in the x86 installed base, it renders a distribution
# build more viable on an overall broader range of hardware.
#
# Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758
if defined(disableMarchNative) or (defined(macosx) and defined(arm64)):
if defined(i386) or defined(amd64):
switch("passC", "-mssse3")
switch("passL", "-mssse3")
else:
switch("passC", "-march=native")
switch("passL", "-march=native")
if defined(windows):
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782
# ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes)
switch("passC", "-mno-avx512f")
switch("passL", "-mno-avx512f")
# omitting frame pointers in nim breaks the GC
# https://github.com/nim-lang/Nim/issues/10625
switch("passC", "-fno-omit-frame-pointer")
switch("passL", "-fno-omit-frame-pointer")
--threads:on
--opt:speed
--excessiveStackTrace:on
# enable metric collection
--define:metrics
--define:chronicles_line_numbers # These are disabled for release binaries
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
# switch("define", "snappy_implementation=libp2p")
# TODO https://github.com/status-im/nimbus-eth2/issues/3130
# We are still seeing problems with the websock package, so we stick to using news:
switch("define", "json_rpc_websocket_package=news")
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]
switch("define", "nim_compiler_path=" & currentDir & "env.sh nim")
switch("define", "withoutPCRE")
switch("import", "testutils/moduletests")
const useLibStackTrace = not defined(windows) and
not defined(disable_libbacktrace)
when useLibStackTrace:
--define:nimStackTraceOverride
switch("import", "libbacktrace")
else:
--stacktrace:on
--linetrace:on
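# A hedged usage note: passing -d:disable_libbacktrace (or building on Windows)
# selects this plain --stacktrace/--linetrace fallback instead of the
# libbacktrace-based lightweight stack traces above.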
var canEnableDebuggingSymbols = true
if defined(macosx):
# The default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
let openFilesLimitTarget = 1024
var openFilesLimit = 0
try:
openFilesLimit = staticExec("ulimit -n").strip(chars = Whitespace + Newlines).parseInt()
if openFilesLimit < openFilesLimitTarget:
echo "Open files limit too low to enable debugging symbols and lightweight stack traces."
echo "Increase it with \"ulimit -n " & $openFilesLimitTarget & "\""
canEnableDebuggingSymbols = false
except:
echo "ulimit error"
# We ignore this resource limit on Windows, where a default `ulimit -n` of 256
# in Git Bash is apparently ignored by the OS, and on Linux where the default of
# 1024 is good enough for us.
if canEnableDebuggingSymbols:
# add debugging symbols and original files and line numbers
--debugger:native
--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
switch("warning", "CaseTransition:off")
# The compiler doth protest too much, methinks, about all these cases where it can't
# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230
switch("warning", "ObservableStores:off")
# Too many false positives for "Warning: method has lock level <unknown>, but another method has 0 [LockLevel]"
switch("warning", "LockLevel:off")
# Useful for Chronos metrics.
#--define:chronosFutureTracking
# ############################################################
#
# No LTO for crypto
#
# ############################################################
# This applies per-file compiler flags to C files
# which do not support {.localPassC: "-fno-lto".}
# Unfortunately this is filename based instead of path-based
# Assumes GCC
# BLST
put("server.always", "-fno-lto")
put("assembly.always", "-fno-lto")
# Secp256k1
put("secp256k1.always", "-fno-lto")
# BearSSL - only RNGs
put("aesctr_drbg.always", "-fno-lto")
put("hmac_drbg.always", "-fno-lto")
put("sysrng.always", "-fno-lto")
# Miracl - only ECP to derive public key from private key
put("ecp_BLS12381.always", "-fno-lto")
# ############################################################
#
# Spurious warnings
#
# ############################################################
# sqlite3.c: In function ‘sqlite3SelectNew’:
# vendor/nim-sqlite3-abi/sqlite3.c:124500: warning: function may return address of local variable [-Wreturn-local-addr]
put("sqlite3.always", "-fno-lto") # -Wno-return-local-addr