name: CI
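
# Pushes that only touch media/, docs/ or Markdown files don't trigger a build;
# workflow_dispatch additionally allows starting the workflow by hand.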
on:
  push:
    paths-ignore: ['media/**', 'docs/**', '**/*.md']
    branches:
      - stable
      - testing
      - unstable
  pull_request:
    paths-ignore: ['media/**', 'docs/**', '**/*.md']
  workflow_dispatch:

jobs:
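  # Every "build" job is one target/branch combination from the matrix below:
  # four OS/CPU targets, each built against the Nim version-1-2 and
  # version-1-6 branches. The "include" entries attach the runner image and
  # default shell that match each OS.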
  build:
    strategy:
      fail-fast: false
      matrix:
        target:
          - os: linux
            cpu: amd64
          - os: linux
            cpu: i386
          - os: macos
            cpu: amd64
          - os: windows
            cpu: amd64
        branch: [version-1-2, version-1-6]
        include:
          - target:
              os: linux
            builder: ubuntu-20.04
            shell: bash
          - target:
              os: macos
            builder: macos-11
            shell: bash
          - target:
              os: windows
            builder: windows-2019
            shell: msys2 {0}
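
    # All `run:` steps use the per-OS shell selected by the matrix: plain bash
    # on Linux and macOS, and the MSYS2 wrapper shell provided by
    # msys2/setup-msys2 on Windows.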
    defaults:
      run:
        shell: ${{ matrix.shell }}

    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
    runs-on: ${{ matrix.builder }}
    steps:
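      # The branch name is exported with the legacy set-output workflow command
      # and read back by the cancellation step below, so runs on the long-lived
      # branches are never cancelled.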
      - name: Get branch name
        shell: bash
        run: |
          if [[ '${{ github.event_name }}' == 'pull_request' ]]; then
            echo "##[set-output name=branch_name;]$(echo ${GITHUB_HEAD_REF})"
            echo "Branch found (PR): ${GITHUB_HEAD_REF}"
          else
            echo "##[set-output name=branch_name;]$(echo ${GITHUB_REF#refs/heads/})"
            echo "Branch found (not PR): ${GITHUB_REF#refs/heads/}"
          fi
        id: get_branch

      - name: Cancel Previous Runs (except main branches)
        if: >
          steps.get_branch.outputs.branch_name != 'stable' &&
          steps.get_branch.outputs.branch_name != 'unstable' &&
          steps.get_branch.outputs.branch_name != 'testing'
        uses: styfle/cancel-workflow-action@0.9.1
        with:
          access_token: ${{ github.token }}

      - name: Checkout
        uses: actions/checkout@v2
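
      # 32-bit cross-compilation setup: install the multilib toolchain, then
      # shadow gcc/g++ with wrapper scripts that force -m32 (and pass -mno-adx,
      # presumably so ADX support detected on the host doesn't leak into the
      # 32-bit build). The wrappers are put on PATH via $GITHUB_PATH.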
      - name: Install build dependencies (Linux i386)
        if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
        run: |
          sudo dpkg --add-architecture i386
          sudo apt-fast update -qq
          sudo DEBIAN_FRONTEND='noninteractive' apt-fast install \
            --no-install-recommends -yq gcc-multilib g++-multilib
          mkdir -p external/bin
          cat << EOF > external/bin/gcc
          #!/bin/bash
          exec $(which gcc) -m32 -mno-adx "\$@"
          EOF
          cat << EOF > external/bin/g++
          #!/bin/bash
          exec $(which g++) -m32 -mno-adx "\$@"
          EOF
          chmod 755 external/bin/gcc external/bin/g++
          echo "${{ github.workspace }}/external/bin" >> $GITHUB_PATH
      - name: MSYS2 (Windows i386)
        if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
        uses: msys2/setup-msys2@v2
        with:
          path-type: inherit
          msystem: MINGW32
          install: >-
            base-devel
            git
            mingw-w64-i686-toolchain
            mingw-w64-i686-cmake

      - name: MSYS2 (Windows amd64)
        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
        uses: msys2/setup-msys2@v2
        with:
          path-type: inherit
          install: >-
            base-devel
            git
            mingw-w64-x86_64-toolchain
            mingw-w64-x86_64-cmake
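
      # Windows builds need the prebuilt DLLs from nim-lang.org's windeps.zip;
      # they are cached per target CPU so the archive is only downloaded again
      # on a cache miss.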
      - name: Restore Nim DLLs dependencies (Windows) from cache
        if: runner.os == 'Windows'
        id: windows-dlls-cache
        uses: actions/cache@v2
        with:
          path: external/dlls
          key: 'dlls-${{ matrix.target.cpu }}'

      - name: Install DLLs dependencies (Windows)
        if: >
          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
          runner.os == 'Windows'
        run: |
          mkdir -p external
          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
          7z x -y external/windeps.zip -oexternal/dlls

      - name: Path to cached dependencies (Windows)
        if: >
          runner.os == 'Windows'
        run: |
          echo "${{ github.workspace }}/external/dlls" >> $GITHUB_PATH
          # for miniupnp that runs "wingenminiupnpcstrings.exe" from the current dir
          echo "." >> $GITHUB_PATH
      - name: Install build dependencies (macOS)
        if: runner.os == 'macOS'
        run: |
          brew install gnu-getopt
          brew link --force gnu-getopt
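
      # Everything written to $GITHUB_ENV here (PLATFORM, NIMFLAGS, CFLAGS,
      # ncpu, make_cmd) is consumed by the build and test steps further down.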
      - name: Derive environment variables
        run: |
          if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            PLATFORM=x64
          else
            PLATFORM=x86
          fi
          echo "PLATFORM=${PLATFORM}" >> $GITHUB_ENV

          # Stack usage test on recent enough gcc:
          if [[ '${{ runner.os }}' == 'Linux' && '${{ matrix.target.cpu }}' == 'amd64' ]]; then
            export NIMFLAGS="${NIMFLAGS} -d:limitStackUsage"
            echo "NIMFLAGS=${NIMFLAGS}" >> $GITHUB_ENV
          fi

          # libminiupnp / natpmp
          if [[ '${{ runner.os }}' == 'Linux' && '${{ matrix.target.cpu }}' == 'i386' ]]; then
            export CFLAGS="${CFLAGS} -m32 -mno-adx"
            echo "CFLAGS=${CFLAGS}" >> $GITHUB_ENV
          fi

          ncpu=""
          make_cmd="make"
          case '${{ runner.os }}' in
          'Linux')
            ncpu=$(nproc)
            ;;
          'macOS')
            ncpu=$(sysctl -n hw.ncpu)
            ;;
          'Windows')
            ncpu=${NUMBER_OF_PROCESSORS}
            make_cmd="mingw32-make"
            ;;
          esac
          [[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
          echo "ncpu=${ncpu}" >> $GITHUB_ENV
          echo "make_cmd=${make_cmd}" >> $GITHUB_ENV
      - name: Build Nim and Nimbus dependencies
        run: |
          ${make_cmd} -j ${ncpu} NIM_COMMIT=${{ matrix.branch }} ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update
          ./env.sh nim --version
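
      # The upstream HEAD of status-im/nim-eth2-scenarios becomes part of the
      # cache key below, so the fixtures cache is refreshed whenever new test
      # vectors are published.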
      - name: Get latest fixtures commit hash
        id: fixtures_version
        run: |
          getHash() {
            git ls-remote "https://github.com/$1" "${2:-HEAD}" | cut -f 1
          }
          fixturesHash=$(getHash status-im/nim-eth2-scenarios)
          echo "::set-output name=fixtures::${fixturesHash}"

      - name: Restore Ethereum Foundation fixtures from cache
        id: fixtures-cache
        uses: actions/cache@v2
        with:
          path: fixturesCache
          key: 'eth2-scenarios-${{ steps.fixtures_version.outputs.fixtures }}'

      # Important: even with a cache hit, this should be run
      # as it symlinks the cached items in their proper place
      - name: Get the Ethereum Foundation fixtures
        run: |
          scripts/setup_scenarios.sh fixturesCache
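
      # LOG_LEVEL=TRACE builds the binaries with the most verbose logging level
      # compiled in, so every trace-level code path at least has to compile.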
      - name: Smoke test the Beacon Node and Validator Client with all tracing enabled
        run: |
          if [[ "${{ matrix.branch }}" == "version-1-6" ]]; then
            # change to "|| true" to hide the CI failures in GitHub's UI (escape hatch if a blocker is detected in 1.6)
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} LOG_LEVEL=TRACE nimbus_beacon_node nimbus_validator_client || false
          else
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} LOG_LEVEL=TRACE nimbus_beacon_node nimbus_validator_client
          fi

      - name: Build all tools
        run: |
          if [[ "${{ matrix.branch }}" == "version-1-6" ]]; then
            # change to "|| true" to hide the CI failures in GitHub's UI (escape hatch if a blocker is detected in 1.6)
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} || false
          else
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }}
          fi
          # The Windows image runs out of disk space, so make some room
          rm -rf nimcache
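
      # DISABLE_TEST_FIXTURES_SCRIPT=1 stops the test target from fetching the
      # fixtures again, since they were already restored and symlinked above.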
      - name: Run tests
        run: |
          if [[ "${{ matrix.branch }}" == "version-1-6" ]]; then
            # change to "|| true" to hide the CI failures in GitHub's UI (escape hatch if a blocker is detected in 1.6)
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} DISABLE_TEST_FIXTURES_SCRIPT=1 test || false
          else
            ${make_cmd} -j ${ncpu} V=1 NIM_COMMIT=${{ matrix.branch }} DISABLE_TEST_FIXTURES_SCRIPT=1 test
          fi

      # The upload creates a combined report that gets posted as a comment on the PR
      # https://github.com/EnricoMi/publish-unit-test-result-action
      - name: Upload combined results
        uses: actions/upload-artifact@v2
        with:
          name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}
          path: build/*.xml

  # https://github.com/EnricoMi/publish-unit-test-result-action
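  # This job only uploads the payload of the triggering event; the separate
  # publish-unit-test-result-action workflow needs it in order to post the
  # combined test report as a PR comment.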
  event_file:
    name: "Event File"
    runs-on: ubuntu-latest
    steps:
      - name: Upload
        uses: actions/upload-artifact@v2
        with:
          name: Event File
          path: ${{ github.event_path }}