From c61a7306a94e1a4152a0ee3b53a3e915e97d87c8 Mon Sep 17 00:00:00 2001
From: Jenkins
Date: Fri, 4 Aug 2023 14:33:36 +0000
Subject: [PATCH] Update documentation

---
 404.html                                      |   2 +-
 categories/index.html                         |   2 +-
 index.html                                    |   2 +-
 ....7df7b9d5330d839733e334d553ef23c2.min.json |   1 +
 linkmap                                       |  74 +++----
 roadmap/acid/updates/2023-08-02/index.html    |   4 +-
 roadmap/codex/updates/2023-07-21/index.html   |   2 +-
 roadmap/codex/updates/2023-08-01/index.html   |   2 +-
 roadmap/index.html                            |   2 +-
 .../updates/2023-07-12/index.html             |   2 +-
 .../updates/2023-08-02/index.html             |   2 +-
 roadmap/nomos/updates/2023-07-24/index.html   |   2 +-
 roadmap/nomos/updates/2023-07-31/index.html   |   2 +-
 roadmap/page/2/index.html                     |   2 +-
 roadmap/vac/updates/2023-07-10/index.html     |   2 +-
 roadmap/vac/updates/2023-07-17/index.html     |   2 +-
 roadmap/vac/updates/2023-07-24/index.html     |   2 +-
 roadmap/vac/updates/2023-07-31/index.html     |   2 +-
 roadmap/waku/updates/2023-07-24/index.html    | 208 ++++++++++++------
 roadmap/waku/updates/2023-07-31/index.html    |   2 +-
 tags/acid-updates/index.html                  |   2 +-
 tags/codex-updates/index.html                 |   2 +-
 tags/ilab-updates/index.html                  |   2 +-
 tags/index.html                               |   2 +-
 tags/nomos-updates/index.html                 |   2 +-
 tags/vac-updates/index.html                   |   2 +-
 tags/waku-updates/index.html                  |   2 +-
 27 files changed, 205 insertions(+), 128 deletions(-)
 create mode 100644 indices/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json

diff --git a/404.html b/404.html
index 81559acab..8d9a2bb2e 100644
--- a/404.html
+++ b/404.html
@@ -102,7 +102,7 @@
 index: data.index,
 links: data.links,
 })),
- fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.fac68475ffec5f5f9e8dca14f02f2bba.min.json")
+ fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json")
 .then(data => data.json()),
 ])
 .then(([{index, links}, content]) => ({
diff --git a/categories/index.html b/categories/index.html
index ca891c6d3..7e0ce1212 100644
--- a/categories/index.html
+++ b/categories/index.html
@@ -102,7 +102,7 @@
 index: data.index,
 links: data.links,
 })),
- fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.fac68475ffec5f5f9e8dca14f02f2bba.min.json")
+ fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json")
 .then(data => data.json()),
 ])
 .then(([{index, links}, content]) => ({
diff --git a/index.html b/index.html
index 5a38012ef..f5a5dd3ef 100644
--- a/index.html
+++ b/index.html
@@ -103,7 +103,7 @@
 index: data.index,
 links: data.links,
 })),
- fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.fac68475ffec5f5f9e8dca14f02f2bba.min.json")
+ fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json")
 .then(data => data.json()),
 ])
 .then(([{index, links}, content]) => ({
diff --git a/indices/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json b/indices/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json
new file mode 100644
index 000000000..4b52b8c36
--- /dev/null
+++ b/indices/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json
@@ -0,0 +1 @@
+{"/":{"title":"Logos Technical Roadmap and Activity","content":"This site attempts to inform the previous, current, and future work required to fulfill the requirements of the projects under the Logos Collective, a complete tech stack that provides infrastructure for the self-sovereign network state. 
To learn more about the motivation, please visit the [Logos Collective Site](https://logos.co).\n\n## Navigation\n- Research and Development Updates\n\t- [Vac updates](tags/vac-updates)\n\t- [Waku updates](tags/waku-updates)\n\t- [Codex updates](tags/codex-updates)\n\t- [Nomos updates](tags/nomos-updates)\n\t- [Innovation Lab updates](tags/ilab-updates)\n\t- [Comms updates](tags/acid-updates)","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":[]},"/private/notes/CJK-+-Latex-Support-%E6%B5%8B%E8%AF%95":{"title":"CJK + Latex Support (测试)","content":"\n## Chinese, Japanese, Korean Support\n几乎在我们意识到之前,我们已经离开了地面。\n\n우리가 그것을 알기도 전에 우리는 땅을 떠났습니다.\n\n私たちがそれを知るほぼ前に、私たちは地面を離れていました。\n\n## Latex\n\nBlock math works with two dollar signs `$$...$$`\n\n$$f(x) = \\int_{-\\infty}^\\infty\n f\\hat(\\xi),e^{2 \\pi i \\xi x}\n \\,d\\xi$$\n\t\nInline math also works with single dollar signs `$...$`. For example, Euler's identity but inline: $e^{i\\pi} = 0$\n\nAligned equations work quite well:\n\n$$\n\\begin{aligned}\na \u0026= b + c \\\\ \u0026= e + f \\\\\n\\end{aligned}\n$$\n\nAnd matrices\n\n$$\n\\begin{bmatrix}\n1 \u0026 2 \u0026 3 \\\\\na \u0026 b \u0026 c\n\\end{bmatrix}\n$$\n\n## RTL\nMore information on configuring RTL languages like Arabic in the [config](config.md) page.\n","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":[]},"/private/notes/callouts":{"title":"Callouts","content":"\n## Callout support\n\nQuartz supports the same Admonition-callout syntax as Obsidian.\n\nThis includes\n- 12 Distinct callout types (each with several aliases)\n- Collapsable callouts\n\nSee [documentation on supported types and syntax here](https://help.obsidian.md/How+to/Use+callouts#Types).\n\n## Showcase\n\n\u003e [!EXAMPLE] Examples\n\u003e\n\u003e Aliases: example\n\n\u003e [!note] Notes\n\u003e\n\u003e Aliases: note\n\n\u003e [!abstract] Summaries \n\u003e\n\u003e Aliases: abstract, summary, tldr\n\n\u003e [!info] Info \n\u003e\n\u003e Aliases: info, todo\n\n\u003e [!tip] Hint \n\u003e\n\u003e Aliases: tip, hint, important\n\n\u003e [!success] Success \n\u003e\n\u003e Aliases: success, check, done\n\n\u003e [!question] Question \n\u003e\n\u003e Aliases: question, help, faq\n\n\u003e [!warning] Warning \n\u003e\n\u003e Aliases: warning, caution, attention\n\n\u003e [!failure] Failure \n\u003e\n\u003e Aliases: failure, fail, missing\n\n\u003e [!danger] Error\n\u003e\n\u003e Aliases: danger, error\n\n\u003e [!bug] Bug\n\u003e\n\u003e Aliases: bug\n\n\u003e [!quote] Quote\n\u003e\n\u003e Aliases: quote, cite\n","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":[]},"/private/notes/config":{"title":"Configuration","content":"\n## Configuration\nQuartz is designed to be extremely configurable. You can find the bulk of the configuration scattered throughout the repository depending on how in-depth you'd like to get.\n\nThe majority of configuration can be found under `data/config.yaml`. 
An annotated example configuration is shown below.\n\n```yaml {title=\"data/config.yaml\"}\n# The name to display in the footer\nname: Jacky Zhao\n\n# whether to globally show the table of contents on each page\n# this can be turned off on a per-page basis by adding this to the\n# front-matter of that note\nenableToc: true\n\n# whether to by-default open or close the table of contents on each page\nopenToc: false\n\n# whether to display on-hover link preview cards\nenableLinkPreview: true\n\n# whether to render titles for code blocks\nenableCodeBlockTitle: true \n\n# whether to render copy buttons for code blocks\nenableCodeBlockCopy: true \n\n# whether to render callouts\nenableCallouts: true\n\n# whether to try to process Latex\nenableLatex: true\n\n# whether to enable single-page-app style rendering\n# this prevents flashes of unstyled content and improves\n# smoothness of Quartz. More info in issue #109 on GitHub\nenableSPA: true\n\n# whether to render a footer\nenableFooter: true\n\n# whether backlinks of pages should show the context in which\n# they were mentioned\nenableContextualBacklinks: true\n\n# whether to show a section of recent notes on the home page\nenableRecentNotes: false\n\n# whether to display an 'edit' button next to the last edited field\n# that links to github\nenableGitHubEdit: true\nGitHubLink: https://github.com/jackyzha0/quartz/tree/hugo/content\n\n# whether to use Operand to power semantic search\n# IMPORTANT: replace this API key with your own if you plan on using\n# Operand search!\nenableSemanticSearch: false\noperandApiKey: \"REPLACE-WITH-YOUR-OPERAND-API-KEY\"\n\n# page description used for SEO\ndescription:\n Host your second brain and digital garden for free. Quartz features extremely fast full-text search,\n Wikilink support, backlinks, local graph, tags, and link previews.\n\n# title of the home page (also for SEO)\npage_title:\n \"🪴 Quartz 3.2\"\n\n# links to show in the footer\nlinks:\n - link_name: Twitter\n link: https://twitter.com/_jzhao\n - link_name: Github\n link: https://github.com/jackyzha0\n```\n\n### Code Block Titles\nTo add code block titles with Quartz:\n\n1. Ensure that code block titles are enabled in Quartz's configuration:\n\n ```yaml {title=\"data/config.yaml\", linenos=false}\n enableCodeBlockTitle: true\n ```\n\n2. Add the `title` attribute to the desired [code block\n fence](https://gohugo.io/content-management/syntax-highlighting/#highlighting-in-code-fences):\n\n ```markdown {linenos=false}\n ```yaml {title=\"data/config.yaml\"}\n enableCodeBlockTitle: true # example from step 1\n ```\n ```\n\n**Note** that if `{title=\u003cmy-title\u003e}` is included, and code block titles are not\nenabled, no errors will occur, and the title attribute will be ignored.\n\n### HTML Favicons\nIf you would like to customize the favicons of your Quartz-based website, you \ncan add them to the `data/config.yaml` file. The **default** without any set \n`favicon` key is:\n\n```html {title=\"layouts/partials/head.html\", linenostart=15}\n\u003clink rel=\"shortcut icon\" href=\"icon.png\" type=\"image/png\"\u003e\n```\n\nThe default can be overridden by defining a value to the `favicon` key in your \n`data/config.yaml` file. For example, here is a `List[Dictionary]` example format, which is\nequivalent to the default:\n\n```yaml {title=\"data/config.yaml\", linenos=false}\nfavicon:\n - { rel: \"shortcut icon\", href: \"icon.png\", type: \"image/png\" }\n# - { ... 
} # Repeat for each additional favicon you want to add\n```\n\nIn this format, the keys are identical to their HTML representations.\n\nIf you plan to add multiple favicons generated by a website (see list below), it\nmay be easier to define it as HTML. Here is an example which appends the \n**Apple touch icon** to Quartz's default favicon:\n\n```yaml {title=\"data/config.yaml\", linenos=false}\nfavicon: |\n \u003clink rel=\"shortcut icon\" href=\"icon.png\" type=\"image/png\"\u003e\n \u003clink rel=\"apple-touch-icon\" sizes=\"180x180\" href=\"/apple-touch-icon.png\"\u003e\n```\n\nThis second favicon will now be used as a web page icon when someone adds your \nwebpage to the home screen of their Apple device. If you are interested in more \ninformation about the current and past standards of favicons, you can read \n[this article](https://www.emergeinteractive.com/insights/detail/the-essentials-of-favicons/).\n\n**Note** that all generated favicon paths, defined by the `href` \nattribute, are relative to the `static/` directory.\n\n### Graph View\nTo customize the Interactive Graph view, you can poke around `data/graphConfig.yaml`.\n\n```yaml {title=\"data/graphConfig.yaml\"}\n# if true, a Global Graph will be shown on home page with full width, no backlink.\n# A different set of Local Graphs will be shown on sub pages.\n# if false, Local Graph will be default on every page as usual\nenableGlobalGraph: false\n\n### Local Graph ###\nlocalGraph:\n # whether automatically generate a legend\n enableLegend: false\n \n # whether to allow dragging nodes in the graph\n enableDrag: true\n \n # whether to allow zooming and panning the graph\n enableZoom: true\n \n # how many neighbours of the current node to show (-1 is all nodes)\n depth: 1\n \n # initial zoom factor of the graph\n scale: 1.2\n \n # how strongly nodes should repel each other\n repelForce: 2\n\n # how strongly should nodes be attracted to the center of gravity\n centerForce: 1\n\n # what the default link length should be\n linkDistance: 1\n \n # how big the node labels should be\n fontSize: 0.6\n \n # scale at which to start fading the labes on nodes\n opacityScale: 3\n\n### Global Graph ###\nglobalGraph:\n\t# same settings as above\n\n### For all graphs ###\n# colour specific nodes path off of their path\npaths:\n - /moc: \"#4388cc\"\n```\n\n\n## Styling\nWant to go even more in-depth? You can add custom CSS styling and change existing colours through editing `assets/styles/custom.scss`. If you'd like to target specific parts of the site, you can add ids and classes to the HTML partials in `/layouts/partials`. \n\n### Partials\nPartials are what dictate what gets rendered to the page. Want to change how pages are styled and structured? You can edit the appropriate layout in `/layouts`.\n\nFor example, the structure of the home page can be edited through `/layouts/index.html`. To customize the footer, you can edit `/layouts/partials/footer.html`\n\nMore info about partials on [Hugo's website.](https://gohugo.io/templates/partials/)\n\nStill having problems? Checkout our [FAQ and Troubleshooting guide](troubleshooting.md).\n\n## Language Support\n[CJK + Latex Support (测试)](CJK%20+%20Latex%20Support%20(测试).md) comes out of the box with Quartz.\n\nWant to support languages that read from right-to-left (like Arabic)? 
Hugo (and by proxy, Quartz) supports this natively.\n\nFollow the steps [Hugo provides here](https://gohugo.io/content-management/multilingual/#configure-languages) and modify your `config.toml`\n\nFor example:\n\n```toml\ndefaultContentLanguage = 'ar'\n[languages]\n [languages.ar]\n languagedirection = 'rtl'\n title = 'مدونتي'\n weight = 1\n```\n","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":["setup"]},"/private/notes/custom-Domain":{"title":"Custom Domain","content":"\n### Registrar\nThis step is only applicable if you are using a **custom domain**! If you are using a `\u003cYOUR-USERNAME\u003e.github.io` domain, you can skip this step.\n\nFor this last bit to take effect, you also need to create a CNAME record with the DNS provider you register your domain with (i.e. NameCheap, Google Domains).\n\nGitHub has some [documentation on this](https://docs.github.com/en/pages/configuring-a-custom-domain-for-your-github-pages-site/managing-a-custom-domain-for-your-github-pages-site), but the tldr; is to\n\n1. Go to your forked repository (`github.com/\u003cYOUR-GITHUB-USERNAME\u003e/quartz`) settings page and go to the Pages tab. Under \"Custom domain\", type your custom domain, then click **Save**.\n2. Go to your DNS Provider and create a CNAME record that points from your domain to `\u003cYOUR-GITHUB-USERNAME.github.io.` (yes, with the trailing period).\n\n\t![Example Configuration for Quartz](google-domains.png)*Example Configuration for Quartz*\n3. Wait 30 minutes to an hour for the network changes to kick in.\n4. Done!","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":[]},"/private/notes/editing":{"title":"Editing Content in Quartz","content":"\n## Editing \nQuartz runs on top of [Hugo](https://gohugo.io/) so all notes are written in [Markdown](https://www.markdownguide.org/getting-started/).\n\n### Folder Structure\nHere's a rough overview of what's what.\n\n**All content in your garden can found in the `/content` folder.** To make edits, you can open any of the files and make changes directly and save it. You can organize content into any folder you'd like.\n\n**To edit the main home page, open `/content/_index.md`.**\n\nTo create a link between notes in your garden, just create a normal link using Markdown pointing to the document in question. Please note that **all links should be relative to the root `/content` path**. \n\n```markdown\nFor example, I want to link this current document to `notes/config.md`.\n[A link to the config page](notes/config.md)\n```\n\nSimilarly, you can put local images anywhere in the `/content` folder.\n\n```markdown\nExample image (source is in content/notes/images/example.png)\n![Example Image](/content/notes/images/example.png)\n```\n\nYou can also use wikilinks if that is what you are more comfortable with!\n\n### Front Matter\nHugo is picky when it comes to metadata for files. Make sure that your title is double-quoted and that you have a title defined at the top of your file like so. You can also add tags here as well.\n\n```yaml\n---\ntitle: \"Example Title\"\ntags:\n- example-tag\n---\n\nRest of your content here...\n```\n\n### Obsidian\nI recommend using [Obsidian](http://obsidian.md/) as a way to edit and grow your digital garden. 
It comes with a really nice editor and graphical interface to preview all of your local files.\n\nThis step is **highly recommended**.\n\n\u003e 🔗 Step 3: [How to setup your Obsidian Vault to work with Quartz](obsidian.md)\n\n## Previewing Changes\nThis step is purely optional and mostly for those who want to see the published version of their digital garden locally before opening it up to the internet. This is *highly recommended* but not required.\n\n\u003e 👀 Step 4: [Preview Quartz Changes](preview%20changes.md)\n\nFor those who like to live life more on the edge, viewing the garden through Obsidian gets you pretty close to the real thing.\n\n## Publishing Changes\nNow that you know the basics of managing your digital garden using Quartz, you can publish it to the internet!\n\n\u003e 🌍 Step 5: [Hosting Quartz online!](hosting.md)\n\nHaving problems? Checkout our [FAQ and Troubleshooting guide](troubleshooting.md).\n","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":["setup"]},"/private/notes/hosting":{"title":"Deploying Quartz to the Web","content":"\n## Hosting on GitHub Pages\nQuartz is designed to be effortless to deploy. If you forked and cloned Quartz directly from the repository, everything should already be good to go! Follow the steps below.\n\n### Enable GitHub Actions\nBy default, GitHub disables workflows from running automatically on Forked Repostories. Head to the 'Actions' tab of your forked repository and Enable Workflows to setup deploying your Quartz site!\n\n![Enable GitHub Actions](github-actions.png)*Enable GitHub Actions*\n\n### Enable GitHub Pages\n\nHead to the 'Settings' tab of your forked repository and go to the 'Pages' tab.\n\n1. (IMPORTANT) Set the source to deploy from `master` (and not `hugo`) using `/ (root)`\n2. Set a custom domain here if you have one!\n\n![Enable GitHub Pages](github-pages.png)*Enable GitHub Pages*\n\n### Pushing Changes\nTo see your changes on the internet, we need to push it them to GitHub. Quartz is a `git` repository so updating it is the same workflow as you would follow as if it were just a regular software project.\n\n```shell\n# Navigate to Quartz folder\ncd \u003cpath-to-quartz\u003e\n\n# Commit all changes\ngit add .\ngit commit -m \"message describing changes\"\n\n# Push to GitHub to update site\ngit push origin hugo\n```\n\nNote: we specifically push to the `hugo` branch here. Our GitHub action automatically runs everytime a push to is detected to that branch and then updates the `master` branch for redeployment.\n\n### Setting up the Site\nNow let's get this site up and running. Never hosted a site before? No problem. Have a fancy custom domain you already own or want to subdomain your Quartz? That's easy too.\n\nHere, we take advantage of GitHub's free page hosting to deploy our site. Change `baseURL` in `/config.toml`. \n\nMake sure that your `baseURL` has a trailing `/`!\n\n[Reference `config.toml` here](https://github.com/jackyzha0/quartz/blob/hugo/config.toml)\n\n```toml\nbaseURL = \"https://\u003cYOUR-DOMAIN\u003e/\"\n```\n\nIf you are using this under a subdomain (e.g. `\u003cYOUR-GITHUB-USERNAME\u003e.github.io/quartz`), include the trailing `/`. **You need to do this especially if you are using GitHub!**\n\n```toml\nbaseURL = \"https://\u003cYOUR-GITHUB-USERNAME\u003e.github.io/quartz/\"\n```\n\nChange `cname` in `/.github/workflows/deploy.yaml`. 
Again, if you don't have a custom domain to use, you can use `\u003cYOUR-USERNAME\u003e.github.io`.\n\nPlease note that the `cname` field should *not* have any path `e.g. end with /quartz` or have a trailing `/`.\n\n[Reference `deploy.yaml` here](https://github.com/jackyzha0/quartz/blob/hugo/.github/workflows/deploy.yaml)\n\n```yaml {title=\".github/workflows/deploy.yaml\"}\n- name: Deploy \n uses: peaceiris/actions-gh-pages@v3 \n with: \n\tgithub_token: ${{ secrets.GITHUB_TOKEN }} # this can stay as is, GitHub fills this in for us!\n\tpublish_dir: ./public \n\tpublish_branch: master\n\tcname: \u003cYOUR-DOMAIN\u003e\n```\n\nHave a custom domain? [Learn how to set it up with Quartz ](custom%20Domain.md).\n\n### Ignoring Files\nOnly want to publish a subset of all of your notes? Don't worry, Quartz makes this a simple two-step process.\n\n❌ [Excluding pages from being published](ignore%20notes.md)\n\n---\n\nNow that your Quartz is live, let's figure out how to make Quartz really *yours*!\n\n\u003e Step 6: 🎨 [Customizing Quartz](config.md)\n\nHaving problems? Checkout our [FAQ and Troubleshooting guide](troubleshooting.md).\n","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":["setup"]},"/private/notes/ignore-notes":{"title":"Ignoring Notes","content":"\n### Quartz Ignore\nEdit `ignoreFiles` in `config.toml` to include paths you'd like to exclude from being rendered.\n\n```toml\n...\nignoreFiles = [ \n \"/content/templates/*\", \n \"/content/private/*\", \n \"\u003cyour path here\u003e\"\n]\n```\n\n`ignoreFiles` supports the use of Regular Expressions (RegEx) so you can ignore patterns as well (e.g. ignoring all `.png`s by doing `\\\\.png$`).\nTo ignore a specific file, you can also add the tag `draft: true` to the frontmatter of a note.\n\n```markdown\n---\ntitle: Some Private Note\ndraft: true\n---\n...\n```\n\nMore details in [Hugo's documentation](https://gohugo.io/getting-started/configuration/#ignore-content-and-data-files-when-rendering).\n\n### Global Ignore\nHowever, just adding to the `ignoreFiles` will only prevent the page from being access through Quartz. If you want to prevent the file from being pushed to GitHub (for example if you have a public repository), you need to also add the path to the `.gitignore` file at the root of the repository.","lastmodified":"2023-07-16T15:52:26.098715355Z","tags":[]},"/private/notes/obsidian":{"title":"Obsidian Vault Integration","content":"\n## Setup\nObsidian is the preferred way to use Quartz. You can either create a new Obsidian Vault or link one that your already have.\n\n### New Vault\nIf you don't have an existing Vault, [download Obsidian](https://obsidian.md/) and create a new Vault in the `/content` folder that you created and cloned during the [setup](setup.md) step.\n\n### Linking an existing Vault\nThe easiest way to use an existing Vault is to copy all of your files (directory and hierarchies intact) into the `/content` folder.\n\n## Settings\nGreat, now that you have your Obsidian linked to your Quartz, let's fix some settings so that they play well.\n\n1. Under Options \u003e Files and Links, set the New link format to always use Absolute Path in Vault.\n2. Go to Settings \u003e Files \u0026 Links \u003e Turn \"on\" automatically update internal links.\n\n![Obsidian Settings](obsidian-settings.png)*Obsidian Settings*\n\n## Templates\nInserting front matter everytime you want to create a new Note gets annoying really quickly. 
Luckily, Obsidian supports templates which makes inserting new content really easily.\n\n**If you decide to overwrite the `/content` folder completely, don't remove the `/content/templates` folder!**\n\nHead over to Options \u003e Core Plugins and enable the Templates plugin. Then go to Options \u003e Hotkeys and set a hotkey for 'Insert Template' (I recommend `[cmd]+T`). That way, when you create a new note, you can just press the hotkey for a new template and be ready to go!\n\n\u003e 👀 Step 4: [Preview Quartz Changes](preview%20changes.md)","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":["setup"]},"/private/notes/philosophy":{"title":"Quartz Philosophy","content":"\n\u003e “[One] who works with the door open gets all kinds of interruptions, but [they] also occasionally gets clues as to what the world is and what might be important.” — Richard Hamming\n\n## Why Quartz?\nHosting a public digital garden isn't easy. There are an overwhelming number of tutorials, resources, and guides for tools like [Notion](https://www.notion.so/), [Roam](https://roamresearch.com/), and [Obsidian](https://obsidian.md/), yet none of them have super easy to use *free* tools to publish that garden to the world.\n\nI've personally found that\n1. It's nice to access notes from anywhere\n2. Having a public digital garden invites open conversations\n3. It makes keeping personal notes and knowledge *playful and fun*\n\nI was really inspired by [Bianca](https://garden.bianca.digital/) and [Joel](https://joelhooks.com/digital-garden)'s digital gardens and wanted to try making my own.\n\n**The goal of Quartz is to make hosting your own public digital garden free and simple.** You don't even need your own website. Quartz does all of that for you and gives your own little corner of the internet.\n","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":[]},"/private/notes/preview-changes":{"title":"Preview Changes","content":"\nIf you'd like to preview what your Quartz site looks like before deploying it to the internet, here's exactly how to do that!\n\nNote that both of these steps need to be completed.\n\n## Install `hugo-obsidian`\nThis step will generate the list of backlinks for Hugo to parse. Ensure you have [Go](https://golang.org/doc/install) (\u003e= 1.16) installed.\n\n```bash\n# Install and link `hugo-obsidian` locally\ngo install github.com/jackyzha0/hugo-obsidian@latest\n```\n\nIf you are running into an error saying that `command not found: hugo-obsidian`, make sure you set your `GOPATH` correctly! This will allow your terminal to correctly recognize hugo-obsidian as an executable.\n\nAfterwards, start the Hugo server as shown above and your local backlinks and interactive graph should be populated!\n\n## Installing Hugo\nHugo is the static site generator that powers Quartz. [Install Hugo with \"extended\" Sass/SCSS version](https://gohugo.io/getting-started/installing/) first. Then,\n\n```bash\n# Navigate to your local Quartz folder\ncd \u003clocation-of-your-local-quartz\u003e\n\n# Start local server\nmake serve\n\n# View your site in a browser at http://localhost:1313/\n```\n\n\u003e 🌍 Step 5: [Hosting Quartz online!](hosting.md)","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":["setup"]},"/private/notes/search":{"title":"Search","content":"\nQuartz supports two modes of searching through content.\n\n## Full-text\nFull-text search is the default in Quartz. It produces results that *exactly* match the search query. 
This is easier to setup but usually produces lower quality matches.\n\n```yaml {title=\"data/config.yaml\"}\n# the default option\nenableSemanticSearch: false\n```\n\n## Natural Language\nNatural language search is powered by [Operand](https://operand.ai/). It understands language like a person does and finds results that best match user intent. In this sense, it is closer to how Google Search works.\n\nNatural language search tends to produce higher quality results than full-text search.\n\nHere's how to set it up.\n\n1. Create an Operand Account on [their website](https://operand.ai/).\n2. Go to Dashboard \u003e Settings \u003e Integrations.\n3. Follow the steps to setup the GitHub integration. Operand needs access to GitHub in order to index your digital garden properly!\n4. Head over to Dashboard \u003e Objects and press `(Cmd + K)` to open the omnibar and select 'Create Collection'.\n\t1. Set the 'Collection Label' to something that will help you remember it.\n\t2. You can leave the 'Parent Collection' field empty.\n5. Click into your newly made Collection.\n\t1. Press the 'share' button that looks like three dots connected by lines.\n\t2. Set the 'Interface Type' to `object-search` and click 'Create'.\n\t3. This will bring you to a new page with a search bar. Ignore this for now.\n6. Go back to Dashboard \u003e Settings \u003e API Keys and find your Quartz-specific Operand API key under 'Other keys'.\n\t1. Copy the key (which looks something like `0e733a7f-9b9c-48c6-9691-b54fa1c8b910`).\n\t2. Open `data/config.yaml`. Set `enableSemanticSearch` to `true` and `operandApiKey` to your copied key.\n\n```yaml {title=\"data/config.yaml\"}\n# the default option\nenableSemanticSearch: true\noperandApiKey: \"0e733a7f-9b9c-48c6-9691-b54fa1c8b910\"\n```\n7. Make a commit and push your changes to GitHub. See the [[hosting|hosting]] page if you haven't done this already.\n\t1. This step is *required* for Operand to be able to properly index your content. \n\t2. Head over to Dashboard \u003e Objects and select the collection that you made earlier\n8. Press `(Cmd + K)` to open the omnibar again and select 'Create GitHub Repo'\n\t1. Set the 'Repository Label' to `Quartz`\n\t2. Set the 'Repository Owner' to your GitHub username\n\t3. Set the 'Repository Ref' to `master`\n\t4. Set the 'Repository Name' to the name of your repository (usually just `quartz` if you forked the repository without changing the name)\n\t5. Leave 'Root Path' and 'Root URL' empty\n9. Wait for your repository to index and enjoy natural language search in Quartz! Operand refreshes the index every 2h so all you need to do is just push to GitHub to update the contents in the search.","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":[]},"/private/notes/setup":{"title":"Setup","content":"\n## Making your own Quartz\nSetting up Quartz requires a basic understanding of `git`. If you are unfamiliar, [this resource](https://resources.nwplus.io/2-beginner/how-to-git-github.html) is a great place to start!\n\n### Forking\n\u003e A fork is a copy of a repository. Forking a repository allows you to freely experiment with changes without affecting the original project.\n\nNavigate to the GitHub repository for the Quartz project:\n\n📁 [Quartz Repository](https://github.com/jackyzha0/quartz)\n\nThen, Fork the repository into your own GitHub account. If you don't have an account, you can make on for free [here](https://github.com/join). 
More details about forking a repo can be found on [GitHub's documentation](https://docs.github.com/en/get-started/quickstart/fork-a-repo).\n\n### Cloning\nAfter you've made a fork of the repository, you need to download the files locally onto your machine. Ensure you have `git`, then type the following command replacing `YOUR-USERNAME` with your GitHub username.\n\n```shell\ngit clone https://github.com/YOUR-USERNAME/quartz\n```\n\n## Editing\nGreat! Now you have everything you need to start editing and growing your digital garden. If you're ready to start writing content already, check out the recommended flow for editing notes in Quartz.\n\n\u003e ✏️ Step 2: [Editing Notes in Quartz](editing.md)\n\nHaving problems? Checkout our [FAQ and Troubleshooting guide](troubleshooting.md).\n","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":["setup"]},"/private/notes/showcase":{"title":"Showcase","content":"\nWant to see what Quartz can do? Here are some cool community gardens :)\n\n- [Quartz Documentation (this site!)](https://quartz.jzhao.xyz/)\n- [Jacky Zhao's Garden](https://jzhao.xyz/)\n- [Scaling Synthesis - A hypertext research notebook](https://scalingsynthesis.com/)\n- [AWAGMI Intern Notes](https://notes.awagmi.xyz/)\n- [Shihyu's PKM](https://shihyuho.github.io/pkm/)\n- [Chloe's Garden](https://garden.chloeabrasada.online/)\n- [SlRvb's Site](https://slrvb.github.io/Site/)\n- [Course notes for Information Technology Advanced Theory](https://a2itnotes.github.io/quartz/)\n- [Brandon Boswell's Garden](https://brandonkboswell.com)\n- [Siyang's Courtyard](https://siyangsun.github.io/courtyard/)\n\nIf you want to see your own on here, submit a [Pull Request adding yourself to this file](https://github.com/jackyzha0/quartz/blob/hugo/content/notes/showcase.md)!\n","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":[]},"/private/notes/troubleshooting":{"title":"Troubleshooting and FAQ","content":"\nStill having trouble? Here are a list of common questions and problems people encounter when installing Quartz.\n\nWhile you're here, join our [Discord](https://discord.gg/cRFFHYye7t) :)\n\n### Does Quartz have Latex support?\nYes! See [CJK + Latex Support (测试)](CJK%20+%20Latex%20Support%20(测试).md) for a brief demo.\n\n### Can I use \\\u003cObsidian Plugin\\\u003e in Quartz?\nUnless it produces direct Markdown output in the file, no. There currently is no way to bundle plugin code with Quartz.\n\nThe easiest way would be to add your own HTML partial that supports the functionality you are looking for.\n\n### My GitHub pages is just showing the README and not Quartz\nMake sure you set the source to deploy from `master` (and not `hugo`) using `/ (root)`! See more in the [hosting](hosting.md) guide\n\n### Some of my pages have 'January 1, 0001' as the last modified date\nThis is a problem caused by `git` treating files as case-insensitive by default and some of your posts probably have capitalized file names. You can turn this off in your Quartz by running this command.\n\n```shell\n# in the root of your Quartz (same folder as config.toml)\ngit config core.ignorecase true\n\n# or globally (not recommended)\ngit config --global core.ignorecase true\n```\n\n### Can I publish only a subset of my pages?\nYes! Quartz makes selective publishing really easy. Heres a guide on [excluding pages from being published](ignore%20notes.md).\n\n### Can I host this myself and not on GitHub Pages?\nYes! All built files can be found under `/public` in the `master` branch. 
More details under [hosting](hosting.md).\n\n### `command not found: hugo-obsidian`\nMake sure you set your `GOPATH` correctly! This will allow your terminal to correctly recognize `hugo-obsidian` as an executable.\n\n```shell\n# Add the following 2 lines to your ~/.bash_profile\nexport GOPATH=/Users/$USER/go\nexport PATH=$GOPATH/bin:$PATH\n\n# In your current terminal, to reload the session\nsource ~/.bash_profile\n```\n\n### How come my notes aren't being rendered?\nYou probably forgot to include front matter in your Markdown files. You can either setup [Obsidian](obsidian.md) to do this for you or you need to manually define it. More details in [the 'how to edit' guide](editing.md).\n\n### My custom domain isn't working!\nWalk through the steps in [the hosting guide](hosting.md) again. Make sure you wait 30 min to 1 hour for changes to take effect.\n\n### How do I setup Google Analytics?\nYou can edit it in `config.toml` and either use a V3 (UA-) or V4 (G-) tag.\n\n### How do I change the content on the home page?\nTo edit the main home page, open `/content/_index.md`.\n\n### How do I change the colours?\nYou can change the theme by editing `assets/custom.scss`. More details on customization and themeing can be found in the [customization guide](config.md).\n\n### How do I add images?\nYou can put images anywhere in the `/content` folder.\n\n```markdown\nExample image (source is in content/notes/images/example.png)\n![Example Image](/content/notes/images/example.png)\n```\n\n### My Interactive Graph and Backlinks aren't up to date\nBy default, the `linkIndex.json` (which Quartz needs to generate the Interactive Graph and Backlinks) are not regenerated locally. To set that up, see the guide on [local editing](editing.md)\n\n### Can I use React/Vue/some other framework?\nNot out of the box. You could probably make it work by editing `/layouts/_default/single.html` but that's not what Quartz is designed to work with. 99% of things you are trying to do with those frameworks you can accomplish perfectly fine using just vanilla HTML/CSS/JS.\n\n## Still Stuck?\nQuartz isn't perfect! If you're still having troubles, file an issue in the GitHub repo with as much information as you can reasonably provide. Alternatively, you can message me on [Twitter](https://twitter.com/_jzhao) and I'll try to get back to you as soon as I can.\n\n🐛 [Submit an Issue](https://github.com/jackyzha0/quartz/issues)","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":[]},"/private/notes/updating":{"title":"Updating","content":"\nHaven't updated Quartz in a while and want all the cool new optimizations? On Unix/Mac systems you can run the following command for a one-line update! This command will show you a log summary of all commits since you last updated, press `q` to acknowledge this. Then, it will show you each change in turn and press `y` to accept the patch or `n` to reject it. Usually you should press `y` for most of these unless it conflicts with existing changes you've made! 
\n\n```shell\nmake update\n```\n\nOr, if you don't want the interactive parts and just want to force update your local garden (this assumed that you are okay with some of your personalizations been overriden!)\n\n```shell\nmake update-force\n```\n\nOr, manually checkout the changes yourself.\n\n\u003e [!warning] Warning!\n\u003e\n\u003e If you customized the files in `data/`, or anything inside `layouts/`, your customization may be overwritten!\n\u003e Make sure you have a copy of these changes if you don't want to lose them.\n\n\n```shell\n# add Quartz as a remote host\ngit remote add upstream git@github.com:jackyzha0/quartz.git\n\n# index and fetch changes\ngit fetch upstream\ngit checkout -p upstream/hugo -- layouts .github Makefile assets/js assets/styles/base.scss assets/styles/darkmode.scss config.toml data \n```\n","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":[]},"/private/requirements/overview":{"title":"Logos Network Requirements Overview","content":"\nThis document describes the requirements of the Logos Network.\n\n\u003e Network sovereignty is an extension of the collective sovereignty of the individuals within. \n\n\u003e Meaningful participation in the network should be acheivable by affordable and accessible consumer grade hardware.\n\n\u003e Privacy by default. \n\n\u003e A given CiC should have the option to gracefully exit the network and operate on its own.\n\n","lastmodified":"2023-07-16T15:52:26.102715423Z","tags":["requirements"]},"/private/roadmap/consensus/candidates/carnot/FAQ":{"title":"Frequently Asked Questions","content":"\n## Network Requirements and Assumptions\n\n### What assumptions do we need Waku to fulfill? - Corey\n\u003e `Moh:` Waku needs to fill the following requirements, taken from the Carnot paper:\n\n\u003e **Definition 3** (Probabilistic Reliable Dissemination). _After the GST, and when the leader is correct, all the correct nodes deliver the proposal sent by the leader (w.h.p)._\n\n\u003e **Definition 4** (Probabilistic Fulfillment). _After the GST, and when the current and previous leaders are correct, the number of votes collected by teh current leader is $2c+1$ (w.h.p)._\n\n## Tradeoffs\n\n### I think the main clear disadvantage of such a scheme is the added latency of the multiple layers. - Alvaro\n\n\u003e `Moh:` The added latency will be O(log(n/C)), where C is the committee size. But I guess it will be hard to avoid it. Though it also depends on how fast the network layer (potentially Waku) propagats msgs and also on execution time of the transaction as well.\n\n\u003e `Alvaro:` Well IIUC the only latency we are introducing is directly proportional to the levels of subcommitee nesting (ie the log(n/C)), which is understandably the price to pay. We have to make sure though that what we gain by introducing this is really worth the extra cost vs the typical comittee formation via randao or perhaps VDFs\n\n\u003e `Moh:` Again the Typical committee formation with randao can reduce their wait time value to match our latency, but then it becomes vulnerable and fail if the network latency becomes greater than their slot interval. If they keep it too large it may not fail but becomes slow. We won't have that problem. If an adversary has the power to slow down the network then their liveness will fail, whereas we won't have that issue.\n\n## How would you compare Aptos and Carnot? - Alvaro\n\n\u003e `Moh:` It is variant of DiemBFT, Sui is based on Nahrwal, both cannot scale to more than few hunderd of nodes. 
That is why they achieve that low latency.\n\n\u003e `Alvaro:` Yes, so they need to select a committee of that size in order to operate at that latency What's wrong with selecting a committee vs Carnot's solution? This I'm asking genuinely to understand and because everyone will ask this question when we release.\n\n\u003e `Moh:` When you select a committee you have to wait for a time slot to make sure the result of consensus has propagated. Again strong synchrony assumption (slot time), formation of forks, increase in PoS attack vector come into play\nWithin committee the protocol does not need a wait time but for its results to get propagated if scalability is to be achieved, then wait time has to be added or signatures have to be collected from thousands of nodes.\n\n\u003e `Alvaro:` Can you elaborate?\n\n\u003e `Moh:` Ethereum (and any other protocol who runs the consensus in a single committee selected from a large group on nodes) has wait time so that the output of the consenus propagates to all honest nodes before the next committee is selected. Else the next committee will fail or only forks will be formed and chain length won't increase. But since this wait time as stated, increases latency, makes the protocol vulnerable, Ethereum wants to avoid it to achieve responsivess. To avoid wait time (add responsiveness) a protocol has to collect attestation signatures from 2/3rd of all nodes (not a single committee) to move to the second round (Carnot is already responsive). But aggregating and verifying signatures thousands of signatures is expensive and time consuming. This is why they are working to improve BLS signatures. Instead we have changed the consensus protocol in such a way that a small number of signatures need to be aggregated and verified to achieve responsiveness and fast finality. We can further improve performance by using the improved BLS signatures.\n\n\u003e One cannot achieve fast finality while running the consensus in a small committee. Because attestation of a Block within the single committee is not enough. This block can be averted if the leader of the next committee has not seen it. Therefore, there should be enough delay so that all honest nodes can see it. This is why we have this wait/slot time. Another issue can be a malicious leader from the next chosen committee can also avert a block of honest leader and hence preventing honest leaders from getting rewards. If blocks of honest leaders are averted for long time, stake of malicious leaders will increase. Moreover, malicious leaders can delay blocks of honest nodes by making fork and averting them. Addressing these issues will further make the protocol complex, while still laking fast finality.\n\n## Data Distribution\n\n### How much failure rate of erasure code transmission are we expecting. Basically, what are the EC coding parameters that we expect to be sending such that we have some failure rate of transmission? Has that been looked into? - Dmitriy\n\u003e `Moh:` This is a great question and it points to the tension between the failure rate vs overhead. We have briefly looked into this (today me and Marcin @madxor discussed such cases), but we haven’t thoroughly analyzed this. In our case, the rate of failure also depends on committee size. We look into $10^{-3}$ to $10^{-6}$ probability of failure. And in this case, the coding overhead can be somewhere between 200%-500% approximately. 
This means for a committee size of 500 (while expecting receipt of messages from 251 correct nodes), for a failure rate of $10^{-6}$ a single node has to send \u003e 6Mb of data for a 1Mb of actual data. Though 5x overhead is large, it still prevent us from sending/receiving 500 Mb of data in return for a failure probability of 1 proposal out of 1 million. From the protocol perspective, we can address EC failures in multiple ways: a: Since the root committee only forwards the coded chunks only when they have successfully rebuilt the block. This means the root committee can be contacted to download additional coded chunks to decode the block. b: We allow this failure and let the leader be replaced but since there is proof that the failure is due to the reason that a decoder failed to reconstruct the block, therefore, the leader cannot be punished (if we chose to employ punishment in PoS). \n\n### How much data should a given block be. Are there limits on this and if so, what are they and what do they depend on? - Dmitriy\n\u003e `Moh:` This question can be answered during simulations and experiments over links of different bandwidths and latencies. We will test the protocol performances with different block sizes. As we know increasing the block size results in increased throughput as well as latency. What is the most appropriate block size can be determined once we observe the tradeoff between throughput vs latency.\n\n## Signature Propagation\n\n### Who sends the signatures up from a given committee? Do that have any leadered power within the committee? - Tanguy\n\u003e `Moh:` Each node in a committee multicasts its vote to all members of the parent committee. Since the size of the vote is small the bit complexity will be low. Introducing a leader within each committee will create a single point of failure within each committee. This is why we avoid maintaining a leader within each committee\n\n## Network Scale\n\n### What is our expected minimum number of nodes within the network? - Dmitriy\n\u003e `Moh:` For a small number of nodes we can have just a single committee. But I am not sure how many nodes will join our network \n\n## Byzantine Behavior\n\n### Can we also consider a flavor that adds attestation/attribution to misbehaving nodes? That will come at a price but there might be a set of use cases which would like to have lower performance with strong attribution. Not saying that it must be part of the initial design, but can be think-through/added later. - Marcin\n\u003e `Moh:` Attestation to misbehaving nodes is part of this protocol. For example, if a node sends an incorrect vote or if a leader proposes an invalid transaction, then this proof will be shared with the network to punish the misbehaving nodes (Though currently this is not part of pseudocode). But it is not possible to reliably prove the attestation of not participation.\n\n\u003e `Marcin:` Great, and definitely, we cannot attest that a node was not participating - I was not suggesting that;). But we can also think about extending the attestation for lazy-participants case (if it’s not already part of the protocol).\n\n\u003e `Moh:` OK, thanks for the clarification 😁 . Of course we can have this feature to forward the proof of participation of successor committees. In the first version of Carnot we had this feature as a sliding window. One could choose the size of the window (in terms of tree levels) for which a node should forward the proof of participation. In the most recent version the size of sliding window is 0. 
And it is 1 for the root committee. It means root committee members have to forward the proof of participation of their child committee members. Since I was able to prove protocol correctness without forwarding the proofs so we avoid it. But it can be part of the protocol without any significant changes in the protocol\n\n\u003e If the proof scheme is efficient ( as the results you presented) in practice and the cost of creating and verifying proofs is not significant then actually adding proofs can be good. But not required.\n\n### Also, how do you reward online validators / punish offline ones if you can't prove at the block level that someone attested or not? - Tanguy\n\u003e `Moh:` This is very tricky and so far no one has done it right (to my knowledge). Current reward mechanism for attestation, favours fast nodes.This means if malicious nodes in the network are fast, they can increase their stake in the network faster than the honest nodes and eventually take control of the network. Or in the case of Ethereum a Byzantine leader can include signature of malicious nodes more frequently in the proof of attestation, hence malicious nodes will be rewarded more frequently. Also let me add that I don't have definite answer to your question currently, but I think by revising the protocol assumptions, incentive mechanism and using a game theoretical approach this problem can be resolved.\n\n\u003e An honest node should wait for a specific number of children votes (to make sure everyone is voting on the same proposal) before voting but does not need to provide any cryptographic proof. Though we build a threshold signature from root committee members and it’s children but not from the whole tree. As long as enough number of nodes follow the the protocol we should be fine. I am working on protocol proofs. Also I think bugs should be discovered during development and testing phase. Changing protocol to detect potential bug might not be a good practice.\n\n### doesn't having randomly distributed malicious nodes (say there is a 20%) increase the odds that over a third of a committee end up being from those malicious ones? It seems intuitive: since a 20% at the global scale is always \u003c1/3, but when randomly distributed there is always non-zero chance they end up in a single group, thus affecting liveness more and more the closer we get to that global 1/3. Consequently, if I'm understanding the algorithm correctly, it would have worse liveness guarantees that classical pBFT, say with a randomly-selected commitee from the total set. - Alvaro\n\n\u003e `Alexander:` We assume that fraction of malicious nodes is $1/4$ and given we chooses comm. sizes, which will depend on total number of nodes, appropriately this guarantees that with high probability we are below $1/3$ in each committee.\n\n\u003e `Alvaro:` ok, but then both the global guarantee is below that current \"standard\" of 1/3 of malicious nodes and even then we are talking about non-zero probabilities that a comm has the power to slow down consensus via requiring reformation of comms (is this right?)\n\n\u003e `Alexander:` This is the price we pay to improve scalability. Also these probabilities of failure can be very low.\n\n### What happens in Carnot when one committee is taken over by \u003e1/3 intra-comm byzantine nodes? - Alvaro\n\n\u003e `Moh:` When there is a failure the overlay is recalculated. 
By gradually increasing the fault tolerance by a small value, the probability of failure of a committee slightly increases but upon recalculating the correct overlay, inactive nodes that caused the failure of previous overlay (when no committee has more than 1/3 Byzantine nodes) will be slashed.\n\n\n\n## Synchronicity\n\n### How to guarantee synchronicity. In particular how to avoid that in a big network different nodes see a proposal with 2c+1 votes but different votes and thus different random seed - Giacomo\n\n\u003e `Moh:` The assumption is that there exists some known finite time bound Δ and a special event called GST (Global Stabilization Time) such that:\n\n\u003e The adversary must cause the GST event to eventually happen after some unknown finite time. Any message sent at time x must be delivered by time $\\delta + \\text{max}(x,GST)$. In the Partial synchrony model, the system behaves asynchronously till GST and synchronously after GST.\n\n\u003e Moreover, votes travel one level at a time from tree leaves to the tree root. We only need the proof of votes of root+child committees to conclude with a high probability that the majority of nodes have voted.\n\n### That's a timeout? How does this work exactly without timing assumptions? Trying to find this in the document -Alvaro\n\n\u003e `Moh:` Each committee only verifies the votes of its child committees. Once a verified 2/3rd votes of its child members, it then sends it vote to its parent. In this way each layer of the tree verifies the votes (attests) the layer below. Thus, a node does not have to collect and verify 2/3rd of all thousands of votes (as done in other responsive BFTs) but only from its child nodes.","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["Carnot","consensus"]},"/private/roadmap/consensus/candidates/carnot/overview":{"title":"Carnot Overview","content":"\nCarnot (formerly LogosBFT) is a Byzantine Fault Tolerant (BFT) [consensus](roadmap/consensus/index.md) candidate for the Nomos Network that utilizes Fountain Codes and a committees tree structure to optimize message propagation in the presence of a large number of nodes, while maintaining high througput and fast finality. More specifically, these are the research contributions in Carnot. To our knowledge, Carnot is the first consensus protocol that can achieve together all of these properties:\n\n1. Scalability: Carnot is highly scalable, scaling to thousands of nodes.\n2. Responsiveness: The ability of a protocol to operate with the speed of a wire but not a maximum delay (block delay, slot time, etc.) is called responsiveness. Responsiveness reduces latency and helps the Carnot achieve Fast Finality. Moreover, it improves Carnot's resilience against adversaries that can slow down network traffic. \n3. Fork avoidance: Carnot avoids the formation of forks in a happy path. Forks formation has the following adverse consequences that the Carnot avoids.\n 1. Wastage of resources on orphan blocks and reduced throughput with increased latency for transactions in orphan blocks\n 2. 
Increased attack vector on PoS as attackers can employ a strategy to force the network to accept their fork resulting in increased stake for adversaries.\n\n- [FAQ](FAQ.md): Here is a page that tracks various questions people have around Carnot.\n\n## Work Streams\n\n### Current State of the Art\nAn ongoing survey of the current state of the art around Consensus Mechanisms and their peripheral dependencies is being conducted by Tuanir, and can be found in the following WIP Overleaf document: \n- [WIP Consensus SoK](https://www.overleaf.com/project/633acc1acaa6ffe456d1ab1f)\n\n### Committee Tree Overlay\nThe basis of Carnot is dependent upon establishing an committee overlay tree structure for message distribution. \n\nAn overview video can be found in the following link: \n- [Carnot Overview by Moh during Offsite](https://drive.google.com/file/d/17L0JPgC0L1ejbjga7_6ZitBfHUe3VO11/view?usp=sharing)\n\nThe details of this are being worked on by Moh and Alexander and can be found in the following overleaf documents: \n- [Moh's draft](https://www.overleaf.com/project/6341fb4a3cf4f20f158afad3)\n- [Alexander's notes on the statistical properties of committees](https://www.overleaf.com/project/630c7e20e56998385e7d8416)\n- [Alexander's python code for computing committee sizes](https://github.com/AMozeika/committees)\n\nA simulation notebook is being worked on by Corey to investigate the properties of various tree overlay structures and estimate their practical performance:\n- [Corey's Overlay Jupyter Notebook](https://github.com/logos-co/scratch/tree/main/corpetty/committee_sim)\n\n#### Failure Recovery\nThere exists a timeout that triggers an overlay reconfiguration. Currently work is being done to calculate the probabilities of another failure based on a given percentage of byzantine nodes within the network. \n- [Recovery Failure Probabilities]() - LINK TO WORK HERE\n\n### Random Beacon\nA random beacon is required to choose a leader and establish a seed for defining the overlay tree. Marcin is working on the various avenues. His previous presentations can be found in the following presentation slides (in chronological order):\n- [Intro to Multiparty Random Beacons](https://cloud.logos.co/index.php/s/b39EmQrZRt5rrfL)\n- [Circles of Trust](https://cloud.logos.co/index.php/s/NXJZX8X8pHg6akw)\n- [Compact Certificates of Knowledge](https://cloud.logos.co/index.php/s/oSJ4ykR4A55QHkG)\n\n### Erasure Coding (LT Codes / Fountain Codes / Raptor Codes)\nIn order to reduce message complexity during propagation, we are investigating the use of Luby Transform (LT) codes, more specifically [Fountain Codes](https://en.wikipedia.org/wiki/Fountain_code), to break up the block to be propagated to validators and recombined by local peers within a committee. \n- [LT Code implementation in Rust](https://github.com/chrido/fountain) - unclear about legal status of LT or Raptor Codes, it is currently under investigation.\n\n","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","candidate","Carnot"]},"/private/roadmap/consensus/candidates/claro":{"title":"Claro: Consensus Candidate","content":"\n\n\n**Claro** (formerly Glacier) is a consensus candidate for the Logos network that aims to be an improvement to the Avalanche family of consensus protocols. \n\n\n### Implementations\nThe protocol has been implemented in multiple languages to facilitate learning and testing. 
The individual code repositories can be found in the following links:\n- Rust (reference)\n- Python\n- Common Lisp\n\n### Simulations/Experiments/Analysis\nIn order to test the performance of the protocol, and how it stacks up against the Avalanche family of protocols, we have performed a multitude of simulations and experiments under various assumptions. \n- [Alvaro's initial Python implementations and simulation code](https://github.com/status-im/consensus-models)\n\n### Specification\nThe Claro consensus protocol is currently being drafted into a specification so that other implementations can be created. Its draft resides under [Vac](https://vac.dev) and can be tracked [here](https://github.com/vacp2p/rfc/pull/512/).\n\n### Additional Information\n\n","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","candidate","claro"]},"/private/roadmap/consensus/development/overview":{"title":"Development Work","content":"","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","development"]},"/private/roadmap/consensus/development/prototypes":{"title":"Consensus Prototypes","content":"\nConsensus Prototypes is a collection of Rust implementations of the [Consensus Candidates](tags/candidates).\n\n## Tiny Node\n\n\n## Required Roles\n- Lead Developer (filled)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","development"]},"/private/roadmap/consensus/overview":{"title":"Consensus Work","content":"\nConsensus is the foundation of the network. It is how a group of peer-to-peer nodes understands how to agree on information in a distributed way, particularly in the presence of Byzantine actors. \n\n## Consensus Roadmap\n### Consensus Candidates\n- [Carnot](private/roadmap/consensus/candidates/carnot/overview.md) - Carnot is the current leading consensus candidate for the Nomos network. It is designed to maximize the efficiency of message dissemination while supporting hundreds of thousands of full validators. It gets its name from the thermodynamic concept of the [Carnot Cycle](https://en.wikipedia.org/wiki/Carnot_cycle), which defines maximal efficiency of work from heat through iterative gas expansions and contractions. \n- [Claro](claro.md) - Claro is a variant of the Avalanche Snow family of protocols, designed to be more efficient in the decision-making process by leveraging the concept of \"confidence\" across peer responses. \n\n\n### Theoretical Analysis\n- [snow-family](snow-family.md)\n\n### Development\n- [prototypes](prototypes.md)\n\n## Open Roles\n- [distributed-systems-researcher](distributed-systems-researcher.md)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus"]},"/private/roadmap/consensus/theory/overview":{"title":"Consensus Theory Work","content":"\nThis track of work is dedicated to creating theoretical models of distributed consensus in order to evaluate them from a mathematical standpoint. 
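\n\nAs a concrete reference point for the kind of protocol being modeled, below is a minimal, hypothetical Snowball-style sampling loop in Python. It illustrates the general Snow-family mechanism (repeated peer sampling with an alpha-majority threshold and a beta decision threshold); it is not Claro itself, and the function name, parameter values, and static peer population are invented for this sketch.\n\n```python\nimport random\nfrom collections import Counter\n\n# Toy single-node Snowball-style loop: repeatedly sample k peers and build up\n# confidence until one colour has won beta consecutive rounds. The population\n# of peer opinions is static here, which is a simplification.\ndef snowball(peer_opinions, k=10, alpha=7, beta=15, max_rounds=10_000, seed=42):\n    rng = random.Random(seed)\n    preference = rng.choice(peer_opinions)\n    confidence = Counter()            # per-colour accumulated successes\n    last_winner, consecutive = None, 0\n    for _ in range(max_rounds):\n        sample = rng.sample(peer_opinions, k)\n        colour, votes = Counter(sample).most_common(1)[0]\n        if votes < alpha:             # no alpha-majority: reset the streak\n            consecutive = 0\n            continue\n        confidence[colour] += 1\n        if confidence[colour] > confidence[preference]:\n            preference = colour\n        consecutive = consecutive + 1 if colour == last_winner else 1\n        last_winner = colour\n        if consecutive >= beta:       # decision threshold reached\n            return preference\n    return None                       # undecided within max_rounds\n\n# Example: a static population leaning 80/20 towards 'blue'.\npeers = ['blue'] * 800 + ['red'] * 200\nprint(snowball(peers))\n```\n\nThe analysis tracked below is concerned with exactly these parameters (k, alpha, beta) and the adversarial strategies under which a given parameterization breaks.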
\n\n## Navigation\n- [Snow Family Analysis](snow-family.md)\n","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","theory"]},"/private/roadmap/consensus/theory/snow-family":{"title":"Theoretical Analysis of the Snow Family of Consensus Protocols","content":"\nIn order to evaluate the properties of the Avalanche family of consensus protocols more rigorously than the original [whitepapers](), we work to create an analytical framework to explore and better understand the theoretical boundaries of the underlying protocols, and under what parameterizations they will break versus a set of adversarial strategies.","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["consensus","theory","snow"]},"/private/roadmap/networking/carnot-waku-specification":{"title":"A Specification proposal for using Waku for Carnot Consensus","content":"\n##### Definition Reference \n- $k$ - size of a given committee\n- $n_C$ - number of committees in the overlay, or nodes in the tree\n- $d$ - depth of the overlay tree\n- $n_d$ - number of committees at a given depth of the tree\n\n## Motivation\nIn #Carnot, an overlay is created to facilitate message distribution and voting aggregation. This document will focus on the differentiated channels of communication for message distribution. Whether or not voting aggregation and subsequent traversal back up the tree can utilize the same channels will be investigated later. \n\nThe overlay is described as a binary tree of committees, where an individual in each committee propagates messages to an assigned node in each of their two child committees of the tree, until the leaf nodes have received enough information to reconstitute the proposal block. \n\nThis communication protocol will naturally form \"pools of information streams\" that participants will need to listen to in order to do their assigned work:\n- inner committee communication\n- parent-child chain communication\n- initial leader distribution\n\n### **inner committee communication** \nAll members of a given committee will need to gossip with each other in order to re-form the initial proposal block.\n- This results in $n_C$ communication pools of size $k$.\n\n### **parent-child chain communication** \nThe formation of the committee and the lifecycle of a chunk of erasure-coded data forms a number of \"parent-child\" chains. \n- If we completely minimize the communication between committees, then this results in $k$ communication pools of size $n_C$.\n- It is not clear if individual levels of the tree need to \"execute\" the message to their children, or if the root committee can broadcast to everyone within its assigned parent-chain communication pool at the same time.\n- It is also unclear if individual levels of the tree need to send independent messages to each of their children, or if a unified communication pool can be leveraged at the tree level. This results in $d$ communication pools of size $n_d$. \n\n### **initial leader distribution**\nFor each proposal, a leader needs to distribute the erasure-coded proposal block to the root committee.\n- This results in a single communication pool of size $k(+1)$.\n- The $(+1)$ above is the leader, who could also be a part of the root committee. The leader changes with each block proposal, and we seek to minimize the time between leader selection and a round start. Thus, this results in a requirement that each node in the network must maintain a connection to every node in the root committee. 
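\n\nTo make the pool arithmetic above easier to sanity-check, here is a minimal sketch assuming a perfect binary tree of committees of depth $d$ (so $n_C = 2^{d+1}-1$, with $n_d = 2^d$ committees on the deepest level); the function name and the perfect-tree assumption are illustrative only and are not part of this proposal.\n\n```python\n# Back-of-the-envelope pool counts for a perfect binary committee tree of\n# depth d (root at depth 0) with committees of size k.\ndef overlay_pools(k: int, d: int):\n    n_C = 2 ** (d + 1) - 1   # total committees in the tree\n    n_d = 2 ** d             # committees on the deepest level\n    return {\n        'inner_committee': (n_C, k),      # n_C pools of k nodes each\n        'parent_child_chains': (k, n_C),  # k pools of n_C nodes each\n        'per_level_option': (d, n_d),     # d pools of n_d committees each\n        'leader_to_root': (1, k + 1),     # one pool: root committee + leader\n    }\n\n# Example: committees of 256 nodes and a tree of depth 3 (15 committees).\nfor name, (pools, size) in overlay_pools(k=256, d=3).items():\n    print(name, '-', pools, 'pools of size', size)\n```\n\nNote that the per-level option counts pool size in committees (matching the bullet above), while the other entries count nodes.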
\n\n## Proposal\nThis part of the document proposes using various aspects of Waku to facilitate both the setup of the above-mentioned communication pools and encryption schemes that add a layer of privacy (and hopefully efficiency) to message distribution. \n\nWe seek to minimize the availability of data such that an individual has only the information needed to do their job and nothing more.\n\nWe also seek to minimize the number of messages being passed such that eventually everyone can reconstruct the initial proposal block.\n\n`???` For Waku-Relay, 6 connections is optimal, resulting in latency ???\n\n`???` Is it better to have multiple pubsub topics with a simple encryption scheme or a single one with a complex encryption scheme?\n\nAs there seems to be a lot of dynamic change from one proposal to the next, I would expect [`noise`](https://vac.dev/wakuv2-noise) to be a quality candidate to facilitate the creation of secure ephemeral keys in the to-be-proposed encryption scheme. \n\nIt is also of interest how [`contentTopics`](https://rfc.vac.dev/spec/23/) can be leveraged to optimize the communication pools. \n\n## Whiteboard diagram and notes\n![Whiteboard Diagram](images/Overlay-Communications-Brainstorm.png)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["waku","carnot","networking","consensus"]},"/private/roadmap/networking/overview":{"title":"P2P Networking Overview","content":"\nThis page summarizes the work around the P2P networking layer of the Nomos project.\n\n## Waku\n[Waku](https://waku.org) is a privacy-preserving, ephemeral, peer-to-peer (P2P) suite of messaging protocols which is developed under [Vac](https://vac.dev) and maintained/productionized by the [Logos Collective](https://logos.co). \n\nIt is hoped that Nomos can leverage the work of the Waku project to provide the P2P networking layer and peripheral services associated with passing messages around the network. Below is a list of the associated work to investigate the use of Waku within the Nomos Project. \n\n### Scalability and Fault-Tolerance Studies\nCurrently, the amount of research and analysis of the scalability of Waku is not sufficient to give enough confidence that Waku can serve as the networking layer for the Nomos project. Thus, we are pushing this analysis forward by investigating the various boundaries of scale for Waku. Below is a list of endeavors in this direction which we hope serve the broader community: \n- [Status' use of Waku study w/ Kurtosis](status-waku-kurtosis.md)\n- [Using Waku for Carnot Overlay](carnot-waku-specification.md)\n\n### Rust implementations\nWe have created and maintain a stop-gap solution for using Waku with the Rust programming language, which wraps the [go-waku](https://github.com/status-im/go-waku) library in Rust and publishes it as a crate. This library allows us to run tests with our [Tiny Node](roadmap/development/prototypes.md#Tiny-Node) implementation more quickly while also enabling other projects in the ecosystem to leverage Waku within their Rust codebases. \n\nIt is desired that we implement a more robust and efficient Rust library for Waku, but this is a significant amount of work. 
\n\nLinks:\n- [Rust bindings to go-waku repo](https://github.com/waku-org/waku-rust-bindings)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["networking","overview"]},"/private/roadmap/networking/status-network-agents":{"title":"Status Network Agents Breakdown","content":"\nThis page creates a model to describe the impact of the various clients within the Status ecosystem by describing their individual contributions to the messages within the Waku network they leverage. \n\nThis model will serve to create a realistic network topology while also informing the appropriate _dimensions of scale_ that are relevant to explore in the [Status Waku scalability study](status-waku-kurtosis.md).\n\nStatus has three main clients that users interface with (in increasing \"network weight\" order):\n- Status Web\n- Status Mobile\n- Status Desktop\n\nEach of these clients has differing (on average) resources available to them, and thus provides and consumes different Waku protocols and services within the Status network. Here we will detail their associated messaging impact on the network using the following model:\n\n```\nAgent\n - feature\n - protocol\n - contentTopic, messageType, payloadSize, frequency\n```\n\nBy describing all `Agents` and their associated feature list, we should be able to do the following:\n\n- Estimate how much impact per unit time an individual `Agent` has on the Status network\n- Create a realistic network topology and usage within a simulation framework (_e.g._ Kurtosis)\n- Facilitate a Status Specification of `Agents`\n- Set an example for future agent-based modeling and simulation work for the Waku protocol suite \n\n## Status Web\n\n## Status Mobile\n\n## Status Desktop\nStatus Desktop serves as the backbone for the Status Network, as the software runs on hardware that has more available resources, typically has more stable and robust network connections, and generally has a drastically lower churn (or none at all). This results in it running the most Waku protocols for longer periods of time, resulting in the heaviest usage of the Waku network w.r.t. messaging. \n\nHere is the model breakdown of its usage:\n```\nStatus Desktop\n - Prekey bundle broadcast\n - Account sync\n - Historical message delivery\n - Waku-Relay (answering message queries)\n - Message propagation\n - Waku-Relay\n - Waku-Lightpush (receiving)\n```","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["status","waku","scalability"]},"/private/roadmap/networking/status-waku-kurtosis":{"title":"Status' use of Waku - A Scalability Study","content":"\n[Status](https://status.im) is the largest consumer of the Waku protocol, leveraging it for their entire networking stack. Their upcoming release of Status Desktop and the associated Communities product will heavily push the limits of what Waku can do. As mentioned in the [Networking Overview](private/roadmap/networking/overview.md) page, rigorous scalability studies of Waku (v2) have yet to be conducted. \n\nWhile these studies most immediately benefit the Status product suite, it behooves the Nomos Project to assist, as the lessons learned immediately inform us of the limits of what the Waku protocol suite can handle, and how that fits within our [Technical Requirements](private/requirements/overview.md).\n\nThis work has been kicked off as a partnership with the [Kurtosis](https://kurtosis.com) distributed systems development platform. 
It is our hope that the experience and acumen gained during this partnership and study will serve us in the future with respect to Nomos development and, more broadly, all projects under the Logos Collective. \n\nAs such, here is an overview of the various resources towards this endeavor:\n- [Status Network Agent Breakdown](status-network-agents.md) - A document that describes the archetypal agents that participate in the Status Network and their associated Waku consumption.\n- [Wakurtosis repo](https://github.com/logos-co/wakurtosis) - A Kurtosis module to run scalability studies\n- [Waku Topology Test repo](https://github.com/logos-co/Waku-topology-test) - a Python script that facilitates setting up a reasonable network topology for the purpose of injecting the network configuration into the above Kurtosis repo\n- [Initial Vac forum post introducing this work](https://forum.vac.dev/t/waku-v2-scalability-studies/142)\n- [Waku Github Issue detailing work progression](https://github.com/waku-org/pm/issues/2)\n - this is also a place to maintain communications of progress\n- [Initial Waku V2 theoretical scalability study](https://vac.dev/waku-v1-v2-bandwidth-comparison)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["networking","scalability","waku"]},"/private/roadmap/virtual-machines/overview":{"title":"overview","content":"\n## Motivation\nLogos seeks to use a privacy-first virtual machine for transaction execution. We believe this can only be achieved through zero-knowledge. The majority of current work in the field focuses more on the aggregation and subsequent verification of transactions. This leads us to explore the research and development of a privacy-first virtual machine. \n\nLINK TO APPROPRIATE NETWORK REQUIREMENTS HERE\n\n#### Educational Resources\n- primer on Zero Knowledge Virtual Machines - [link](https://youtu.be/GRFPGJW0hic)\n\n### Implementations:\n- TinyRAM - link\n- CairoVM\n- zkSync\n- Hermes\n- [MIDEN](https://polygon.technology/solutions/polygon-miden/) (Polygon)\n- RISC-0\n\t- RISC-0 Rust Starter Repository - [link](https://github.com/risc0/risc0-rust-starter)\n\t- targets RISC-V architecture\n\t- benefits:\n\t\t- a lot of languages already compile to RISC-V\n\t- negatives:\n\t\t- not optimized for the EVM, where most tooling currently exists\n\n## General Building Blocks of a ZK-VM\n- CPU\n\t- modeled with \"execution trays\"\n- RAM\n\t- overhead to look out for\n\t\t- range checks\n\t\t- bitwise operations\n\t\t- hashing\n- Specialized circuits\n- Recursion\n\n## Approaches\n- zk-WASM\n- zk-EVM\n- RISC-0\n\t- RISC-0 Rust Starter Repository - [link](https://github.com/risc0/risc0-rust-starter)\n\t- targets RISC-V architecture\n\t- benefits:\n\t\t- a lot of languages already compile to RISC-V\n\t\t- https://youtu.be/2MXHgUGEsHs - Why use the RISC Zero zkVM?\n\t- negatives:\n\t\t- not optimized for the EVM, where most tooling currently exists\n\n## General workstreams\n- bytecode compiler\n- zero-knowledge circuit design\n- opcode architecture (???)\n- engineering\n- required proof system\n- control flow\n\t- MAST (as used in MIDEN)\n\n## Roles\n- [ZK Research Engineer](zero-knowledge-research-engineer.md)\n- Senior Rust Developer\n","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["virtual machines","zero knowledge"]},"/private/roles/distributed-systems-researcher":{"title":"Open Role: Distributed Systems Researcher","content":"\n\n## About Status\n\nStatus is building the tools and infrastructure for the advancement of a secure, private, and open web3. 
\n\nWith the high level goals of preserving the right to privacy, mitigating the risk of censorship, and promoting economic trade in a transparent, open manner, Status is building a community where anyone is welcome to join and contribute.\n\nAs an organization, Status seeks to push the web3 ecosystem forward through research, creation of developer tools, and support of the open source community. \n\nAs a product, Status is an open source, Ethereum-based app that gives users the power to chat, transact, and access a revolutionary world of DApps on the decentralized web. But Status is also building foundational infrastructure for the whole Ethereum ecosystem, including the Nimbus ETH 1.0 and 2.0 clients, the Keycard hardware wallet, and the Waku messaging protocol (a continuation of Whisper).\n\nAs a team, Status has been completely distributed since inception. Our team is currently 100+ core contributors strong, and welcomes a growing number of community members from all walks of life, scattered all around the globe. \n\nWe care deeply about open source, and our organizational structure has minimal hierarchy and no fixed work hours. We believe in working with a high degree of autonomy while supporting the organization's priorities.\n\n \n\n## Who are we?\n\nWe are the Blockchain Infrastructure Team, and we are building the foundation used by other projects at the Status Network. We are researching consensus algorithms, Multi-Party Computation techniques, ZKPs and other cutting-edge solutions with the aim to take the blockchain technology to the next level of security, decentralization and scalability for a wide range of use cases. We are currently in a research phase, working with models and simulations. In the near future, we will start implementing the research. You will have the opportunity to participate in developing -and improving- the state of the art of blockchain technologies, as well as turning it into a reality\n\n## The job\n\n**Responsibilities:**\n- This role is dedicated to pure research\n- Primarily, ensuring that solutions are sound and diving deeper into their formal definition.\n- Additionally, he/she would be regularly going through papers, bringing new ideas and staying up-to-date.\n- Designing, specifying and verifying distributed systems by leveraging formal and experimental techniques.\n- Conducting theoretical and practical analysis of the performance of distributed systems.\n- Designing and analysing incentive systems.\n- Collaborating with both internal and external customers and the teams responsible for the actual implementation.\n- Researching new techniques for designing, analysing and implementing dependable distributed systems.\n- Publishing and presenting research results both internally and externally.\n\n \n**Ideally you will have:**\n[Don’t worry if you don’t meet all of these criteria, we’d still love to hear from you anyway if you think you’d be a great fit for this role!]\n- Strong background in Computer Science and Math, or a related area.\n- Academic background (The ability to analyze, digest and improve the State of the Art in our fields of interest. 
Specifically, familiarity with formal proofs and/or the scientific method.)\n- Distributed Systems with a focus on Blockchain\n- Analysis of algorithms\n- Familiarity with Python and/or complex systems modeling software\n- Deep knowledge of algorithms (much more academic, such as have dealt with papers, moving from research to pragmatic implementation)\n- Experience in analysing the correctness and security of distributed systems.\n- Familiarity with the application of formal method techniques. \n- Comfortable with “reverse engineering” code in a number of languages including Java, Go, Rust, etc. Even if no experience in these languages, the ability to read and \"reverse engineer\" code of other projects is important.\n- Keen communicator, eager to share your work in a wide variety of contexts, like internal and public presentations, blog posts and academic papers.\n- Capable of deep and creative thinking.\n- Passionate about blockchain technology in general.\n- Able to manage the uncertainties and ambiguities associated with working in a remote-first, distributed, decentralised environment.\n- A strong alignment to our principles: https://status.im/about/#our-principles\n\n\n**Bonus points:**\n- Experience working remotely. \n- Experience working for an open source organization. \n- TLA+/PRISM would be desirable.\n- PhD in Computer Science, Mathematics, or a related area. \n- Experience Multi-Party Computation and Zero-Knowledge Proofs\n- Track record of scientific publications.\n- Previous experience in remote or globally distributed teams.\n\n## Hiring process\n\nThe hiring process for this role will be:\n- Interview with our People Ops team\n- Interview with Alvaro (Team Lead)\n- Interview with Corey (Chief Security Officer)\n- Interview with Jarrad (Cofounder) or Daniel \n\nThe steps may change along the way if we see it makes sense to adapt the interview stages, so please consider the above as a guideline.\n\n \n\n## Compensation\n\nWe are happy to pay salaries in either 100% fiat or any mix of fiat and/or crypto. For more information regarding benefits at Status: https://people-ops.status.im/tag/perks/\n","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["role"]},"/private/roles/rust-developer":{"title":"Rust Developer","content":"\n# Role: Rust Developer\nat Status\n\nRemote, Worldwide\n\n**About Status**\n\nStatus is an organization building the tools and infrastructure for the advancement of a secure, private, and open web3. We have been completely distributed since inception. Our team is currently 100+ core contributors strong and welcomes a growing number of community members from all walks of life, scattered all around the globe. We care deeply about open source, and our organizational structure has a minimal hierarchy and no fixed work hours. We believe in working with a high degree of autonomy while supporting the organization's priorities.\n\n**About Logos**\n\nA group of Status Contributors is also involved in a new community lead project, called Logos, and this particular role will enable you to also focus on this project. Logos is a grassroots movement to provide trust-minimized, corruption-resistant governing services and social institutions to underserved citizens. 
\n\nLogos’ infrastructure will provide a base for the provisioning of the next-generation of governing services and social institutions - paving the way to economic opportunities for those who need them most, whilst respecting basic human rights through the network’s design.You can read more about Logos here: [in this small handbook](https://github.com/acid-info/public-assets/blob/master/logos-manual.pdf) for mindful readers like yourself.\n\n**Who are we?**\n\nWe are the Blockchain Infrastructure Team, and we are building the foundation used by other projects at the [Status Network](https://statusnetwork.com/). We are researching consensus algorithms, Multi-Party Computation techniques, ZKPs and other cutting-edge solutions with the aim to take the blockchain technology to the next level of security, decentralization and scalability for a wide range of use cases. We are currently in a research phase, working with models and simulations. In the near future, we will start implementing the research. You will have the opportunity to participate in developing -and improving- the state of the art of blockchain technologies, as well as turning it into a reality.\n\n**Responsibilities:**\n\n- Develop and maintenance of internal rust libraries\n- 1st month: comfortable with dev framework, simulation app. Improve python lib?\n- 2th-3th month: Start dev of prototype node services\n\n**Ideally you will have:**\n\n- “Extensive” Rust experience (Async programming is a must) \n Ideally they have some GitHub projects to show\n- Experience with Python\n- Strong competency in developing and maintaining complex libraries or applications\n- Experience in, and passion for, blockchain technology.\n- A strong alignment to our principles: [https://status.im/about/#our-principles](https://status.im/about/#our-principles) \n \n\n**Bonus points if**\n\n-  E.g. Comfortable working remotely and asynchronously\n-  Experience working for an open source organization.  \n-  Peer-to-peer or networking experience\n\n_[Don’t worry if you don’t meet all of these criteria, we’d still love to hear from you anyway if you think you’d be a great fit for this role!]_\n\n**Compensation**\n\nWe are happy to pay in either 100% fiat or any mix of fiat and/or crypto. For more information regarding benefits at Status: [https://people-ops.status.im/tag/perks/](https://people-ops.status.im/tag/perks/)\n\n**Hiring Process** \n\nThe hiring process for this role will be:\n\n1. Interview with Maya (People Ops team)\n2. Interview with Corey (Logos Program Owner)\n3. Interview with Daniel (Engineering Lead)\n4. Interview with Jarrad (Cofounder)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["role","engineering","rust"]},"/private/roles/zero-knowledge-research-engineer":{"title":"Zero Knowledge Research Engineer","content":"at Status\n\nRemote, Worldwide\n\n**About Status**\n\nStatus is building the tools and infrastructure for the advancement of a secure, private, and open web3. \n\nWith the high level goals of preserving the right to privacy, mitigating the risk of censorship, and promoting economic trade in a transparent, open manner, Status is building a community where anyone is welcome to join and contribute.\n\nAs an organization, Status seeks to push the web3 ecosystem forward through research, creation of developer tools, and support of the open source community. 
\n\nAs a product, Status is an open source, Ethereum-based app that gives users the power to chat, transact, and access a revolutionary world of DApps on the decentralized web. But Status is also building foundational infrastructure for the whole Ethereum ecosystem, including the Nimbus ETH 1.0 and 2.0 clients, the Keycard hardware wallet, and the Waku messaging protocol (a continuation of Whisper).\n\nAs a team, Status has been completely distributed since inception.  Our team is currently 100+ core contributors strong, and welcomes a growing number of community members from all walks of life, scattered all around the globe. \n\nWe care deeply about open source, and our organizational structure has minimal hierarchy and no fixed work hours. We believe in working with a high degree of autonomy while supporting the organization's priorities.\n\n**Who are we**\n\n[Vac](http://vac.dev/) **builds** [public good](https://en.wikipedia.org/wiki/Public_good) protocols for the decentralized web.\n\nWe do applied research based on which we build protocols, libraries and publications. Custodians of protocols that reflect [a set of principles](http://vac.dev/principles) - liberty, privacy, etc.\n\nYou can see a sample of some of our work here: [Vac, Waku v2 and Ethereum Messaging](https://vac.dev/waku-v2-ethereum-messaging), [Privacy-preserving p2p economic spam protection in Waku v2](https://vac.dev/rln-relay), [Waku v2 RFC](https://rfc.vac.dev/spec/10/). Our attitude towards ZK: [Vac \u003c3 ZK](https://forum.vac.dev/t/vac-3-zk/97).\n\n**The role**\n\nThis role will be part of a new team that will make a provable and private WASM engine that runs everywhere. As a research engineer, you will be responsible for researching, designing, analyzing and implementing circuits that allow for proving private computation of execution in WASM. This includes having a deep understanding of relevant ZK proof systems and tooling (zk-SNARK, Circom, Plonk/Halo 2, zk-STARK, etc), as well as different architectures (zk-EVM Community Effort, Polygon Hermez and similar) and their trade-offs. You will collaborate with the Vac Research team, and work with requirements from our new Logos program. As one of the first hires of a greenfield project, you are expected to take on significant responsibility,  while collaborating with other research engineers, including compiler engineers and senior Rust engineers. 
\n \n\n**Key responsibilities** \n\n- Research, analyze and design proof systems and architectures for private computation\n- Be familiar and adapt to research needs zero-knowledge circuits written in Rust Design and implement zero-knowledge circuits in Rust\n- Write specifications and communicate research findings through write-ups\n- Break down complex problems, and know what can and what can’t be dealt with later\n- Perform security analysis, measure performance of and debug circuits\n\n**You ideally will have**\n\n- Very strong academic or engineering background (PhD-level or equivalent in industry); relevant research experience\n- Experience with low level/strongly typed languages (C/C++/Go/Rust or Java/C#)\n- Experience with Open Source software\n- Deep understanding of Zero-Knowledge proof systems (zk-SNARK, circom, Plonk/Halo2, zk-STARK), elliptic curve cryptography, and circuit design\n- Keen communicator, eager to share your work in a wide variety of contexts, like internal and public presentations, blog posts and academic papers.\n- Experience in, and passion for, blockchain technology.\n- A strong alignment to our principles: [https://status.im/about/#our-principles](https://status.im/about/#our-principles)\n\n**Bonus points if** \n\n- Experience in provable and/or private computation (zkEVM, other ZK VM)\n- Rust Zero Knowledge tooling\n- Experience with WebAssemblyWASM\n\n[Don’t worry if you don’t meet all of these criteria, we’d still love to hear from you anyway if you think you’d be a great fit for this role. Just explain to us why in your cover letter].\n\n**Hiring process** \n\nThe hiring process for this role will be:\n\n1. Interview with Angel/Maya from our Talent team\n2. Interview with team member from the Vac team\n3. Pair programming task with the Vac team\n4. Interview with Oskar, the Vac team lead\n5. Interview with Jacek, Program lead\n\nThe steps may change along the way if we see it makes sense to adapt the interview stages, so please consider the above as a guideline.\n\n**Compensation**\n\nWe are happy to pay in either 100% fiat or any mix of fiat and/or crypto. For more information regarding benefits at Status: [https://people-ops.status.im/tag/perks/](https://people-ops.status.im/tag/perks/)","lastmodified":"2023-07-16T15:52:26.106715491Z","tags":["engineering","role","zero knowledge"]},"/roadmap/acid/updates/2023-08-02":{"title":"2023-08-02 Acid weekly","content":"\n## Leads roundup - acid\n\n**Al / Comms**\n\n- Status app relaunch comms campaign plan in the works. Approx. date for launch 31.08.\n- Logos comms + growth plan post launch is next up TBD.\n- Will be waiting for specs for data room, raise etc.\n- Hires: split the role for content studio to be more realistic in getting top level talent.\n\n**Matt / Copy**\n\n- Initiative updating old documentation like CC guide to reflect broader scope of BUs\n- Brand guidelines/ modes of presentation are in process\n- Wikipedia entry on network states and virtual states is live on \n\n**Eddy / Digital Comms**\n\n- Logos Discord will be completed by EOD.\n- Codex Discord will be done tomorrow.\n - LPE rollout plan, currently working on it, will be ready EOW\n- Podcast rollout needs some\n- Overarching BU plan will be ready in next couple of weeks as things on top have taken priority.\n\n**Amir / Studio**\n\n- Started execution of LPE for new requirements, broken down in smaller deliveries. Looking to have it working and live by EOM.\n- Hires: still looking for 3 positions with main focus on developer side. 
\n\n**Jonny / Podcast**\n\n- Podcast timelines are being set. In production right now. Nick delivered graphics for HiO but we need a full pack.\n- First HiO episode is in the works. Will be ready in 2 weeks to fit in the rollout of the LPE.\n\n**Louisa / Events**\n\n- Global strategy paper for wider comms plan.\n- Template for processes and executions when preparing events.\n- Decision made with Carl to move Network State event to November in satellite of other events. Looking into ETH Lisbon / Staking Summit etc.\n - Seoul Q4 hackathon is already in the works. Needs bounty planning.","lastmodified":"2023-08-04T14:33:33.42151646Z","tags":["acid-updates"]},"/roadmap/codex/updates/2023-07-21":{"title":"2023-07-21 Codex weekly","content":"\n## Codex update 07/12/2023 to 07/21/2023\n\nOverall we continue working in various directions, distributed testing, marketplace, p2p client, research, etc...\n\nOur main milestone is to have a fully functional testnet with the marketplace and durability guarantees deployed by end of year. A lot of grunt work is being done to make that possible. Progress is steady, but there are lots of stabilization and testing \u0026 infra related work going on.\n\nWe're also onboarding several new members to the team (4 to be precise), this will ultimately accelerate our progress, but it requires some upfront investment from some of the more experienced team members.\n\n### DevOps/Infrastructure:\n\n- Adopted nim-codex Docker builds for Dist Tests.\n- Ordered Dedicated node on Hetzner.\n- Configured Hetzner StorageBox for local backup on Dedicated server.\n- Configured new Logs shipper and Grafana in Dist-Tests cluster.\n- Created Geth and Prometheus Docker images for Dist-Tests.\n- Created a separate codex-contracts-eth Docker image for Dist-Tests.\n- Set up Ingress Controller in Dist-Tests cluster.\n\n### Testing:\n\n- Set up deployer to gather metrics.\n- Debugging and identifying potential deadlock in the Codex client.\n- Added metrics, built image, and ran tests.\n- Updated dist-test log for Kibana compatibility.\n- Ran dist-tests on a new master image.\n- Debugging continuous tests.\n\n### Development:\n\n- Worked on codex-dht nimble updates and fixing key format issue.\n- Updated CI and split Windows CI tests to run on two CI machines.\n- Continued updating dependencies in codex-dht.\n- Fixed decoding large manifests ([PR #479](https://github.com/codex-storage/nim-codex/pull/497)).\n- Explored the existing implementation of NAT Traversal techniques in `nim-libp2p`.\n\n### Research\n\n- Exploring additional directions for remote verification techniques and the interplay of different encoding approaches and cryptographic primitives\n - https://eprint.iacr.org/2021/1500.pdf\n - https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html\n - https://eprint.iacr.org/2021/1544.pdf\n- Onboarding Balázs as our ZK researcher/engineer\n- Continued research in DAS related topics\n - Running simulation on newly setup infrastructure\n- Devised a new direction to reduce metadata overhead and enable remote verification https://github.com/codex-storage/codex-research/blob/master/design/metadata-overhead.md\n- Looked into NAT Traversal ([issue #166](https://github.com/codex-storage/nim-codex/issues/166)).\n\n### Cross-functional (Combination of DevOps/Testing/Development):\n\n- Fixed discovery related issues.\n- Planned Codex Demo update for the Logos event and prepared environment for the demo.\n- Described requirements for Dist Tests logs format.\n- Configured new Logs 
shipper and Grafana in Dist-Tests cluster.\n- Dist Tests logs adoption requirements - Updated log format for Kibana compatibility.\n- Hetzner Dedicated server was configured.\n- Set up Hetzner StorageBox for local backup on Dedicated server.\n- Configured new Logs shipper in Dist-Tests cluster.\n- Setup Grafana in Dist-Tests cluster.\n- Created a separate codex-contracts-eth Docker image for Dist-Tests.\n- Setup Ingress Controller in Dist-Tests cluster.\n\n---\n\n#### Conversations\n1. zk_id _—_ 07/24/2023 11:59 AM\n\u003e \n\u003e We've explored VDI for rollups ourselves in the last week, curious to know your thoughts\n2. dryajov _—_ 07/25/2023 1:28 PM\n\u003e \n\u003e It depends on what you mean, from a high level (A)VID is probably the closest thing to DAS in academic research, in fact DAS is probably either a subset or a superset of VID, so it's definitely worth digging into. But I'm not sure what exactly you're interested in, in the context of rollups...\n1. zk_id _—_ 07/25/2023 3:28 PM\n \n The part of the rollups seems to be the base for choosing proofs that scale linearly with the amount of nodes (which makes it impractical for large numbers of nodes). The protocol is very simple, and would only need to instead provide constant proofs with the Kate commitments (at the cost of large computational resources is my understanding). This was at least the rationale that I get from reading the paper and the conversation with Bunz, one of the founders of the Espresso shared sequencer (which is where I found the first reference to this paper). I guess my main open question is why would you do the sampling if you can do VID in the context of blockchains as well. With the proofs of dispersal on-chain, you wouldn't need to do that for the agreement of the dispersal. You still would need the sampling for the light clients though, of course.\n \n2. dryajov _—_ 07/25/2023 8:31 PM\n \n \u003e I guess my main open question is why would you do the sampling if you can do VID in the context of blockchains as well. With the proofs of dispersal on-chain, you wouldn't need to do that for the agreement of the dispersal.\n \n Yeah, great question. What follows is strictly IMO, as I haven't seen this formally contrasted anywhere, so my reasoning can be wrong in subtle ways.\n \n - (A)VID - **dispersing** and storing data in a verifiable manner\n - Sampling - verifying already **dispersed** data\n \n tl;dr Sampling allows light nodes to protect against dishonest majority attacks. In other words, a light node cannot be tricked to follow an incorrect chain by a dishonest validator majority that withholds data. More details are here - [https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html](https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html \"https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html\") ------------- First, DAS implies (A)VID, as there is an initial phase where data is distributed to some subset of nodes. Moreover, these nodes, usually the validators, attest that they received the data and that it is correct. If a majority of validators accepts, then the block is considered correct, otherwise it is rejected. This is the verifiable dispersal part. But what if the majority of validators are dishonest? 
Can you prevent them from tricking the rest of the network from following the chain?\n \n Dankrad Feist\n \n [Data availability checks](https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html)\n \n Primer on data availability checks\n \n3. _[_8:31 PM_]_\n \n ## Dealing with dishonest majorities\n \n This is easy if all the data is downloaded by all nodes all the time, but we're trying to avoid just that. But lets assume, for the sake of the argument, that there are full nodes in the network that download all the data and are able to construct fraud proofs for missing data, can this mitigate the problem? It turns out that it can't, because proving data (un)availability isn't a directly attributable fault - in other words, you can observe/detect it but there is no way you can prove it to the rest of the network reliably. More details here [https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding](https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding \"https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding\") So, if there isn't much that can be done by detecting that a block isn't available, what good is it for? Well nodes can still avoid following the unavailable chain and thus be tricked by a dishonest majority. However, simply attesting that data has been publishing is not enough to prevent a dishonest majority from attacking the network. (edited)\n \n4. dryajov _—_ 07/25/2023 9:06 PM\n \n To complement, the relevant quote from [https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding](https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding \"https://github.com/ethereum/research/wiki/A-note-on-data-availability-and-erasure-coding\"), is:\n \n \u003e Here, fraud proofs are not a solution, because not publishing data is not a uniquely attributable fault - in any scheme where a node (\"fisherman\") has the ability to \"raise the alarm\" about some piece of data not being available, if the publisher then publishes the remaining data, all nodes who were not paying attention to that specific piece of data at that exact time cannot determine whether it was the publisher that was maliciously withholding data or whether it was the fisherman that was maliciously making a false alarm.\n \n The relevant quote from from [https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html](https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html \"https://dankradfeist.de/ethereum/2019/12/20/data-availability-checks.html\"), is:\n \n \u003e There is one gap in the solution of using fraud proofs to protect light clients from incorrect state transitions: What if a consensus supermajority has signed a block header, but will not publish some of the data (in particular, it could be fraudulent transactions that they will publish later to trick someone into accepting printed/stolen money)? Honest full nodes, obviously, will not follow this chain, as they can’t download the data. But light clients will not know that the data is not available since they don’t try to download the data, only the header. 
So we are in a situation where the honest full nodes know that something fishy is going on, but they have no means of alerting the light clients, as they are missing the piece of data that might be needed to create a fraud proof.\n \n Both articles are a bit old, but the intuitions still hold.\n \n\nJuly 26, 2023\n\n6. zk_id _—_ 07/26/2023 10:42 AM\n \n Thanks a ton @dryajov ! We are on the same page. TBH it took me a while to get to this point, as it's not an intuitive problem at first. The relationship between the VID and the DAS, and what each is for is crucial for us, btw. Your writing here and your references give us the confidence that we understand the problem and are equipped to evaluate the different solutions. Deeply appreciate that you took the time to write this, and is very valuable.\n \n7. _[_10:45 AM_]_\n \n The dishonest majority is critical scenario for Nomos (essential part of the whole sovereignty narrative), and generally not considered by most blockchain designs\n \n8. zk_id\n \n Thanks a ton @dryajov ! We are on the same page. TBH it took me a while to get to this point, as it's not an intuitive problem at first. The relationship between the VID and the DAS, and what each is for is crucial for us, btw. Your writing here and your references give us the confidence that we understand the problem and are equipped to evaluate the different solutions. Deeply appreciate that you took the time to write this, and is very valuable.\n \n ### dryajov _—_ 07/26/2023 4:42 PM\n \n Great! Glad to help anytime \n \n9. zk_id\n \n The dishonest majority is critical scenario for Nomos (essential part of the whole sovereignty narrative), and generally not considered by most blockchain designs\n \n dryajov _—_ 07/26/2023 4:43 PM\n \n Yes, I'd argue it is crucial in a network with distributed validation, where all nodes are either fully light or partially light nodes.\n \n10. _[_4:46 PM_]_\n \n Btw, there is probably more we can share/compare notes on in this problem space, we're looking at similar things, perhaps from a slightly different perspective in Codex's case, but the work done on DAS with the EF directly is probably very relevant for you as well \n \n\nJuly 27, 2023\n\n12. zk_id _—_ 07/27/2023 3:05 AM\n \n I would love to. Do you have those notes somewhere?\n \n13. zk_id _—_ 07/27/2023 4:01 AM\n \n all the links you have, anything, would be useful\n \n14. zk_id\n \n I would love to. Do you have those notes somewhere?\n \n dryajov _—_ 07/27/2023 4:50 PM\n \n A bit scattered all over the place, mainly from @Leobago and @cskiraly @cskiraly has a draft paper somewhere\n \n\nJuly 28, 2023\n\n16. zk_id _—_ 07/28/2023 5:47 AM\n \n Would love to see anything that is possible\n \n17. _[_5:47 AM_]_\n \n Our setting is much simpler, but any progress that you make (specifically in the computational cost of the polynomial commitments or alternative proofs) would be really useful for us\n \n18. zk_id\n \n Our setting is much simpler, but any progress that you make (specifically in the computational cost of the polynomial commitments or alternative proofs) would be really useful for us\n \n dryajov _—_ 07/28/2023 4:07 PM\n \n Yes, we're also working in this direction as this is crucial for us as well. There should be some result coming soon(tm), now that @bkomuves is helping us with this part.\n \n19. 
zk_id\n \n Our setting is much simpler, but any progress that you make (specifically in the computational cost of the polynomial commitments or alternative proofs) would be really useful for us\n \n bkomuves _—_ 07/28/2023 4:44 PM\n \n my current view (it's changing pretty often :) is that there is tension between:\n \n - commitment cost\n - proof cost\n - and verification cost\n \n the holy grail which is the best for all of them doesn't seem to exist. Hence, you have to make tradeoffs, and it depends on your specific use case what you should optimize for, or what balance you aim for. we plan to find some points in this 3 dimensional space which are hopefully close to the optimal surface, and in parallel to that figure out what balance to aim for, and then choose a solution based on that (and also based on what's possible, there are external restrictions)\n \n\nJuly 29, 2023\n\n21. bkomuves\n \n my current view (it's changing pretty often :) is that there is tension between: \n \n - commitment cost\n - proof cost\n - and verification cost\n \n  the holy grail which is the best for all of them doesn't seem to exist. Hence, you have to make tradeoffs, and it depends on your specific use case what you should optimize for, or what balance you aim for. we plan to find some points in this 3 dimensional space which are hopefully close to the optimal surface, and in parallel to that figure out what balance to aim for, and then choose a solution based on that (and also based on what's possible, there are external restrictions)\n \n zk_id _—_ 07/29/2023 4:23 AM\n \n I agree. That's also my understanding (although surely much more superficial).\n \n22. _[_4:24 AM_]_\n \n There is also the dimension of computation vs size cost\n \n23. _[_4:25 AM_]_\n \n ie the VID scheme (of the paper that kickstarted this conversation) has all the properties we need, but it scales n^2 in message complexity which makes it lose the properties we are looking for after 1k nodes. We need to scale confortably to 10k nodes.\n \n24. _[_4:29 AM_]_\n \n So we are at the moment most likely to use KZG commitments with a 2d RS polynomial. Basically just copy Ethereum. Reason is:\n \n - Our rollups/EZ leader will generate this, and those are beefier machines than the Base Layer. The base layer nodes just need to verify and sign the EC fragments and return them to complete the VID protocol (and then run consensus on the aggregated signed proofs).\n - If we ever decide to change the design for the VID dispersal to be done by Base Layer leaders (in a multileader fashion), it can be distributed (rows/columns can be reconstructed and proven separately). I don't think we will pursue this, but we will have to if this scheme doesn't scale with the first option.\n \n\nAugust 1, 2023\n\n26. dryajov\n \n A bit scattered all over the place, mainly from @Leobago and @cskiraly @cskiraly has a draft paper somewhere\n \n Leobago _—_ 08/01/2023 1:13 PM\n \n Note much public write-ups yet. You can find some content here:\n \n - [https://blog.codex.storage/data-availability-sampling/](https://blog.codex.storage/data-availability-sampling/ \"https://blog.codex.storage/data-availability-sampling/\")\n \n - [https://github.com/codex-storage/das-research](https://github.com/codex-storage/das-research \"https://github.com/codex-storage/das-research\")\n \n \n We also have a few Jupiter notebooks but they are not public yet. 
As soon as that content is out we can let you know ![🙂](https://discord.com/assets/da3651e59d6006dfa5fa07ec3102d1f3.svg)\n \n Codex Storage Blog\n \n [Data Availability Sampling](https://blog.codex.storage/data-availability-sampling/)\n \n The Codex team is busy building a new web3 decentralized storage platform with the latest advances in erasure coding and verification systems. Part of the challenge of deploying decentralized storage infrastructure is to guarantee that the data that has been stored and is available for retrieval from the beginning until\n \n GitHub\n \n [GitHub - codex-storage/das-research: This repository hosts all the ...](https://github.com/codex-storage/das-research)\n \n This repository hosts all the research on DAS for the collaboration between Codex and the EF. - GitHub - codex-storage/das-research: This repository hosts all the research on DAS for the collabora...\n \n [](https://opengraph.githubassets.com/39769464ebae80ca62c111bf2acb6af95fde1b9dc6e3c5a9eb56316ea363e3d8/codex-storage/das-research)\n \n ![GitHub - codex-storage/das-research: This repository hosts all the ...](https://images-ext-2.discordapp.net/external/DxXI-YBkzTrPfx_p6_kVpJzvVe6Ix6DrNxgrCbcsjxo/https/opengraph.githubassets.com/39769464ebae80ca62c111bf2acb6af95fde1b9dc6e3c5a9eb56316ea363e3d8/codex-storage/das-research?width=400\u0026height=200)\n \n27. zk_id\n \n So we are at the moment most likely to use KZG commitments with a 2d RS polynomial. Basically just copy Ethereum. Reason is: \n \n - Our rollups/EZ leader will generate this, and those are beefier machines than the Base Layer. The base layer nodes just need to verify and sign the EC fragments and return them to complete the VID protocol (and then run consensus on the aggregated signed proofs).\n - If we ever decide to change the design for the VID dispersal to be done by Base Layer leaders (in a multileader fashion), it can be distributed (rows/columns can be reconstructed and proven separately). I don't think we will pursue this, but we will have to if this scheme doesn't scale with the first option.\n \n dryajov _—_ 08/01/2023 1:55 PM\n \n This might interest you as well - [https://blog.subspace.network/combining-kzg-and-erasure-coding-fc903dc78f1a](https://blog.subspace.network/combining-kzg-and-erasure-coding-fc903dc78f1a \"https://blog.subspace.network/combining-kzg-and-erasure-coding-fc903dc78f1a\")\n \n Medium\n \n [Combining KZG and erasure coding](https://blog.subspace.network/combining-kzg-and-erasure-coding-fc903dc78f1a)\n \n The Hitchhiker’s Guide to Subspace  — Episode II\n \n [](https://miro.medium.com/v2/resize:fit:1200/0*KGb5QHFQEd0cvPeP.png)\n \n ![Combining KZG and erasure coding](https://images-ext-2.discordapp.net/external/LkoJxMEskKGMwVs8XTPVQEEu0senjEQf42taOjAYu0k/https/miro.medium.com/v2/resize%3Afit%3A1200/0%2AKGb5QHFQEd0cvPeP.png?width=400\u0026height=200)\n \n28. _[_1:56 PM_]_\n \n This is a great analysis of the current state of the art in structure of data + commitment and the interplay. I would also recoment reading the first article of the series which it also links to\n \n29. zk_id _—_ 08/01/2023 3:04 PM\n \n Thanks @dryajov @Leobago ! Much appreciated!\n \n30. _[_3:05 PM_]_\n \n Very glad that we can discuss these things with you. Maybe I have some specific questions once I finish reading a huge pile of pending docs that I'm tackling starting today...\n \n31. zk_id _—_ 08/01/2023 6:34 PM\n \n @Leobago @dryajov I was playing with the DAS simulator. It seems the results are a bunch of XML. 
Is there a way so I visualize the results?\n \n32. zk_id\n \n @Leobago @dryajov I was playing with the DAS simulator. It seems the results are a bunch of XML. Is there a way so I visualize the results?\n \n Leobago _—_ 08/01/2023 6:36 PM\n \n Yes, checkout the visual branch and make sure to enable plotting in the config file, it should produce a bunch of figures ![🙂](https://discord.com/assets/da3651e59d6006dfa5fa07ec3102d1f3.svg)\n \n33. _[_6:37 PM_]_\n \n You might find also some bugs here and there on that branch ![😅](https://discord.com/assets/b45af785b0e648fe2fb7e318a6b8010c.svg)\n \n34. zk_id _—_ 08/01/2023 7:44 PM\n \n Thanks!","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["codex-updates"]},"/roadmap/codex/updates/2023-08-01":{"title":"2023-08-01 Codex weekly","content":"\n# Codex update Aug 1st\n\n## Client\n\n### Milestone: Merkelizing block data\n\n- Initial design writeup https://github.com/codex-storage/codex-research/blob/master/design/metadata-overhead.md\n - Work break down and review for Ben and Tomasz (epic coming up)\n - This is required to integrate the proving system\n\n### Milestone: Block discovery and retrieval\n\n- Some initial work break down and milestones here - https://docs.google.com/document/d/1hnYWLvFDgqIYN8Vf9Nf5MZw04L2Lxc9VxaCXmp9Jb3Y/edit\n - Initial analysis of block discovery - https://rpubs.com/giuliano_mega/1067876\n - Initial block discovery simulator - https://gmega.shinyapps.io/block-discovery-sim/\n\n### Milestone: Distributed Client Testing\n\n- Lots of work around log collection/analysis and monitoring\n - Details here https://github.com/codex-storage/cs-codex-dist-tests/pull/41\n\n## Marketplace\n\n### Milestone: L2\n\n- Taiko L2 integration\n - This is a first try of running against an L2\n - Mostly done, waiting on related fixes to land before merge - https://github.com/codex-storage/nim-codex/pull/483\n\n### Milestone: Reservations and slot management\n\n- Lots of work around slot reservation and queuing https://github.com/codex-storage/nim-codex/pull/455\n\n## Remote auditing\n\n### Milestone: Implement Poseidon2\n\n- First pass at an implementation by Balazs\n - private repo, but can give access if anyone is interested\n\n### Milestone: Refine proving system\n\n- Lost of thinking around storage proofs and proving systems\n - private repo, but can give access if anyone is interested\n\n## DAS\n\n### Milestone: DHT simulations\n\n- Implementing a DHT in Python for the DAS simulator.\n- Implemented logical error-rates and delays to interactions between DHT clients.","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["codex-updates"]},"/roadmap/innovation_lab/updates/2023-07-12":{"title":"2023-07-12 Innovation Lab Weekly","content":"\n**Logos Lab** 12th of July\nCurrently working on the Waku Objects prototype, which is a modular system for transactional chat objects.\n\n**Milestone**: deliver the first transactional Waku Object called Payggy (attached some design screenshots).\n\nIt is now possible to make transactions on the blockchain and the objects send notifications over the messaging layer (e.g. Waku) to the other participants. What is left is the proper transaction status management and some polishing.\n\nThere is also work being done on supporting external objects, this enables creating the objects with any web technology. 
This work will guide the separation of the interfaces between the app and the objects and lead us to release it as an SDK.\n\n**Next milestone**: group chat support\n\nThe design is already done for the group chat functionality. There is ongoing design work for a new Waku Object that would showcase what can be done in a group chat context.\n\nDeployed version of the main branch:\nhttps://waku-objects-playground.vercel.app/\n\nLink to Payggy design files:\nhttps://scene.zeplin.io/project/64ae9e965652632169060c7d\n\nMain development repo:\nhttps://github.com/logos-innovation-lab/waku-objects-playground\n\nContact:\nYou can find us at https://discord.com/channels/973324189794697286/1118949151225413872 or join our discord at https://discord.gg/UtVHf2EU\n\n--- \n\n#### Conversation\n\n1. petty _—_ 07/15/2023 5:49 AM\n \n the `waku-objects` repo is empty. Where is the code storing that part vs the playground that is using them?\n \n2. petty\n \n the `waku-objects` repo is empty. Where is the code storing that part vs the playground that is using them?\n \n3. attila🍀 _—_ 07/15/2023 6:18 AM\n \n at the moment most of the code is in the `waku-objects-playground` repo later we may split it to several repos here is the link: [https://github.com/logos-innovation-lab/waku-objects-playground](https://github.com/logos-innovation-lab/waku-objects-playground \"https://github.com/logos-innovation-lab/waku-objects-playground\")","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["ilab-updates"]},"/roadmap/innovation_lab/updates/2023-08-02":{"title":"2023-08-02 Innovation Lab weekly","content":"\n**Logos Lab** 2nd of August\nCurrently working on the Waku Objects prototype, which is a modular system for transactional chat objects.\n\nThe last few weeks were a bit slower than usual because there were vacations, one team member got married, there was EthCC and a team offsite. \n\nStill, a lot of progress were made and the team released the first version of a color system in the form of an npm package, which lets the users to choose any color they like to customize their app. It is based on grayscale design and uses luminance, hence the name of the library. Try it in the Playground app or check the links below.\n\n**Milestone**: group chat support\n\nThere is a draft PR for group chat support for private groups and it is expected to be finished this week. At the end we decided to roll our own toy group chat protocol implementation because we did not find anything ready to use. It would have been great if we could have just used an existing implementation.\n\n**Next milestone**: Splitter Waku Object supporting group chats and smart contracts\n\nThis will be the first Waku Object that is meaningful in a group chat context. Also this will demonstrate how to use smart contracts and multiparty transactions.\n\nDeployed version of the main branch:\nhttps://waku-objects-playground.vercel.app/\n\nMain development repo:\nhttps://github.com/logos-innovation-lab/waku-objects-playground\n\nGrayscale design:\nhttps://grayscale.design/\n\nLuminance package on npm:\nhttps://www.npmjs.com/package/@waku-objects/luminance\n\nContact:\nYou can find us at https://discord.com/channels/973324189794697286/1118949151225413872 or join our discord at https://discord.gg/ZMU4yyWG\n\n--- \n\n### Conversation\n\n1. fryorcraken _—_ Yesterday at 10:58 PM\n \n \u003e There is a draft PR for group chat support for private groups and it is expected to be finished this week. 
At the end we decided to roll our own toy group chat protocol implementation because we did not find anything ready to use. It would have been great if we could have just used an existing implementation.\n \n While status-js does implement chat features, I do not know how nice the API is. Waku is actively hiring a chat sdk lead and golang eng. We will probably also hire a JS engineer (not yet confirmed) to provide nice libraries to enable such use case (1:1 chat, group chat, community chat).\n \n\nAugust 3, 2023\n\n2. fryorcraken\n \n \u003e \u003e There is a draft PR for group chat support for private groups and it is expected to be finished this week. At the end we decided to roll our own toy group chat protocol implementation because we did not find anything ready to use. It would have been great if we could have just used an existing implementation. While status-js does implement chat features, I do not know how nice the API is. Waku is actively hiring a chat sdk lead and golang eng. We will probably also hire a JS engineer (not yet confirmed) to provide nice libraries to enable such use case (1:1 chat, group chat, community chat).\n \n3. attila🍀 _—_ Today at 4:21 AM\n \n This is great news and I think it will help with adoption. I did not find a JS API for status (maybe I was looking at the wrong places), the closest was the `status-js-api` project but that still uses whisper and the repo recommends to use `js-waku` instead ![🙂](https://discord.com/assets/da3651e59d6006dfa5fa07ec3102d1f3.svg) [https://github.com/status-im/status-js-api](https://github.com/status-im/status-js-api \"https://github.com/status-im/status-js-api\") Also I also found the `56/STATUS-COMMUNITIES` spec: [https://rfc.vac.dev/spec/56/](https://rfc.vac.dev/spec/56/ \"https://rfc.vac.dev/spec/56/\") It seems to be quite a complete solution for community management with all the bells and whistles. However our use case is a private group chat for your existing contacts, so it seems to be a bit overkill for that.\n \n4. fryorcraken _—_ Today at 5:32 AM\n \n The repo is status-im/status-web\n \n5. _[_5:33 AM_]_\n \n Spec is [https://rfc.vac.dev/spec/55/](https://rfc.vac.dev/spec/55/ \"https://rfc.vac.dev/spec/55/\")\n \n6. fryorcraken\n \n The repo is status-im/status-web\n \n7. attila🍀 _—_ Today at 6:05 AM\n \n As constructive feedback I can tell you that it is not trivial to find it and use it in other projects It is presented as a React component without documentation and by looking at the code it seems to provide you the whole chat UI of the desktop app, which is not necessarily what you need if you want to embed it in your app It seems to be using this package: [https://www.npmjs.com/package/@status-im/js](https://www.npmjs.com/package/@status-im/js \"https://www.npmjs.com/package/@status-im/js\") Which also does not have documentation I assume that package is built from this: [https://github.com/status-im/status-web/tree/main/packages/status-js](https://github.com/status-im/status-web/tree/main/packages/status-js \"https://github.com/status-im/status-web/tree/main/packages/status-js\") This looks promising, but again there is no documentation. Of course you can use the code to figure out things, but at least I would be interested in what are the requirements and high level architecture (does it require an ethereum RPC endpoint, where does it store data, etc.) so that I can evaluate if this is the right approach for me. 
So maybe a lesson here is to put effort in the documentation and the presentation as well and if you have the budget then have someone on the team whose main responsibility is that (like a devrel or dev evangelist role)","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["ilab-updates"]},"/roadmap/nomos/updates/2023-07-24":{"title":"2023-07-24 Nomos weekly","content":"\n**Research**\n\n- Milestone 1: Understanding Data Availability (DA) Problem\n - High-level exploration and discussion on data availability problems in a collaborative offsite meeting in Paris.\n - Explored the necessity and key challenges associated with DA.\n - In-depth study of Verifiable Information Dispersal (VID) as it relates to data availability.\n - **Blocker:** The experimental tests for our specific EC scheme are pending, which is blocking progress to make final decision on KZG + commitments for our architecture.\n- Milestone 2: Privacy for Proof of Stake (PoS)\n - Analyzed the capabilities and limitations of mixnets, specifically within the context of timing attacks in private PoS.\n - Invested time in understanding timing attacks and how Nym mixnet caters to these challenges.\n - Reviewed the Crypsinous paper to understand its privacy vulnerabilities, notably the issue with probabilistic leader election and the vulnerability of anonymous broadcast channels to timing attacks.\n\n**Development**\n\n- Milestone 1: Mixnet and Networking\n - Initiated integration of libp2p to be used as the full node's backend, planning to complete in the next phase.\n - Begun planning for the next steps for mixnet integration, with a focus on understanding the components of the Nym mixnet, its problem-solving mechanisms, and the potential for integrating some of its components into our codebase.\n- Milestone 2: Simulation Application\n - Completed pseudocode for Carnot Simulator, created a test pseudocode, and provided a detailed description of the simulation. The relevant resources can be found at the following links:\n - Carnot Simulator pseudocode (https://github.com/logos-co/nomos-specs/blob/Carnot-Simulation/carnot/carnot_simulation_psuedocode.py)\n - Test pseudocode (https://github.com/logos-co/nomos-specs/blob/Carnot-Simulation/carnot/test_carnot_simulation.py)\n - Description of the simulation (https://www.notion.so/Carnot-Simulation-c025dbab6b374c139004aae45831cf78)\n - Implemented simulation network fixes and warding improvements, and increased the run duration of integration tests. 
The corresponding pull requests can be accessed here:\n - Simulation network fix (https://github.com/logos-co/nomos-node/pull/262)\n - Vote tally fix (https://github.com/logos-co/nomos-node/pull/268)\n - Increased run duration of integration tests (https://github.com/logos-co/nomos-node/pull/263)\n - Warding improvements (https://github.com/logos-co/nomos-node/pull/269)","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["nomos-updates"]},"/roadmap/nomos/updates/2023-07-31":{"title":"2023-07-31 Nomos weekly","content":"\n**Nomos 31st July**\n\n[Network implementation and Mixnet]:\n\nResearch\n- Initial analysis on the mixnet Proof of Concept (PoC) was performed, assessing components like Sphinx for packets and delay-forwarder.\n- Considered the use of a new NetworkInterface in the simulation to mimic the mixnet, but currently, no significant benefits from doing so have been identified.\nDevelopment\n- Fixes were made on the Overlay interface.\n- Near completion of the libp2p integration with all tests passing so far, a PR is expected to be opened soon.\n- Link to libp2p PRs: https://github.com/logos-co/nomos-node/pull/278, https://github.com/logos-co/nomos-node/pull/279, https://github.com/logos-co/nomos-node/pull/280, https://github.com/logos-co/nomos-node/pull/281\n- Started working on the foundation of the libp2p-mixnet transport.\n\n[Private PoS]:\n\nResearch\n- Discussions were held on the Privacy PoS (PPoS) proposal, aligning a general direction of team members.\n- Reviews on the PPoS proposal were done.\n- A proposal to merge the PPoS proposal with the efficient one was made, in order to have both privacy and efficiency.\n- Discussions on merging Efficient PoS (EPoS) with PPoS are in progress.\n\n[Carnot]:\n\nResearch\n- Analyzing Bribery attack scenarios, which seem to make Carnot more vulnerable than expected.\n\n\n**Development**\n\n- Improved simulation application to meet test scale requirements (https://github.com/logos-co/nomos-node/pull/274).\n- Created a strategy to solve the large message sending issue in the simulation application.\n\n[Data Availability Sampling (or VID)]:\n\nResearch\n- Conducted an analysis of stored data \"degradation\" problem for data availability, modeling fractions of nodes which leave the system at regular time intervals\n- Continued literature reading on Verifiable Information Dispersal (VID) for DA problem, as well as encoding/commitment schemes.","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["nomos-updates"]},"/roadmap/vac/updates/2023-07-10":{"title":"2023-07-10 Vac Weekly","content":"- *vc::Deep Research*\n - refined deep research roadmaps https://github.com/vacp2p/research/issues/190, https://github.com/vacp2p/research/issues/192\n - working on comprehensive current/related work study on Validator Privacy\n - working on PoC of Tor push in Nimbus\n - working towards comprehensive current/related work study on gossipsub scaling\n- *vsu::P2P*\n - Prepared Paris talks\n - Implemented perf protocol to compare the performances with other libp2ps https://github.com/status-im/nim-libp2p/pull/925\n- *vsu::Tokenomics*\n - Fixing bugs on the SNT staking contract;\n - Definition of the first formal verification tests for the SNT staking contract;\n - Slides for the Paris off-site\n- *vsu::Distributed Systems Testing*\n - Replicated message rate issue (still on it)\n - First mockup of offline data\n - Nomos consensus test working\n- *vip::zkVM*\n - hiring\n - onboarding new researcher\n - presentation on ECC during Logos Research Call 
(incl. preparation)\n - more research on nova, considering additional options\n - Identified 3 research questions to be taken into consideration for the ZKVM and the publication\n - Researched Poseidon implementation for Nova, Nova-Scotia, Circom\n- *vip::RLNP2P*\n - finished rln contract for waku product - https://github.com/waku-org/rln-contract\n - fixed homebrew issue that prevented zerokit from building - https://github.com/vacp2p/zerokit/commit/8a365f0c9e5c4a744f70c5dd4904ce8d8f926c34\n - rln-relay: verify proofs based upon bandwidth usage - https://github.com/waku-org/nwaku/commit/3fe4522a7e9e48a3196c10973975d924269d872a\n - RLN contract audit cont' https://hackmd.io/@blockdev/B195lgIth\n","lastmodified":"2023-07-16T15:56:13.810627742Z","tags":["vac-updates"]},"/roadmap/vac/updates/2023-07-17":{"title":"2023-07-17 Vac weekly","content":"\n**Last week**\n- *vc*\n - Vac day in Paris (13th)\n- *vc::Deep Research*\n - working on comprehensive current/related work study on Validator Privacy\n - working on PoC of Tor push in Nimbus: setting up goerli nim-eth2 node\n - working towards comprehensive current/related work study on gossipsub scaling\n- *vsu::P2P*\n - Paris offsite Paris (all CCs)\n- *vsu::Tokenomics*\n - Bugs found and solved in the SNT staking contract\n - attend events in Paris\n- *vsu::Distributed Systems Testing*\n - Events in Paris\n - QoS on all four infras\n - Continue work on theoretical gossipsub analysis (varying regular graph sizes)\n - Peer extraction using WLS (almost finished)\n - Discv5 testing\n - Wakurtosis CI improvements\n - Provide offline data\n- *vip::zkVM*\n - onboarding new researcher\n - Prepared and presented ZKVM work during VAC offsite\n - Deep research on Nova vs Stark in terms of performance and related open questions\n - researching Sangria\n - Worked on NEscience document (https://www.notion.so/Nescience-WIP-0645c738eb7a40869d5650ae1d5a4f4e)\n - zerokit:\n - worked on PR for arc-circom\n- *vip::RLNP2P*\n - offsite Paris\n\n**This week**\n- *vc*\n- *vc::Deep Research*\n - working on comprehensive current/related work study on Validator Privacy\n - working on PoC of Tor push in Nimbus\n - working towards comprehensive current/related work study on gossipsub scaling\n- *vsu::P2P*\n - EthCC \u0026 Logos event Paris (all CCs)\n- *vsu::Tokenomics*\n - Attend EthCC and side events in Paris\n - Integrate staking contracts with radCAD model\n - Work on a new approach for Codex collateral problem\n- *vsu::Distributed Systems Testing*\n - Events in Paris\n - Finish peer extraction, plot the peer connections; script/runs for the analysis, and add data to the Tech Report\n - Restructure the Analysis script and start modelling Status control messages\n - Split Wakurtosis analysis module into separate repository (delayed)\n - Deliver simulation results (incl fixing discv5 error with new Kurtosis version)\n - Second iteration Nomos CI\n- *vip::zkVM*\n - Continue researching on Nova open questions and Sangria\n - Draft the benchmark document (by the end of the week)\n - research hardware for benchmarks\n - research Halo2 cont'\n - zerokit:\n - merge a PR for deployment of arc-circom\n - deal with arc-circom master fail\n- *vip::RLNP2P*\n - offsite paris\n- *blockers*\n - *vip::zkVM:zerokit*: ark-circom deployment to crates io; contact to ark-circom team","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["vac-updates"]},"/roadmap/vac/updates/2023-07-24":{"title":"2023-08-03 Vac weekly","content":"\nNOTE: This is a first experimental version moving towards 
the new reporting structure:\n\n**Last week**\n- *vc*\n- *vc::Deep Research*\n - milestone (15%, 2023/11/30) paper on gossipsub improvements ready for submission\n - related work section\n - milestone (15%, 2023/08/31) Nimbus Tor-push PoC\n - basic torpush encode/decode ( https://github.com/vacp2p/nim-libp2p-experimental/pull/1 )\n - milestone (15%, 2023/11/30) paper on Tor push validator privacy\n - (focus on Tor-push PoC)\n- *vsu::P2P*\n - admin/misc\n - EthCC (all CCs)\n- *vsu::Tokenomics*\n - admin/misc\n - Attended EthCC and side events in Paris\n - milestone (30%, 2023/09/30) Codex economic analysis, Codex token utility, Codex collateral management\n - Kicked off a new approach for Codex collateral problem\n - milestone (50%, 2023/08/30) SNT staking smart contract\n - Integrated SNT staking contracts with Python\n - milestone (50%, 2023/07/14) SNT litepaper\n - (delayed)\n - milestone(30%, 2023/09/29) Nomos Token: requirements and constraints\n- *vsu::Distributed Systems Testing*\n - milestone (95%, 2023/07/31) Wakurtosis Waku Report\n - Add timout to injection async call in WLS to avoid further issues (PR #139 https://github.com/vacp2p/wakurtosis/pull/139)\n - Plotting \u0026 analyse 100 msg/s off line Prometehus data\n - milestone (90%, 2023/07/31) Nomos CI testing\n - fixed errors in Nomos consensus simulation\n - milestone (30%, ...) gossipsub model analysis\n - add config options to script, allowing to load configs that can be directly compared to Wakurtosis results\n - added support for small world networks\n - admin/misc\n - Interviews \u0026 reports for SE and STA positions\n - EthCC (1 CC)\n- *vip::zkVM*\n - milestone(50%, 2023/08/31) background/research on existing proof systems (nova, sangria...)\n - (write ups will be available here: https://www.notion.so/zkVM-cd358fe429b14fa2ab38ca42835a8451)\n - Solved the open questions on Nova adn completed the document (will update the page)\n - Reviewed Nescience and working on a document\n - Reviewed partly the write up on FHE\n - writeup for Nova and Sangria; research on super nova\n - reading a new paper revisiting Nova (https://eprint.iacr.org/2023/969)\n - milestone (50%, 2023/08/31) new fair benchmarks + recursive implementations\n - zkvm\n - Researching Nova to understand the folding technique for ZKVM adaptation\n - zerokit\n - Rostyslav became circom-compat maintainer\n- *vip::RLNP2P*\n - milestone (100%, 2023/07/31) rln-relay testnet 3 completed and retro\n - completed\n - milestone (95%, 2023/07/31) RLN-Relay Waku production readiness\n - admin/misc\n - EthCC + offsite\n\n**This week**\n- *vc*\n- *vc::Deep Research*\n - milestone (15%, 2023/11/30) paper on gossipsub improvements ready for submission\n - working on contributions section, based on https://hackmd.io/X1DoBHtYTtuGqYg0qK4zJw\n - milestone (15%, 2023/08/31) Nimbus Tor-push PoC\n - working on establishing a connection via nim-libp2p tor-transport\n - setting up goerli test node (cont')\n - milestone (15%, 2023/11/30) paper on Tor push validator privacy\n - continue working on paper\n- *vsu::P2P*\n - milestone (...)\n - Implement ChokeMessage for GossipSub\n - Continue \"limited flood publishing\" (https://github.com/status-im/nim-libp2p/pull/911)\n- *vsu::Tokenomics*\n - admin/misc:\n - (3 CC days off)\n - Catch up with EthCC talks that we couldn't attend (schedule conflicts)\n - milestone (50%, 2023/07/14) SNT litepaper\n - Start building the SNT agent-based simulation\n- *vsu::Distributed Systems Testing*\n - milestone (100%, 2023/07/31) Wakurtosis Waku 
Report\n - finalize simulations\n - finalize report\n - milestone (100%, 2023/07/31) Nomos CI testing\n - finalize milestone\n - milestone (30%, ...) gossipsub model analysis\n - Incorporate Status control messages\n - admin/misc\n - Interviews \u0026 reports for SE and STA positions\n - EthCC (1 CC)\n- *vip::zkVM*\n - milestone(50%, 2023/08/31) background/research on existing proof systems (nova, sangria...)\n - Refine the Nescience WIP and FHE documents\n - research HyperNova\n - milestone (50%, 2023/08/31) new fair benchmarks + recursive implementations\n - Continue exploring Nova and other ZKPs and start technical writing on Nova benchmarks\n - zkvm\n - zerokit\n - circom: reach an agreement with other maintainers on master branch situation\n- *vip::RLNP2P*\n - maintenance\n - investigate why docker builds of nwaku are failing [zerokit dependency related]\n - documentation on how to use rln for projects interested (https://discord.com/channels/864066763682218004/1131734908474236968/1131735766163267695)(https://ci.infra.status.im/job/nim-waku/job/manual/45/console)\n - milestone (95%, 2023/07/31) RLN-Relay Waku production readiness\n - revert rln bandwidth reduction based on offsite discussion, move to different validator\n- *blockers*","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["vac-updates"]},"/roadmap/vac/updates/2023-07-31":{"title":"2023-07-31 Vac weekly","content":"\n- *vc::Deep Research*\n - milestone (20%, 2023/11/30) paper on gossipsub improvements ready for submission\n - proposed solution section\n - milestone (15%, 2023/08/31) Nimbus Tor-push PoC\n - establishing torswitch and testing code\n - milestone (15%, 2023/11/30) paper on Tor push validator privacy\n - addressed feedback on current version of paper\n- *vsu::P2P*\n - nim-libp2p: (100%, 2023/07/31) GossipSub optimizations for ETH's EIP-4844\n - Merged IDontWant (https://github.com/status-im/nim-libp2p/pull/934) \u0026 Limit flood publishing (https://github.com/status-im/nim-libp2p/pull/911) 𝕏\n - This wraps up the \"mandatory\" optimizations for 4844. 
We will continue working on stagger sending and other optimizations\n - nim-libp2p: (70%, 2023/07/31) WebRTC transport\n- *vsu::Tokenomics*\n - admin/misc\n - 2 CCs off for the week\n - milestone (30%, 2023/09/30) Codex economic analysis, Codex token utility, Codex collateral management\n - milestone (50%, 2023/08/30) SNT staking smart contract\n - milestone (50%, 2023/07/14) SNT litepaper\n - milestone (30%, 2023/09/29) Nomos Token: requirements and constraints\n- *vsu::Distributed Systems Testing*\n - admin/misc\n - Analysis module extracted from wakurtosis repo (https://github.com/vacp2p/wakurtosis/pull/142, https://github.com/vacp2p/DST-Analysis)\n - hiring\n - milestone (99%, 2023/07/31) Wakurtosis Waku Report\n - Re-run simulations\n - merge Discv5 PR (https://github.com/vacp2p/wakurtosis/pull/129).\n - finalize Wakurtosis Tech Report v2\n - milestone (100%, 2023/07/31) Nomos CI testing\n - delivered first version of Nomos CI integration (https://github.com/vacp2p/wakurtosis/pull/141)\n - milestone (30%, 2023/08/31 gossipsub model: Status control messages\n - Waku model is updated to model topics/content-topics\n- *vip::zkVM*\n - milestone(50%, 2023/08/31) background/research on existing proof systems (nova, sangria...)\n - achievment :: nova questions answered (see document in Project: https://www.notion.so/zkVM-cd358fe429b14fa2ab38ca42835a8451)\n - Nescience WIP done (to be delivered next week, priority)\n - FHE review (lower prio)\n - milestone (50%, 2023/08/31) new fair benchmarks + recursive implementations\n - Working on discoveries about other benchmarks done on plonky2, starky, and halo2\n - zkvm\n - zerokit\n - fixed ark-circom master \n - achievment :: publish ark-circom https://crates.io/crates/ark-circom\n - achievment :: publish zerokit_utils https://crates.io/crates/zerokit_utils\n - achievment :: publish rln https://crates.io/crates/rln (𝕏 jointly with RLNP2P)\n- *vip::RLNP2P*\n - milestone (100%, 2023/07/31) RLN-Relay Waku production readiness\n - Updated rln-contract to be more modular - and downstreamed to waku fork of rln-contract - https://github.com/vacp2p/rln-contract and http://github.com/waku-org/waku-rln-contract\n - Deployed to sepolia\n - Fixed rln enabled docker image building in nwaku - https://github.com/waku-org/nwaku/pull/1853\n - zerokit:\n - achievement :: zerokit v0.3.0 release done - https://github.com/vacp2p/zerokit/releases/tag/v0.3.0 (𝕏 jointly with zkVM)\n","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["vac-updates"]},"/roadmap/waku/updates/2023-07-24":{"title":"2023-07-24 Waku weekly","content":"\nDisclaimer: First attempt playing with the format. 
Incomplete as not everyone is back and we are still adjusting the milestones.\n\n---\n\n## Docs\n\n### **Milestone**: Foundation for Waku docs (done)\n\n#### _achieved_:\n- overall layout\n- concept docs\n- community/showcase pages\n\n### **Milestone**: Foundation for node operator docs (done)\n#### _achieved_:\n- nodes overview page\n- guide for running nwaku (binaries, source, docker)\n- peer discovery config guide\n- reference docs for config methods and options\n\n### **Milestone**: Foundation for js-waku docs\n#### _achieved_:\n- js-waku overview + installation guide\n- lightpush + filter guide\n- store guide\n- @waku/create-app guide\n\n#### _next:_\n- improve @waku/react guide\n\n#### _blocker:_\n- polyfills issue with [js-waku](https://github.com/waku-org/js-waku/issues/1415)\n\n### **Milestone**: Docs general improvement/incorporating feedback (continuous)\n### **Milestone**: Running nwaku in the cloud\n### **Milestone**: Add Waku guide to learnweb3.io\n### **Milestone**: Encryption docs for js-waku\n### **Milestone**: Advanced node operator doc (postgres, WSS, monitoring, common config)\n### **Milestone**: Foundation for go-waku docs\n### **Milestone**: Foundation for rust-waku-bindings docs\n### **Milestone**: Waku architecture docs\n### **Milestone**: Waku detailed roadmap and milestones\n### **Milestone**: Explain RLN\n\n---\n\n## Eco Dev (WIP)\n\n### **Milestone**: EthCC Logos side event organisation (done)\n### **Milestone**: Community Growth\n#### _achieved_: \n- Wrote several bounties, improved template; setup onboarding flow in Discord.\n\n#### _next_: \n- Review template, publish on GitHub\n\n### **Milestone**: Business Development (continuous)\n#### _achieved_: \n- Discussions with various leads in EthCC\n#### _next_: \n- Booking calls with said leads\n\n### **Milestone**: Setting Up Content Strategy for Waku\n\n#### _achieved_: \n- Discussions with Comms Hubs re Waku Blog \n- expressed needs and intent around future blog post and needed amplification\n- discuss strategies to onboard/involve non-dev and potential CTAs.\n\n### **Milestone**: Web3Conf (dates)\n### **Milestone**: DeCompute conf\n\n---\n\n## Research (WIP)\n\n### **Milestone**: [Autosharding v1](https://github.com/waku-org/nwaku/issues/1846)\n#### _achieved:_ \n- rendezvous hashing \n- weighting function \n- updated LIGHTPUSH to handle autosharding\n\n#### _next:_\n- update FILTER \u0026 STORE for autosharding\n\n---\n\n## nwaku (WIP)\n\n### **Milestone**: Postgres integration.\n#### _achieved:_\n- nwaku can store messages in a Postgres database\n- we started to perform stress tests\n\n#### _next:_\n- Analyse why some messages are not stored during stress tests happened in both sqlite and Postgres, so maybe the issue isn't directly related to _store_.\n\n### **Milestone**: nwaku as a library (C-bindings)\n#### _achieved:_\n- The integration is in progress through N-API framework\n\n#### _next:_\n- Make the nodejs to properly work by running the _nwaku_ node in a separate thread.\n\n---\n\n## go-waku (WIP)\n\n\n---\n\n## js-waku (WIP)\n\n### **Milestone**: [Peer management](https://github.com/waku-org/js-waku/issues/914)\n#### _achieved: \n- spec test for connection manager\n\n### **Milestone**: [Peer Exchange](https://github.com/waku-org/js-waku/issues/1429)\n### **Milestone**: Static Sharding\n#### _next_: \n- start implementation of static sharding in js-waku\n\n### **Milestone**: Developer Experience\n#### _achieved_: \n- js-lip2p upgrade to remove usage of polyfills (draft PR)\n\n#### _next_: \n- merge 
and release js-libp2p upgrade\n\n### **Milestone**: Waku Relay in the Browser\n\n---","lastmodified":"2023-08-04T14:33:33.42151646Z","tags":["waku-updates"]},"/roadmap/waku/updates/2023-07-31":{"title":"2023-07-31 Waku weekly","content":"\n**Waku 31 Jul**\n\n---\nWaku\nDocs\n\n**Milestone**: Docs general improvement/incorporating feedback (continuous)\n_next:_ rewrite docs in British English\n**Milestone**: Running nwaku in the cloud\n_next:_ publish guides for Digital Ocean, Oracle, Fly.io\n\n---\nWaku\nEco Dev (WIP)\n\n---\nWaku\nResearch\n\n**Milestone**: Detailed network requirements and task breakdown\n_achieved:_ gathering rough network requirements\n_next:_ detailed task breakdown per milestone and effort allocation\n\n**Milestone**: [Autosharding v1](https://github.com/waku-org/nwaku/issues/1846)\n_achieved:_ update FILTER \u0026 STORE for autosharding\n_next:_ RFC review \u0026 updates, code review \u0026 updates\n\n---\nWaku\nnwaku\n\n**Milestone**: nwaku release process automation\n_next_:\n- setup automation to test/simulate current `master` to prevent/limit regressions\n- expand target architectures and platforms for release artifacts (e.g. arm64, Win...)\n**Milestone**: HTTP Rest API for protocols\n_next:_ Filter API added, tests to complete.\n\n---\nWaku\ngo-waku\n\n**Milestone**: Increase Maintability Score. Refer to [CodeClimate report](https://codeclimate.com/github/waku-org/go-waku)\n_next:_ define scope on which issues reported by CodeClimate should be fixed. Initially it should be limited to reduce code complexity and duplication.\n\n\n**Milestone**: RLN updates, refer [issue](https://github.com/waku-org/go-waku/issues/608).\n_achieved_: expose `set_tree`, `key_gen`, `seeded_key_gen`, `extended_seeded_keygen`, `recover_id_secret`, `set_leaf`, `init_tree_with_leaves`, `set_metadata`, `get_metadata` and `get_leaf`; created an example on how to use RLN with go-waku; service node can pass in index to keystore credentials and can verify proofs based on bandwidth usage\n_next_: merkle tree batch operations (in progress); usage of persisted merkle tree db\n\n**Milestone**: Improve test coverage for functional tests of all protocols. 
Refer to [CodeClimate report]\n_next_: define scope on which code sections should be covered by tests\n\n**Milestone**: C-Bindings\n_next_: update API to match nwaku's (by using callbacks instead of strings that require freeing)\n\n---\nWaku\njs-waku\n\n**Milestone**: [Peer management](https://github.com/waku-org/js-waku/issues/914)\n_achieved_: extend ConnectionManager with EventEmitter and dispatch peers tagged with their discovery + make it public on the Waku interface\n_next_: fallback improvement for peer connect rejection\n\n**Milestone**: [Peer Exchange](https://github.com/waku-org/js-waku/issues/1429)\n_next_: robusting support around peer-exchange for examples;\n**Milestone**: Static Sharding\n_achieved_: WIP implementation of static sharding in js-waku\n_next_: investigation around gauging connection loss;\n\n**Milestone**: Developer Experience\n_achieved_: improve \u0026 update @waku/react; merge and release js-libp2p upgrade\n update examples to latest release + make sure no old/unused packages there\n\n**Milestone**: Maintenance\n_next_: update to libp2p@0.46.0\n_next_: suit of optional tests in pipeline\n\n---","lastmodified":"2023-08-03T16:00:41.712472167Z","tags":["waku-updates"]}} \ No newline at end of file diff --git a/linkmap b/linkmap index db53e1e37..eb94e38c5 100644 --- a/linkmap +++ b/linkmap @@ -1,46 +1,46 @@ -/private/requirements/overview/index.{html} /private/requirements/overview/ -/private/roadmap/consensus/candidates/carnot/overview/index.{html} /private/roadmap/consensus/candidates/carnot/overview/ -/roadmap/codex/updates/2023-07-21/index.{html} /roadmap/codex/updates/2023-07-21/ +/roadmap/codex/updates/2023-08-01/index.{html} /roadmap/codex/updates/2023-08-01/ +/roadmap/innovation_lab/updates/2023-08-02/index.{html} /roadmap/innovation_lab/updates/2023-08-02/ /roadmap/vac/updates/2023-07-17/index.{html} /roadmap/vac/updates/2023-07-17/ +/roadmap/waku/updates/2023-07-24/index.{html} /roadmap/waku/updates/2023-07-24/ +/private/notes/setup/index.{html} /private/notes/setup/ +/private/roadmap/consensus/theory/overview/index.{html} /private/roadmap/consensus/theory/overview/ +/private/roadmap/networking/status-waku-kurtosis/index.{html} /private/roadmap/networking/status-waku-kurtosis/ +/roadmap/acid/updates/2023-08-02/index.{html} /roadmap/acid/updates/2023-08-02/ /roadmap/vac/updates/2023-07-24/index.{html} /roadmap/vac/updates/2023-07-24/ -/private/notes/ignore-notes/index.{html} /private/notes/ignore-notes/ +/roadmap/waku/updates/2023-07-31/index.{html} /roadmap/waku/updates/2023-07-31/ +/private/roadmap/consensus/overview/index.{html} /private/roadmap/consensus/overview/ +/private/roadmap/virtual-machines/overview/index.{html} /private/roadmap/virtual-machines/overview/ +/roadmap/nomos/updates/2023-07-24/index.{html} /roadmap/nomos/updates/2023-07-24/ +/roadmap/vac/updates/2023-07-10/index.{html} /roadmap/vac/updates/2023-07-10/ /private/notes/troubleshooting/index.{html} /private/notes/troubleshooting/ /private/notes/updating/index.{html} /private/notes/updating/ /private/roadmap/consensus/development/prototypes/index.{html} /private/roadmap/consensus/development/prototypes/ -/private/roadmap/networking/overview/index.{html} /private/roadmap/networking/overview/ -/private/roles/distributed-systems-researcher/index.{html} /private/roles/distributed-systems-researcher/ -/roadmap/vac/updates/2023-07-31/index.{html} /roadmap/vac/updates/2023-07-31/ -/private/notes/callouts/index.{html} /private/notes/callouts/ -/private/notes/obsidian/index.{html} 
/private/notes/obsidian/ -/private/notes/philosophy/index.{html} /private/notes/philosophy/ -/private/roadmap/consensus/theory/snow-family/index.{html} /private/roadmap/consensus/theory/snow-family/ -/private/roles/zero-knowledge-research-engineer/index.{html} /private/roles/zero-knowledge-research-engineer/ -/roadmap/innovation_lab/updates/2023-07-12/index.{html} /roadmap/innovation_lab/updates/2023-07-12/ -/roadmap/nomos/updates/2023-07-31/index.{html} /roadmap/nomos/updates/2023-07-31/ -/private/notes/CJK-+-Latex-Support-%E6%B5%8B%E8%AF%95/index.{html} /private/notes/CJK-+-Latex-Support-%E6%B5%8B%E8%AF%95/ -/private/notes/custom-Domain/index.{html} /private/notes/custom-Domain/ -/private/roadmap/consensus/candidates/claro/index.{html} /private/roadmap/consensus/candidates/claro/ -/private/roadmap/networking/carnot-waku-specification/index.{html} /private/roadmap/networking/carnot-waku-specification/ -/roadmap/innovation_lab/updates/2023-08-02/index.{html} /roadmap/innovation_lab/updates/2023-08-02/ -/private/notes/config/index.{html} /private/notes/config/ -/private/roadmap/consensus/overview/index.{html} /private/roadmap/consensus/overview/ -/roadmap/codex/updates/2023-08-01/index.{html} /roadmap/codex/updates/2023-08-01/ -/private/notes/showcase/index.{html} /private/notes/showcase/ -/private/roadmap/consensus/candidates/carnot/FAQ/index.{html} /private/roadmap/consensus/candidates/carnot/FAQ/ -/private/roadmap/consensus/development/overview/index.{html} /private/roadmap/consensus/development/overview/ /private/roles/rust-developer/index.{html} /private/roles/rust-developer/ -/roadmap/waku/updates/2023-07-31/index.{html} /roadmap/waku/updates/2023-07-31/ -/private/notes/hosting/index.{html} /private/notes/hosting/ +/roadmap/nomos/updates/2023-07-31/index.{html} /roadmap/nomos/updates/2023-07-31/ /private/notes/preview-changes/index.{html} /private/notes/preview-changes/ -/private/notes/search/index.{html} /private/notes/search/ -/private/roadmap/virtual-machines/overview/index.{html} /private/roadmap/virtual-machines/overview/ -/roadmap/acid/updates/2023-08-02/index.{html} /roadmap/acid/updates/2023-08-02/ -/roadmap/waku/updates/2023-07-24/index.{html} /roadmap/waku/updates/2023-07-24/ +/private/notes/showcase/index.{html} /private/notes/showcase/ +/private/roles/distributed-systems-researcher/index.{html} /private/roles/distributed-systems-researcher/ +/roadmap/innovation_lab/updates/2023-07-12/index.{html} /roadmap/innovation_lab/updates/2023-07-12/ +/private/notes/philosophy/index.{html} /private/notes/philosophy/ +/private/roadmap/networking/carnot-waku-specification/index.{html} /private/roadmap/networking/carnot-waku-specification/ /index.html / -/private/roadmap/networking/status-network-agents/index.{html} /private/roadmap/networking/status-network-agents/ -/private/roadmap/networking/status-waku-kurtosis/index.{html} /private/roadmap/networking/status-waku-kurtosis/ -/roadmap/nomos/updates/2023-07-24/index.{html} /roadmap/nomos/updates/2023-07-24/ -/roadmap/vac/updates/2023-07-10/index.{html} /roadmap/vac/updates/2023-07-10/ +/private/notes/CJK-+-Latex-Support-%E6%B5%8B%E8%AF%95/index.{html} /private/notes/CJK-+-Latex-Support-%E6%B5%8B%E8%AF%95/ /private/notes/editing/index.{html} /private/notes/editing/ -/private/notes/setup/index.{html} /private/notes/setup/ -/private/roadmap/consensus/theory/overview/index.{html} /private/roadmap/consensus/theory/overview/ +/private/notes/hosting/index.{html} /private/notes/hosting/ 
+/private/roadmap/consensus/candidates/carnot/overview/index.{html} /private/roadmap/consensus/candidates/carnot/overview/ +/private/roles/zero-knowledge-research-engineer/index.{html} /private/roles/zero-knowledge-research-engineer/ +/roadmap/codex/updates/2023-07-21/index.{html} /roadmap/codex/updates/2023-07-21/ +/private/notes/config/index.{html} /private/notes/config/ +/private/notes/custom-Domain/index.{html} /private/notes/custom-Domain/ +/private/notes/obsidian/index.{html} /private/notes/obsidian/ +/private/requirements/overview/index.{html} /private/requirements/overview/ +/private/notes/callouts/index.{html} /private/notes/callouts/ +/private/roadmap/consensus/candidates/claro/index.{html} /private/roadmap/consensus/candidates/claro/ +/private/roadmap/consensus/development/overview/index.{html} /private/roadmap/consensus/development/overview/ +/roadmap/vac/updates/2023-07-31/index.{html} /roadmap/vac/updates/2023-07-31/ +/private/roadmap/networking/overview/index.{html} /private/roadmap/networking/overview/ +/private/roadmap/networking/status-network-agents/index.{html} /private/roadmap/networking/status-network-agents/ +/private/notes/ignore-notes/index.{html} /private/notes/ignore-notes/ +/private/notes/search/index.{html} /private/notes/search/ +/private/roadmap/consensus/candidates/carnot/FAQ/index.{html} /private/roadmap/consensus/candidates/carnot/FAQ/ +/private/roadmap/consensus/theory/snow-family/index.{html} /private/roadmap/consensus/theory/snow-family/ diff --git a/roadmap/acid/updates/2023-08-02/index.html b/roadmap/acid/updates/2023-08-02/index.html index 756cda65d..0dd9955f2 100644 --- a/roadmap/acid/updates/2023-08-02/index.html +++ b/roadmap/acid/updates/2023-08-02/index.html @@ -103,7 +103,7 @@ index: data.index, links: data.links, })), - fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.fac68475ffec5f5f9e8dca14f02f2bba.min.json") + fetch("https:\/\/roadmap.logos.co\/indices\/contentIndex.7df7b9d5330d839733e334d553ef23c2.min.json") .then(data => data.json()), ]) .then(([{index, links}, content]) => ({ @@ -321,7 +321,7 @@ Aug 3, 2023
  • Podcast timelines are being set. In production right now. Nick delivered graphics for HiO but we need a full pack.
  • First HiO episode is in the works. Will be ready in 2 weeks to fit in the rollout of the LPE.
  • 
-    **Louisa / Events **
+    Louisa / Events