From 5612d6dfb8b528e28d3c354fe1e218a72b461233 Mon Sep 17 00:00:00 2001 From: calinah Date: Fri, 8 Dec 2023 18:19:22 +0000 Subject: [PATCH] deploy: ce9249835c9b73454e2484204003edca0e153f8b --- 404.html | 4 ++-- ...d-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg} | 2 +- assets/js/4738c5d7.77dc01fb.js | 1 - assets/js/4738c5d7.c6062697.js | 1 + .../{runtime~main.63faf25a.js => runtime~main.45d2a5df.js} | 2 +- blog.html | 4 ++-- blog/archive.html | 4 ++-- blog/first-blog-post.html | 4 ++-- blog/long-blog-post.html | 4 ++-- blog/mdx-blog-post.html | 4 ++-- blog/tags.html | 4 ++-- blog/tags/docusaurus.html | 4 ++-- blog/tags/facebook.html | 4 ++-- blog/tags/hello.html | 4 ++-- blog/tags/hola.html | 4 ++-- blog/welcome.html | 4 ++-- graphcast/design-principles.html | 4 ++-- graphcast/intro.html | 4 ++-- graphcast/radios/graphcast-cli.html | 4 ++-- graphcast/radios/listener-radio.html | 4 ++-- graphcast/radios/subgraph-radio/advanced-configuration.html | 4 ++-- graphcast/radios/subgraph-radio/http-server.html | 4 ++-- graphcast/radios/subgraph-radio/intro.html | 4 ++-- graphcast/radios/subgraph-radio/monitoring.html | 4 ++-- graphcast/radios/subgraph-radio/poi-cross-checking.html | 4 ++-- .../radios/subgraph-radio/subgraph-upgrade-presyncing.html | 4 ++-- graphcast/sdk/intro.html | 4 ++-- graphcast/sdk/radio-dev.html | 4 ++-- graphcast/sdk/registry.html | 4 ++-- img/launchpad-release-channels.svg | 2 +- index.html | 4 ++-- launchpad/advanced/advanced-kubernetes.html | 4 ++-- launchpad/client-side-tooling.html | 4 ++-- launchpad/design-principles.html | 4 ++-- launchpad/faq.html | 4 ++-- launchpad/guides/arbitrum-archive-kubernetes-guide.html | 4 ++-- launchpad/guides/avalanche-archive-kubernetes.html | 4 ++-- launchpad/guides/celo-archive-kubernetes-guide.html | 4 ++-- launchpad/guides/goerli-indexer-guide.html | 4 ++-- launchpad/guides/install-fcos.html | 4 ++-- launchpad/guides/kubeadm-upgrade-cluster-config.html | 4 ++-- 
launchpad/guides/kubeadm-upgrade-nodes.html | 4 ++-- .../guides/kubernetes-create-cluster-with-kubeadm.html | 4 ++-- launchpad/intro.html | 4 ++-- launchpad/modularity.html | 4 ++-- launchpad/other-resources.html | 4 ++-- launchpad/prerequisites.html | 4 ++-- launchpad/quick-start.html | 4 ++-- launchpad/release-channels.html | 6 +++--- launchpad/server-side-stack.html | 4 ++-- launchpad/supported-namespaces.html | 4 ++-- mips-resources/intro.html | 4 ++-- mips-resources/mips-faq.html | 4 ++-- 53 files changed, 101 insertions(+), 101 deletions(-) rename assets/images/{launchpad-release-channels-8fac7b8a259bd608e3623a6ff75be7d7.svg => launchpad-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg} (71%) delete mode 100644 assets/js/4738c5d7.77dc01fb.js create mode 100644 assets/js/4738c5d7.c6062697.js rename assets/js/{runtime~main.63faf25a.js => runtime~main.45d2a5df.js} (99%) diff --git a/404.html b/404.html index d7a62af3..de3b13c7 100644 --- a/404.html +++ b/404.html @@ -5,13 +5,13 @@ Page Not Found | GraphOps Docs - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/images/launchpad-release-channels-8fac7b8a259bd608e3623a6ff75be7d7.svg b/assets/images/launchpad-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg similarity index 71% rename from assets/images/launchpad-release-channels-8fac7b8a259bd608e3623a6ff75be7d7.svg rename to assets/images/launchpad-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg index b788c14f..e0998533 100644 --- a/assets/images/launchpad-release-channels-8fac7b8a259bd608e3623a6ff75be7d7.svg +++ b/assets/images/launchpad-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/assets/js/4738c5d7.77dc01fb.js b/assets/js/4738c5d7.77dc01fb.js deleted file mode 100644 index d8c711ff..00000000 --- a/assets/js/4738c5d7.77dc01fb.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[6013],{3905:(e,a,n)=>{n.d(a,{Zo:()=>c,kt:()=>m});var t=n(7294);function r(e,a,n){return a in e?Object.defineProperty(e,a,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[a]=n,e}function i(e,a){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);a&&(t=t.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),n.push.apply(n,t)}return n}function s(e){for(var a=1;a=0||(r[n]=e[n]);return r}(e,a);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var l=t.createContext({}),p=function(e){var a=t.useContext(l),n=a;return e&&(n="function"==typeof e?e(a):s(s({},a),e)),n},c=function(e){var a=p(e.components);return t.createElement(l.Provider,{value:a},e.children)},u="mdxType",h={inlineCode:"code",wrapper:function(e){var a=e.children;return t.createElement(t.Fragment,{},a)}},d=t.forwardRef((function(e,a){var 
n=e.components,r=e.mdxType,i=e.originalType,l=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),u=p(n),d=r,m=u["".concat(l,".").concat(d)]||u[d]||h[d]||i;return n?t.createElement(m,s(s({ref:a},c),{},{components:n})):t.createElement(m,s({ref:a},c))}));function m(e,a){var n=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var i=n.length,s=new Array(i);s[0]=d;var o={};for(var l in a)hasOwnProperty.call(a,l)&&(o[l]=a[l]);o.originalType=e,o[u]="string"==typeof e?e:r,s[1]=o;for(var p=2;p{n.r(a),n.d(a,{assets:()=>l,contentTitle:()=>s,default:()=>h,frontMatter:()=>i,metadata:()=>o,toc:()=>p});var t=n(7462),r=(n(7294),n(3905));const i={sidebar_position:3},s="Release Channels",o={unversionedId:"launchpad/release-channels",id:"launchpad/release-channels",title:"Release Channels",description:"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.",source:"@site/docs/launchpad/release-channels.md",sourceDirName:"launchpad",slug:"/launchpad/release-channels",permalink:"/launchpad/release-channels",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/release-channels.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3},sidebar:"launchpadSidebar",previous:{title:"Quick Start",permalink:"/launchpad/quick-start"},next:{title:"Supported Namespaces",permalink:"/launchpad/supported-namespaces"}},l={},p=[],c={toc:p},u="wrapper";function h(e){let{components:a,...i}=e;return(0,r.kt)(u,(0,t.Z)({},c,i,{components:a,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"release-channels"},"Release Channels"),(0,r.kt)("p",null,"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies. 
"),(0,r.kt)("p",null,"For a reminder of the various components within Launchpad and their intricate connections, we recommend revisiting our ",(0,r.kt)("a",{parentName:"p",href:"/launchpad/intro"},"Intro"),"."),(0,r.kt)("p",null,"This guide offers a comprehensive walkthrough, outlining the steps, automated and manual, required to introduce a new version release of an application, ie. Erigon, into the 'launchpad-charts' repository as a ",(0,r.kt)("strong",{parentName:"p"},"canary")," release and ultimately transitioning it to a ",(0,r.kt)("strong",{parentName:"p"},"stable")," state within its designated 'launchpad-namespace,' such as Ethereum. "),(0,r.kt)("p",null,"The diagram below provides a visual representation illustrating the interdependence and impact of various components and workflows."),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Release Channels Flow",src:n(6479).Z,width:"960",height:"540"})),(0,r.kt)("h1",{id:"from-new-version-to-launchpad-namespaces-stable"},"From new version to ",(0,r.kt)("inlineCode",{parentName:"h1"},"launchpad-namespaces")," stable"),(0,r.kt)("p",null,"Below you can find a more comprehensive breakdown of the process, divided into automated workflows within ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),", as well as manual operator steps. This process guides the transition of a new application version from the initial ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts")," canary release to its eventual stability within the corresponding ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),". 
For this walkthrough we will use Erigon as an example."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"launchpad-charts")," "),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"On each run, bot looks-up ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/ledgerwatch/erigon/tags"},"Erigon tags")," and upon finding a new version, opens a PR into ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/pull/133"},(0,r.kt)("inlineCode",{parentName:"a"},"launchpad-charts/charts/erigon")," ")),(0,r.kt)("li",{parentName:"ul"},"The new PR triggers a workflow that publishes a new ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/releases/tag/erigon-0.8.1-canary.1"},(0,r.kt)("inlineCode",{parentName:"a"},"pre-release"))," into the repo."),(0,r.kt)("li",{parentName:"ul"},"Another workflow runs and adds the newly released ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," chart to the ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," Helm repo index")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"launchpad-namespaces")," "),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"On each run, bot checks for new chart releases and upon finding one, pushes an update branch and opens a ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces/pull/38"},"new PR to namespaces")),(0,r.kt)("li",{parentName:"ul"},"Bot runs again, auto-merges the PR and creates a tag"),(0,r.kt)("li",{parentName:"ul"},"Workflow runs, updates semver tags")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"operator")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Tests the new ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," chart release to verify it is working properly, if it is adds commit to PR to set the ",(0,r.kt)("inlineCode",{parentName:"li"},"stable")," chart release version"),(0,r.kt)("li",{parentName:"ul"},"Updates their helmfile reference to point at new namespace 
reference and runs changes against ",(0,r.kt)("inlineCode",{parentName:"li"},"eth-goerli")," namespace using ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-goerli")),(0,r.kt)("li",{parentName:"ul"},"If the previous task runs successfully and workloads appear healthy, the operator updates their helmfile reference for ",(0,r.kt)("inlineCode",{parentName:"li"},"eth-mainnet")," namespace and runs ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-mainnet")),(0,r.kt)("li",{parentName:"ul"},"If ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-mainnet")," succeeds and all workloads are healthy, operator manually tags the ",(0,r.kt)("inlineCode",{parentName:"li"},"ethereum")," namespace as ",(0,r.kt)("inlineCode",{parentName:"li"},"stable"))),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"Manually tagging a namespace as ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," is an intentional process. Our aim is to ensure that workloads undergo comprehensive testing before being tagged as ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," which signals to users readiness for running on ",(0,r.kt)("inlineCode",{parentName:"p"},"mainnet"),".")),(0,r.kt)("p",null,"Alongside the ability to choose between ",(0,r.kt)("inlineCode",{parentName:"p"},"canary")," or ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," releases based on user risk preferences, we've also enabled the capability to manually override a specific chart version during namespace deployment."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},' - path: git::https://github.com/graphops/launchpad-namespaces.git@ethereum/helmfile.yaml?ref=ethereum-stable/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n flavor: "goerli"\n erigon:\n chartVersion: "0.8.1" # to override the chart version the namespace is setup with\n values:\n statefulNode:\n jwt:\n 
existingSecret:\n name: jwt\n key: jwt\n nimbus:\n values:\n nimbus:\n jwt:\n existingSecret:\n name: jwt\n key: jwt\n')),(0,r.kt)("p",null,"Similarly to being able to override ",(0,r.kt)("inlineCode",{parentName:"p"},"chartVersion"),", users have the ability to override ",(0,r.kt)("inlineCode",{parentName:"p"},"chartUrl")," to specify a self-maintained chart, or a chart maintained by a different organisation."))}h.isMDXComponent=!0},6479:(e,a,n)=>{n.d(a,{Z:()=>t});const t=n.p+"assets/images/launchpad-release-channels-8fac7b8a259bd608e3623a6ff75be7d7.svg"}}]); \ No newline at end of file diff --git a/assets/js/4738c5d7.c6062697.js b/assets/js/4738c5d7.c6062697.js new file mode 100644 index 00000000..2d06a1d8 --- /dev/null +++ b/assets/js/4738c5d7.c6062697.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[6013],{3905:(e,a,n)=>{n.d(a,{Zo:()=>c,kt:()=>m});var t=n(7294);function r(e,a,n){return a in e?Object.defineProperty(e,a,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[a]=n,e}function i(e,a){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);a&&(t=t.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),n.push.apply(n,t)}return n}function o(e){for(var a=1;a=0||(r[n]=e[n]);return r}(e,a);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var l=t.createContext({}),p=function(e){var a=t.useContext(l),n=a;return e&&(n="function"==typeof e?e(a):o(o({},a),e)),n},c=function(e){var a=p(e.components);return t.createElement(l.Provider,{value:a},e.children)},u="mdxType",h={inlineCode:"code",wrapper:function(e){var a=e.children;return t.createElement(t.Fragment,{},a)}},d=t.forwardRef((function(e,a){var 
n=e.components,r=e.mdxType,i=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=p(n),d=r,m=u["".concat(l,".").concat(d)]||u[d]||h[d]||i;return n?t.createElement(m,o(o({ref:a},c),{},{components:n})):t.createElement(m,o({ref:a},c))}));function m(e,a){var n=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var i=n.length,o=new Array(i);o[0]=d;var s={};for(var l in a)hasOwnProperty.call(a,l)&&(s[l]=a[l]);s.originalType=e,s[u]="string"==typeof e?e:r,o[1]=s;for(var p=2;p{n.r(a),n.d(a,{assets:()=>l,contentTitle:()=>o,default:()=>h,frontMatter:()=>i,metadata:()=>s,toc:()=>p});var t=n(7462),r=(n(7294),n(3905));const i={sidebar_position:3},o="Release Channels",s={unversionedId:"launchpad/release-channels",id:"launchpad/release-channels",title:"Release Channels",description:"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.",source:"@site/docs/launchpad/release-channels.md",sourceDirName:"launchpad",slug:"/launchpad/release-channels",permalink:"/launchpad/release-channels",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/release-channels.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3},sidebar:"launchpadSidebar",previous:{title:"Quick Start",permalink:"/launchpad/quick-start"},next:{title:"Supported Namespaces",permalink:"/launchpad/supported-namespaces"}},l={},p=[],c={toc:p},u="wrapper";function h(e){let{components:a,...i}=e;return(0,r.kt)(u,(0,t.Z)({},c,i,{components:a,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"release-channels"},"Release Channels"),(0,r.kt)("p",null,"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies. 
"),(0,r.kt)("p",null,"For a reminder of the various components within Launchpad and their intricate connections, we recommend revisiting our ",(0,r.kt)("a",{parentName:"p",href:"/launchpad/intro"},"Intro"),"."),(0,r.kt)("p",null,"This guide offers a comprehensive walkthrough, outlining the steps, automated and manual, required to introduce a new version release of an application, ie. Erigon, into the ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts")," repository as a ",(0,r.kt)("strong",{parentName:"p"},"canary")," release and ultimately transitioning it to a ",(0,r.kt)("strong",{parentName:"p"},"stable")," state within its designated ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespace"),", such as Ethereum. "),(0,r.kt)("p",null,"The diagram below provides a visual representation illustrating the interdependence and impact of various components and workflows."),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Release Channels Flow",src:n(6479).Z,width:"960",height:"540"})),(0,r.kt)("h1",{id:"from-new-version-to-launchpad-namespaces-stable"},"From new version to ",(0,r.kt)("inlineCode",{parentName:"h1"},"launchpad-namespaces")," stable"),(0,r.kt)("p",null,"Below you can find a more comprehensive breakdown of the process, divided into automated workflows within ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),", as well as manual operator steps. This process guides the transition of a new application version from the initial ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts")," canary release to its eventual stability within the corresponding ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),". 
For this walkthrough we will use Erigon as an example."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"launchpad-charts")," "),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"On each run, bot looks-up ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/ledgerwatch/erigon/tags"},"Erigon tags")," and upon finding a new version, opens a PR into ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/pull/133"},(0,r.kt)("inlineCode",{parentName:"a"},"launchpad-charts/charts/erigon")," ")),(0,r.kt)("li",{parentName:"ul"},"The new PR triggers a workflow that publishes a new ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/releases/tag/erigon-0.8.1-canary.1"},(0,r.kt)("inlineCode",{parentName:"a"},"pre-release"))," into the repo."),(0,r.kt)("li",{parentName:"ul"},"Another workflow runs and adds the newly released ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," chart to the ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," Helm repo index")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"launchpad-namespaces")," "),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"On each run, bot checks for new chart releases and upon finding one, pushes an update branch and opens a ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces/pull/38"},"new PR to namespaces")),(0,r.kt)("li",{parentName:"ul"},"Bot runs again, auto-merges the PR and creates a tag"),(0,r.kt)("li",{parentName:"ul"},"Workflow runs, updates semver tags")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"operator")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Tests the new ",(0,r.kt)("inlineCode",{parentName:"li"},"canary")," chart release to verify it is working properly, if it is adds commit to PR to set the ",(0,r.kt)("inlineCode",{parentName:"li"},"stable")," chart release version. 
Following the merge of this PR, the new ",(0,r.kt)("inlineCode",{parentName:"li"},"stable")," chart release is automatically issued in draft mode. This step provides the operator with an opportunity to review and manually publish the final release, ensuring precise control and quality assurance in the deployment process."),(0,r.kt)("li",{parentName:"ul"},"Run ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-goerli")," which should pick changes from latest ",(0,r.kt)("inlineCode",{parentName:"li"},"ethereum")," canary tag that would contain new ",(0,r.kt)("inlineCode",{parentName:"li"},"erigon")," canary chart version (after renovate has run and has picked those up, which it does in 15m intervals)."),(0,r.kt)("li",{parentName:"ul"},"If the previous task runs successfully and workloads appear healthy, the operator updates their helmfile reference to ",(0,r.kt)("inlineCode",{parentName:"li"},"ethereum-canary/latest")," for ",(0,r.kt)("inlineCode",{parentName:"li"},"eth-mainnet")," namespace and runs ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-mainnet"),"."),(0,r.kt)("li",{parentName:"ul"},"If ",(0,r.kt)("inlineCode",{parentName:"li"},"task releases:apply -- eth-mainnet")," succeeds and all workloads are healthy, operator manually tags the ",(0,r.kt)("inlineCode",{parentName:"li"},"ethereum")," namespace as ",(0,r.kt)("inlineCode",{parentName:"li"},"stable"))),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"Manually tagging a namespace as ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," is an intentional process. 
Our aim is to ensure that workloads undergo comprehensive testing before being tagged as ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," which signals to users readiness for running on ",(0,r.kt)("inlineCode",{parentName:"p"},"mainnet"),".")),(0,r.kt)("p",null,"Alongside the ability to choose between ",(0,r.kt)("inlineCode",{parentName:"p"},"canary")," or ",(0,r.kt)("inlineCode",{parentName:"p"},"stable")," releases based on user risk preferences, we've also enabled the capability to manually override a specific chart version during namespace deployment."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},' - path: git::https://github.com/graphops/launchpad-namespaces.git@ethereum/helmfile.yaml?ref=ethereum-stable/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n flavor: "goerli"\n erigon:\n chartVersion: "0.8.1" # to override the chart version the namespace is setup with\n values:\n statefulNode:\n jwt:\n existingSecret:\n name: jwt\n key: jwt\n nimbus:\n values:\n nimbus:\n jwt:\n existingSecret:\n name: jwt\n key: jwt\n')),(0,r.kt)("p",null,"Similarly to being able to override ",(0,r.kt)("inlineCode",{parentName:"p"},"chartVersion"),", users have the ability to override ",(0,r.kt)("inlineCode",{parentName:"p"},"chartUrl")," to specify a self-maintained chart, or a chart maintained by a different organisation."))}h.isMDXComponent=!0},6479:(e,a,n)=>{n.d(a,{Z:()=>t});const t=n.p+"assets/images/launchpad-release-channels-a016c97fa21a1eab13467acaeb8b8235.svg"}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.63faf25a.js b/assets/js/runtime~main.45d2a5df.js similarity index 99% rename from assets/js/runtime~main.63faf25a.js rename to assets/js/runtime~main.45d2a5df.js index 61d982c6..93fe8641 100644 --- a/assets/js/runtime~main.63faf25a.js +++ b/assets/js/runtime~main.45d2a5df.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,d,t={},r={};function b(e){var a=r[e];if(void 0!==a)return 
a.exports;var f=r[e]={exports:{}};return t[e].call(f.exports,f,f.exports,b),f.exports}b.m=t,e=[],b.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,b.d(d,t),d},b.d=(e,a)=>{for(var f in a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",453:"30a24c52",494:"8ba7f970",533:"b2b675dd",653:"4ac6162a",948:"8717b14a",1263:"d2eb8d4c",1477:"b2f554cd",1515:"24ebdd5e",1613:"a19c92ce",1633:"031793e1",1713:"a7023ddc",1723:"1026ed7f",1914:"d9f32620",2267:"59362658",2362:"e273c56f",2398:"55efb065",2535:"814f3328",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3601:"4fa5284f",3608:"9e4087bc",3936:"d5a57370",3997:"6ceb8cd0",4013:"01a85c17",4089:"bd9dd2f9",4439:"422a9904",5119:"931ae2d5",5207:"ea715d66",5252:"6fb70ffc",5286:"07d86b20",6013:"4738c5d7",6103:"ccc49370",6241:"163dca7f",6254:"2472ae08",6391:"14464846",6686:"ed2299db",6798:"94e1a954",6885:"4ae37811",6938:"608ae6a4",6998:"43c72bb1",7058:"db074018",7178:"096bfee4",7523:"71914854",7633:"9b790421",7645:"a7434565",7751:"a3ac3f10",7918:"17896441",8056:"ca832579",8176:"d7261236",8271:"1c091541",8610:"6875c492",8613:"4515f2ba",8636:"f4f34a3a",9003:"925b3f96",9035:"4c9e35b1",9334:"247783bb",9423:"e904f572",9514:"1be78505",9638:"4ecff493",9642:"7661071f",9700:"e16015c
a",9813:"2c9fa8e7",9960:"04eb4478"}[e]||e)+"."+{53:"753e8387",110:"479310be",453:"b07fef1c",494:"e590f9cd",533:"0531b767",653:"64526e18",948:"78963ab1",1263:"fd6e458f",1477:"6c07586f",1515:"42a4eb53",1613:"e67e0e96",1633:"68e1d3d8",1713:"23b2ba87",1723:"3fb7a1bf",1914:"5a4a9fed",2267:"6401aa21",2362:"cf7cab60",2398:"ebd12c11",2535:"9dbd0ebe",3089:"845cad8c",3205:"e7ede2a4",3237:"b3606ef8",3514:"2a3b45ce",3601:"52937c88",3608:"a696b2b3",3936:"384c1c6b",3997:"8bac1f5e",4013:"467cbdf3",4089:"5e9e31da",4439:"4e10387f",4972:"73c7d017",5119:"2df6f295",5207:"925edfec",5252:"42aab89e",5286:"42497bf9",6013:"77dc01fb",6048:"fb9b0605",6103:"d9c41d1e",6241:"1406ae09",6254:"557def3a",6316:"e89c7acb",6391:"23f601d3",6686:"7b8338c3",6798:"f9991880",6885:"f6440e19",6938:"a6023127",6998:"ba8ce235",7058:"4f7591c4",7178:"f73c46b5",7523:"3c7ec2de",7633:"c35cee2c",7645:"8a3a8041",7724:"3186a061",7751:"fc3b5bef",7918:"84e372fb",8056:"2e96bb68",8176:"afd1eded",8271:"60b718c6",8610:"f37b7b5c",8613:"86ab55b5",8636:"94f36cf8",8954:"73b00f6d",9003:"35534246",9035:"2be4405b",9334:"dadfb5a6",9423:"e7ee487c",9487:"ab0ae9a2",9514:"3a6a17fd",9638:"9a636aa0",9642:"aa215b28",9700:"d1830de3",9813:"224a411d",9960:"ec6f7725"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs:",b.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return e={14464846:"6391",17896441:"7918",59362658:"2267",66406991:"110",71914854:"7523","935f2afb":"53","30a24c52":"453","8ba7f970":"494",b2b675dd:"533","4ac6162a":"653","8717b14a":"948",d2eb8d4c:"1263",b2f554cd:"1477","24ebdd5e":"1515",a19c92ce:"1613","031793e1":"1633",a7023ddc:"1713","1026ed7f":"1723",d9f32620:"1914",e273c56f:"2362","55efb065":"2398","814f3328":"2535",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","4fa5284f":"3601","9e4087bc":"3608",d5a57370:"3936","6ceb8cd0":"3997","01a85c17":"4013",bd9dd2f9:"4089","422a9904":"4439","931ae2d5":"5119",ea715d66:"5207","6fb70ffc":"5252","07d86b20":"5286","4738c5d7":"6013",ccc49370:"6103","163dca7f":"6241","2472ae08":"6254",ed2299db:"6686","94e1a954":"6798","4ae37811":"6885","608ae6a4":"6938","43c72bb1":"6998",db074018:"7058","096bfee4":"7178","9b790421":"7633",a7434565:"7645",a3ac3f10:"7751",ca832579:"8056",d7261236:"8176","1c091541":"8271","6875c492":"8610","4515f2ba":"8613",f4f34a3a:"8636","925b3f96":"9003","4c9e35b1":"9035","247783bb":"9334",e904f572:"9423","1be78505":"9514","4ecff493":"9638","7661071f":"9642",e16015ca:"9700","2c9fa8e7":"9813","04eb4478":"9960"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n{"use strict";var 
e,a,f,c,d,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={exports:{}};return t[e].call(f.exports,f,f.exports,b),f.exports}b.m=t,e=[],b.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,b.d(d,t),d},b.d=(e,a)=>{for(var f in a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",453:"30a24c52",494:"8ba7f970",533:"b2b675dd",653:"4ac6162a",948:"8717b14a",1263:"d2eb8d4c",1477:"b2f554cd",1515:"24ebdd5e",1613:"a19c92ce",1633:"031793e1",1713:"a7023ddc",1723:"1026ed7f",1914:"d9f32620",2267:"59362658",2362:"e273c56f",2398:"55efb065",2535:"814f3328",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3601:"4fa5284f",3608:"9e4087bc",3936:"d5a57370",3997:"6ceb8cd0",4013:"01a85c17",4089:"bd9dd2f9",4439:"422a9904",5119:"931ae2d5",5207:"ea715d66",5252:"6fb70ffc",5286:"07d86b20",6013:"4738c5d7",6103:"ccc49370",6241:"163dca7f",6254:"2472ae08",6391:"14464846",6686:"ed2299db",6798:"94e1a954",6885:"4ae37811",6938:"608ae6a4",6998:"43c72bb1",7058:"db074018",7178:"096bfee4",7523:"71914854",7633:"9b790421",7645:"a7434565",7751:"a3ac3f10",7918:"17896441",8056:"ca832579",8176:"d7261236",8271:"1c091541",8610:"6875c492",8613:"4515f2ba",8636:"f4f34a3a",9003:"925b3f96",9035:"4c9e35b1",9334:"247783bb",9423:"e904f
572",9514:"1be78505",9638:"4ecff493",9642:"7661071f",9700:"e16015ca",9813:"2c9fa8e7",9960:"04eb4478"}[e]||e)+"."+{53:"753e8387",110:"479310be",453:"b07fef1c",494:"e590f9cd",533:"0531b767",653:"64526e18",948:"78963ab1",1263:"fd6e458f",1477:"6c07586f",1515:"42a4eb53",1613:"e67e0e96",1633:"68e1d3d8",1713:"23b2ba87",1723:"3fb7a1bf",1914:"5a4a9fed",2267:"6401aa21",2362:"cf7cab60",2398:"ebd12c11",2535:"9dbd0ebe",3089:"845cad8c",3205:"e7ede2a4",3237:"b3606ef8",3514:"2a3b45ce",3601:"52937c88",3608:"a696b2b3",3936:"384c1c6b",3997:"8bac1f5e",4013:"467cbdf3",4089:"5e9e31da",4439:"4e10387f",4972:"73c7d017",5119:"2df6f295",5207:"925edfec",5252:"42aab89e",5286:"42497bf9",6013:"c6062697",6048:"fb9b0605",6103:"d9c41d1e",6241:"1406ae09",6254:"557def3a",6316:"e89c7acb",6391:"23f601d3",6686:"7b8338c3",6798:"f9991880",6885:"f6440e19",6938:"a6023127",6998:"ba8ce235",7058:"4f7591c4",7178:"f73c46b5",7523:"3c7ec2de",7633:"c35cee2c",7645:"8a3a8041",7724:"3186a061",7751:"fc3b5bef",7918:"84e372fb",8056:"2e96bb68",8176:"afd1eded",8271:"60b718c6",8610:"f37b7b5c",8613:"86ab55b5",8636:"94f36cf8",8954:"73b00f6d",9003:"35534246",9035:"2be4405b",9334:"dadfb5a6",9423:"e7ee487c",9487:"ab0ae9a2",9514:"3a6a17fd",9638:"9a636aa0",9642:"aa215b28",9700:"d1830de3",9813:"224a411d",9960:"ec6f7725"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs:",b.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return e={14464846:"6391",17896441:"7918",59362658:"2267",66406991:"110",71914854:"7523","935f2afb":"53","30a24c52":"453","8ba7f970":"494",b2b675dd:"533","4ac6162a":"653","8717b14a":"948",d2eb8d4c:"1263",b2f554cd:"1477","24ebdd5e":"1515",a19c92ce:"1613","031793e1":"1633",a7023ddc:"1713","1026ed7f":"1723",d9f32620:"1914",e273c56f:"2362","55efb065":"2398","814f3328":"2535",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","4fa5284f":"3601","9e4087bc":"3608",d5a57370:"3936","6ceb8cd0":"3997","01a85c17":"4013",bd9dd2f9:"4089","422a9904":"4439","931ae2d5":"5119",ea715d66:"5207","6fb70ffc":"5252","07d86b20":"5286","4738c5d7":"6013",ccc49370:"6103","163dca7f":"6241","2472ae08":"6254",ed2299db:"6686","94e1a954":"6798","4ae37811":"6885","608ae6a4":"6938","43c72bb1":"6998",db074018:"7058","096bfee4":"7178","9b790421":"7633",a7434565:"7645",a3ac3f10:"7751",ca832579:"8056",d7261236:"8176","1c091541":"8271","6875c492":"8610","4515f2ba":"8613",f4f34a3a:"8636","925b3f96":"9003","4c9e35b1":"9035","247783bb":"9334",e904f572:"9423","1be78505":"9514","4ecff493":"9638","7661071f":"9642",e16015ca:"9700","2c9fa8e7":"9813","04eb4478":"9960"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n Blog | GraphOps Docs - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/archive.html b/blog/archive.html index 7f9813a4..29e46bf0 100644 --- a/blog/archive.html +++ b/blog/archive.html @@ -5,13 +5,13 @@ Archive | GraphOps Docs - + - + \ No newline at end of file diff --git a/blog/first-blog-post.html b/blog/first-blog-post.html index 75f0c1ce..c7b51eea 100644 --- a/blog/first-blog-post.html +++ b/blog/first-blog-post.html @@ -5,13 +5,13 @@ First Blog Post | GraphOps Docs - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/long-blog-post.html b/blog/long-blog-post.html index 6de694d5..c0ed1ea9 100644 --- a/blog/long-blog-post.html +++ b/blog/long-blog-post.html @@ -5,13 +5,13 @@ Long Blog Post | GraphOps Docs - +

Long Blog Post

· 3 min read
Endilie Yacop Sucipto

This is the summary of a very long blog post,

Use a <!-- truncate --> comment to limit blog post size in the list view.

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/mdx-blog-post.html b/blog/mdx-blog-post.html index 8428f6a7..1663ef76 100644 --- a/blog/mdx-blog-post.html +++ b/blog/mdx-blog-post.html @@ -5,13 +5,13 @@ MDX Blog Post | GraphOps Docs - +
- + \ No newline at end of file diff --git a/blog/tags.html b/blog/tags.html index bc1a84ee..ef2cde4f 100644 --- a/blog/tags.html +++ b/blog/tags.html @@ -5,13 +5,13 @@ Tags | GraphOps Docs - + - + \ No newline at end of file diff --git a/blog/tags/docusaurus.html b/blog/tags/docusaurus.html index cb2bdd13..2ab826df 100644 --- a/blog/tags/docusaurus.html +++ b/blog/tags/docusaurus.html @@ -5,13 +5,13 @@ 4 posts tagged with "docusaurus" | GraphOps Docs - +

4 posts tagged with "docusaurus"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/facebook.html b/blog/tags/facebook.html index 34230f9d..58b54a92 100644 --- a/blog/tags/facebook.html +++ b/blog/tags/facebook.html @@ -5,13 +5,13 @@ One post tagged with "facebook" | GraphOps Docs - +

One post tagged with "facebook"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hello.html b/blog/tags/hello.html index 5f3de77b..77a39fcd 100644 --- a/blog/tags/hello.html +++ b/blog/tags/hello.html @@ -5,13 +5,13 @@ 2 posts tagged with "hello" | GraphOps Docs - +

2 posts tagged with "hello"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hola.html b/blog/tags/hola.html index 67c71715..01c48d68 100644 --- a/blog/tags/hola.html +++ b/blog/tags/hola.html @@ -5,13 +5,13 @@ One post tagged with "hola" | GraphOps Docs - +

One post tagged with "hola"

View All Tags

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/welcome.html b/blog/welcome.html index 8dbaa248..c0e3fd0a 100644 --- a/blog/welcome.html +++ b/blog/welcome.html @@ -5,13 +5,13 @@ Welcome | GraphOps Docs - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/graphcast/design-principles.html b/graphcast/design-principles.html index 80e88d68..0a9e9cbf 100644 --- a/graphcast/design-principles.html +++ b/graphcast/design-principles.html @@ -5,13 +5,13 @@ Design Principles | GraphOps Docs - +

Design Principles

There are two main components of Graphcast

  • The Graphcast SDK: The base layer SDK which interfaces with The Graph stack and the Waku network. This includes interactions with an Ethereum client, a Graph node client, a client for the Indexer management server, the Network subgraph and the Registry subgraph).
  • Radios: Highly customizable gossip applications, built with the help of the Graphcast SDK, which define the specific message formats and logic around constructing and handling the messages. They are the nodes communicating in the Graphcast Network.

The Graphcast SDK

The SDK is the base layer which is used to abstract all the necessary components of each Radio away from the user. That includes:

  • Establishes a connection to Graphcast via a Waku Gossip node, providing an interface for subscribing to specific topics and broadcasting messages across the network.
  • Interactions with a Graph node and a client for the Indexer management server.
  • Queries to Network and Registry subgraphs.
  • Checks message validity for past message injections, nonexistent blocks and expired timestamps. It also guarantees that messages are signed by an authorised operator address of an active on-chain Indexer (this can be used as a basis for a reputation system).
  • Supports a flexible and customizable configuration of the Graphcast gossip agent, enabling specification of network settings, peer discovery mechanisms, message encoding formats, and more. For detailed instructions on configuring Graphcast to suit your needs, refer to the configuration guide.
  • Topics in Graphcast represent different categories or subjects of information. Nodes can dynamically subscribe to specific topics to receive messages related to those topics. Topics enable efficient message routing and dissemination within the network.
  • Provides comprehensive message handling structure to ensure that messages are reliably transmitted, received, and processed within the network.

Radios

General Radio components

  • Supports Radio for specific use cases.
  • Controls topic subscriptions dynamically for interested topics.
  • Provides Radio type definition used to verify the integrity and authenticity of messages exchanged within the network.
  • Collects Radio-specific information and incorporates it into Graphcast messages along with other relevant metadata.
  • Observes and handles relevant messages received from peers.
  • Provides performance metrics, logs, and API services.

The first Radio built on top of Graphcast is the Subgraph Radio. It's designed to facilitate real-time information exchange among participants in The Graph network and serves as a tool for Indexers and other network participants to share valuable Subgraph data.

With Subgraph Radio, Indexers can run a single Radio instance and track a wide variety of message types and data related to Subgraphs. Different use cases and message types form the different features of the Radio.

Features

Proof of Indexing (POI) cross-checking

Indexers must generate valid POIs to earn indexing rewards. Indexers find it beneficial to alert each other on the health status of subgraphs in community discussions. To alleviate the manual workload, the POI cross-checking feature within Subgraph Radio:

  • Defines message types and topics
  • Collects public POIs from the Graph node and sends them inside of Graphcast messages along with other useful metadata
  • Observes relevant messages and aggregates public POIs sent from other Indexers, in order to compare local POIs to remote POIs
  • Monitors the network for conflicts and takes certain actions if needed, for instance Indexers can configure an alert system to send messages to a custom channel in their Slack workspace, a Discord channel, or a Telegram chat.

Subgraph Upgrade Pre-sync

The subgraph upgrade pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. You can learn more about the feature here.

- + \ No newline at end of file diff --git a/graphcast/intro.html b/graphcast/intro.html index 9f473a6e..5ce8f30d 100644 --- a/graphcast/intro.html +++ b/graphcast/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Why Graphcast 1

Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas?

Why Graphcast 2

When using the protocol, the cost to broadcast information to other network participants is determined by gas fees on the blockchain.

Why Graphcast 3

Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models.

What is Graphcast

The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following uses cases:

  • Real-time cross-checking of subgraph data integrity, with active bail-out in the case of diverging from stake-weighted POI consensus.
  • Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers.
  • Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc.
  • Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc.
  • Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc.

Learn more

If you want to find out more about the initial idea behind Graphcast, as well as stay up to date with the newest developments, keep an eye on the GRC post on The Graph Forum, or join the Graphcast Discord channel.

Contributing

We welcome and appreciate your contributions! 🤝 ➡️ Graphcast SDK

- + \ No newline at end of file diff --git a/graphcast/radios/graphcast-cli.html b/graphcast/radios/graphcast-cli.html index 1ac2ae9f..b0c79496 100644 --- a/graphcast/radios/graphcast-cli.html +++ b/graphcast/radios/graphcast-cli.html @@ -5,13 +5,13 @@ Graphcast CLI | GraphOps Docs - +

Graphcast CLI

The source code for the Graphcast CLI is available on GitHub.

Introduction

The Graphcast CLI enables sending one-off messages. Currently, it can be used for the Subgraph Upgrade Pre-sync feature of Subgraph Radio.

The Graphcast CLI is configured using config variables. You will need to prepare the following config variables (either as env variables or passing CLI args when running the CLI):

NameDescription and Examples
PRIVATE_KEYPrivate key to the Graphcast ID wallet (precendence over mnemonics).
Example: PRIVATE_KEY=YOUR_PRIVATE_KEY
MNEMONICMnemonic to the Graphcast ID wallet (first address of the wallet is used; Only one of private key or mnemonic is needed).
Example: MNEMONIC=YOUR_MNEMONIC
GRAPH_ACCOUNTGraph account corresponding to Graphcast operator.
Example: GRAPH_ACCOUNT=YOUR_GRAPH_ACCOUNT
REGISTRY_SUBGRAPHSubgraph endpoint to the Graphcast Registry.
Default: https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli
NETWORK_SUBGRAPHSubgraph endpoint to The Graph network subgraph.
Default: https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-goerli
GRAPHCAST_NETWORKSupported Graphcast networks: mainnet, testnet.
Default: testnet
LOG_LEVELLogging configuration to set as RUST_LOG.
Default: info
LOG_FORMATSupport logging formats: pretty, json, full, compact.
Default: pretty

The Graphcast CLI code is very extensible and could be altered to send any kind of Graphcast-compatible message to the network.

Usage

The Graphcast CLI supports the following subcommands - upgrade-presync and indexing-status. Both of them work with additional configuration options:

NameDescription and Examples
SUBGRAPH_IDSubgraph id shared by the old and new deployment.
NEW_HASHSubgraph hash for the upgrade version of the subgraph.

The upgrade-presync subcommand has an additional MAX_RETRY variable, which specifies the number of retries for the subcommand. The default value is 5.

Below you can see examples of working CLI commands.

Run with Docker

  1. Pull the Graphcast CLI image
docker pull ghcr.io/graphops/graphcast-cli:latest
  1. Run the image, providing the required configuration variables. Here's a sample configuration:
docker run ghcr.io/graphops/graphcast-cli \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

(or) Run using a pre-built binary

We also provide pre-built binaries for Ubuntu and MacOS, which you can find in the Assets section on each release in the releases page on Github. Simply download the binary, make it executable (chmod a+x ./graphcast-cli-{TAG}-{SYSTEM}) and then run it (using ./graphcast-cli-{TAG}-{SYSTEM}), like this:

./graphcast-cli-0.0.1-macos \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

(or) Run using a pre-built binary

  1. Clone the repo
git clone https://github.com/graphops/graphcast-cli.git
  1. Navigate to the project directory
cd graphcast-cli
  1. Run the CLI
cargo run --release -- --private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"
- + \ No newline at end of file diff --git a/graphcast/radios/listener-radio.html b/graphcast/radios/listener-radio.html index e14f7eb0..ef6ef768 100644 --- a/graphcast/radios/listener-radio.html +++ b/graphcast/radios/listener-radio.html @@ -5,13 +5,13 @@ Listener Radio | GraphOps Docs - +

Listener Radio

The source code for Listener Radio is available on GitHub and Docker builds are automatically published as GitHub Packages.

Introduction

This Radio shall monitor Graphcast network by the pubsub topic of graphcast-v[version]-[network]. The Radio will not send messages to the network, but instead will record the messages and generate basic metrics for network monitoring.

Graphcast network is a complex system with numerous nodes and connections, and monitoring it is crucial for maintaining its performance, identifying potential issues, and ensuring its robustness and reliability.

  • Performance Optimization: to identify bottlenecks and areas of inefficiency.
  • Troubleshooting: to quickly diagnose issues within the network, reducing downtime and improving reliability.
  • Security: to immediately detect any unusual activity that might indicate a security breach.
  • Planning and Forecasting: Record valuable data that can be used for planning and forecasting purposes, helping us to make informed decisions about the network's future.

Quick Start

  • Ensure a running Postgres instance
  • Set Postgres url to DATABASE_URL in .env
  • Set general GraphcastAgent environmental variables shown in the below table
  • cargo run from source code (later should use Github actions to build source and dockerize

Basic Configuration

You will need to prepare the following environment variables:

NameDescription and examples
DATABASE_URLPostgres Database URL. The tool comes with automatic database migration, database url passed in must be exist and can be connected.
Example: postgresql://[username]:[password]@[pg_host]:[pg_port]/[db_name]
PRIVATE_KEYPrivate key to the Graphcast ID wallet (Precendence over mnemonics).
Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
GRAPH_NODE_STATUS_ENDPOINTURL to a Graph Node Indexing Status endpoint.
Example: http://index-node:8030/graphql
REGISTRY_SUBGRAPHURL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network
NETWORK_SUBGRAPHURL to the Graph Network subgraph. Check APIs for your preferred network
GRAPHCAST_NETWORKThe Graphcast Messaging fleet and pubsub namespace to use.
Mainnet: mainnet
Goerli: testnet

Example message table

idmessage
1{"nonce": 1686182179, "network": "mainnet", "payload": {"content": "0x3f...", "identifier": "QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj"}, "signature": "dff1...", "block_hash": "276e...", "identifier": "QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj", "block_number": 17431860}
2{"nonce": 1686182183, "network": "goerli", "payload": {"content": "0xc0...", "identifier": "QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB"}, "signature": "dbd2...", "block_hash": "0198...", "identifier": "QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB", "block_number": 9140860}
......

Advanced Configuration

In the configuration table below is the full list of environment variables you can set, along with example values.

See Basic Configuration above. The following environment variables are optional:

Name (Optional variables)Description and examples
MNEMONICMnemonic to the Graphcast ID wallet (first address of the wallet is used; Only one of PRIVATE_KEY or MNEMONIC is needed). Example: claptrap armchair violin...
COLLECT_MESSAGE_DURATIONSeconds that the Subgraph Radio will wait to collect remote POI attestations before making a comparison with the local POI. Example: 120 for 2 minutes.
COVERAGEToggle for topic coverage level. Possible values: "comprehensive", "on-chain", "minimal". Default is set to "on-chain" coverage.
TOPICSComma separated static list of content topics (subgraphs) to subscribe to. Example: QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Vz,QmUwCFhXM3f6qH9Ls9Y6gDNURBH7mxsn6JcectgxAz6CwU,QmQ1Lyh3U6YgVP6YX1RgRz6c8GmKkEpokLwPvEtJx6cF1y
WAKU_HOSTInterface onto which to bind the bundled Waku node. Example: 0.0.0.0
WAKU_PORTP2P port on which the bundled Waku node will operate. Example: 60000
WAKU_NODE_KEYStatic Waku Node Key.
BOOT_NODE_ADDRESSESPeer addresses to use as Waku boot nodes. Example: "addr1, addr2, addr3"
SLACK_TOKENSlack Token to use for notifications. Example: xoxp-0123456789-0123456789-0123456789-0123456789
TELEGRAM_TOKENTelegram Bot Token to use for notifications. Example: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
TELEGRAM_CHAT_IDThe ID of the Telegram chat to send messages to. Example: -1001234567890
SLACK_CHANNELName of Slack channel to send messages to (has to be a public channel). Example: poir-notifications
WAKU_LOG_LEVELWaku node logging configuration. Example: INFO (is also the default)
RUST_LOGRust tracing configuration. Example: graphcast_sdk=debug,subgraph_radio=debug, defaults to info for everything
DISCORD_WEBHOOKDiscord webhook URL for notifications. Example: https://discord.com/api/webhooks/123456789012345678/AbCDeFgHiJkLmNoPqRsTuVwXyZaBcDeFgHiJkLmN
METRICS_PORTIf set, the Radio will expose Prometheus metrics on this (off by default). Example: 3001
METRICS_HOSTIf set, the Radio will expose Prometheus metrics on this (off by default). Example: 0.0.0.0
SERVER_HOSTIf SERVER_PORT is set, the Radio will expose an API service on the given host and port. Default: 0.0.0.0
SERVER_PORTIf set, the Radio will expose an API service on the given port (off by default). Example: 8080
LOG_FORMATOptions: pretty - verbose and human readable; json - not verbose and parsable; compact - not verbose and not parsable; full - verbose and not parsible. Default value: pretty.
PERSISTENCE_FILE_PATHRelative path. If set, the Radio will periodically store states of the program to the file in json format (off by default).
DISCV5_ENRSComma separated ENRs for Waku Discv5 bootstrapping. Defaults to empty list.
DISCV5_PORTDiscoverable UDP port. Default: 9000
ID_VALIDATIONDefines the level of validation for message signers used during radio operation. Options include: no-check, valid-address, graphcast-registered, graph-network-account, registered-indexer, indexer

Configurations explained

COVERAGE (topic)

COVERAGE is used to specify the topic coverage level. It controls the range of topics (subgraph ipfs hashes) the Indexer subscribes to in order to process data and participate in the network.

There are three coverage levels available:

  • comprehensive: Subscribe to on-chain topics, user-defined static topics, and subgraph deployments syncing on graph node. This level is useful for Indexers who want to compare public POIs for all deployments syncing on their graph node even if they don't have an active allocations open (their stake will not be taken into account in attestation).
  • on-chain: Subscribe to on-chain topics and user-defined static topics. This is the default coverage level and is suitable for indexers who only want to compare data for deployments with active allocations.
  • minimal: Only subscribe to user-defined static topics. This level is for Indexers who want to limit their participation to specific topics of interest.

Identity validaiton

ID_VALIDATION is used to define level of validation for message signers used during radio operation.

Available Options:

  • no-check: does not perform check on the message signature and does not verify the signer.
  • valid-address: checks the signer to be a valid Ethereum address.
  • graphcast-registered: checks the signer to be registered on Graphcast Registry.
  • graph-network-account: checks the signer to be a Graph account.
  • registered-indexer: checks the signer to be registered on Graphcast Registry and corresponds to an Indexer that satisfies the minimum stake requirement.
  • indexer: checks the signer to be either registered on Graphcast Registry or to be a Graph Account, and corresponds to an Indexer satisfying the minimum stake requirement.

Gossip protocol

WAKU_HOST and WAKU_PORT specify where the bundled Waku node runs. If you want to run multiple Radios, or multiple instances of the same Radio, you should run them on different ports.

If you want to customize the log level, you can toggle RUST_LOG environment variable. Here's an example configuration to get more verbose logging:

RUST_LOG="warn,hyper=warn,graphcast_sdk=debug,subgraph_radio=debug"

Discv5 is an ambient node discovery network for establishing a decentralized network of interconnected Graphcast Radios. Discv5, when used in Graphcast Radios, serves as a dedicated peer-to-peer discovery protocol that empowers Radios to form an efficient, decentralized network. Without Discv5, the traffic within the Graphcast network would largely rely on centrally hosted boot nodes, leading to a less distributed architecture. However, with Discv5, Radios are capable of directly routing messages among themselves, significantly enhancing network decentralization and reducing reliance on the central nodes. If you want to learn more about Discv5, check out the official spec.

Monitoring the Radio

Prometheus & Grafana

The exposed metrics can be scraped by a Prometheus server and displayed in Grafana. In order to use them you have to have a local Prometheus server running and scraping metrics on the provided port. You can specify the metrics host and port by using the environment variables METRICS_PORT and METRICS_HOST.

HTTP Server

The Radio spins up an HTTP server with a GraphQL API when SERVER_HOST and SERVER_PORT environment variables are set. The supported routes are:

  • /health for health status
  • /api/v1/graphql for GET and POST requests with GraphQL playground interface

The GraphQL API now includes:

Below are an example query:

query {
rows {
id
message {
nonce
network
payload {
content
}
}
}

messages {
identifier
nonce
network
blockNumber
blockHash
signature
payload {
identifier
content
}
}
}

example mutation:

mutation{
deleteMessage(id:1)
}
- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/advanced-configuration.html b/graphcast/radios/subgraph-radio/advanced-configuration.html index fce4e401..2afaee4d 100644 --- a/graphcast/radios/subgraph-radio/advanced-configuration.html +++ b/graphcast/radios/subgraph-radio/advanced-configuration.html @@ -5,13 +5,13 @@ Advanced Configuration | GraphOps Docs - +

Advanced Configuration

In the configuration table below is the full list of environment variables you can set, along with example values.

See Basic Configuration in the Introduction. The following environment variables are optional:

Name (Optional variables)Description and examples
MNEMONICMnemonic to the Graphcast ID wallet or the Indexer Operator wallet (first address of the wallet is used; Only one of PRIVATE_KEY or MNEMONIC is needed). Example: claptrap armchair violin...
COLLECT_MESSAGE_DURATIONSeconds that the Subgraph Radio will wait to collect remote POI attestations before making a comparison with the local POI. Example: 120 for 2 minutes.
COVERAGEToggle for topic coverage level. Possible values: "comprehensive", "on-chain", "minimal", "none". Default is set to "comprehensive" coverage.
TOPICSComma separated static list of content topics (subgraphs) to subscribe to. Example: QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Vz,QmUwCFhXM3f6qH9Ls9Y6gDNURBH7mxsn6JcectgxAz6CwU,QmQ1Lyh3U6YgVP6YX1RgRz6c8GmKkEpokLwPvEtJx6cF1y
WAKU_HOSTInterface onto which to bind the bundled Waku node. Example: 0.0.0.0
WAKU_PORTP2P port on which the bundled Waku node will operate. Example: 60000
WAKU_NODE_KEYStatic Waku Node Key.
BOOT_NODE_ADDRESSESPeer addresses to use as Waku boot nodes. Example: "addr1, addr2, addr3"
SLACK_TOKENSlack Token to use for notifications. Example: xoxp-0123456789-0123456789-0123456789-0123456789
TELEGRAM_TOKENTelegram Bot Token to use for notifications. Example: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
TELEGRAM_CHAT_IDThe ID of the Telegram chat to send messages to. Example: -1001234567890
SLACK_CHANNELName of Slack channel to send messages to (has to be a public channel). Example: poir-notifications
WAKU_LOG_LEVELWaku node logging configuration. Example: INFO (is also the default)
RUST_LOGRust tracing configuration. Example: graphcast_sdk=debug,subgraph_radio=debug, defaults to info for everything
DISCORD_WEBHOOKDiscord webhook URL for notifications. Example: https://discord.com/api/webhooks/123456789012345678/AbCDeFgHiJkLmNoPqRsTuVwXyZaBcDeFgHiJkLmN
METRICS_PORTIf set, the Radio will expose Prometheus metrics on this (off by default). Example: 3001
METRICS_HOSTIf set, the Radio will expose Prometheus metrics on this (off by default). Example: 0.0.0.0
SERVER_HOSTIf SERVER_PORT is set, the Radio will expose an API service on the given host and port. Default: 0.0.0.0
SERVER_PORTIf set, the Radio will expose an API service on the given port (off by default). Example: 8080
LOG_FORMATOptions: pretty - verbose and human readable; json - not verbose and parsable; compact - not verbose and not parsable; full - verbose and not parsable. Default value: pretty.
PERSISTENCE_FILE_PATHRelative path. If set, the Radio will periodically store states of the program to the file in json format (off by default).
DISCV5_ENRSComma separated ENRs for Waku Discv5 bootstrapping. Defaults to empty list.
DISCV5_PORTDiscoverable UDP port. Default: 9000
ID_VALIDATIONDefines the level of validation for message signers used during radio operation. Options include: no-check, valid-address, graphcast-registered, graph-network-account, registered-indexer, indexer. Default: indexer
INDEXER_MANAGEMENT_SERVER_ENDPOINTURL to the Indexer management server of Indexer Agent. Example: http://localhost:18000
AUTO_UPGRADEToggle for the types of subgraphs for which the Radio will send offchain syncing commands to the indexer management server. Default to upgrade all syncing deployments. Possible values: "comprehensive", "on-chain", "minimal", "none". Default is set to "comprehensive" coverage.
RATELIMIT_THRESHOLDSet upgrade intent ratelimit in seconds: only one upgrade per subgraph within the threshold (default: 86400 seconds = 1 day)
PROTOCOL_NETWORKThe protocol network (currently matches with suffix of the provided NETWORK_SUBGRAPH configuration variable)
NOTIFICATION_MODEOptions: live, periodic-report, periodic-update. Learn more about notification modes here. Default: live
NOTIFICATION_INTERVALInterval (in hours) between sending a divergence notification (used in the periodic-update and periodic-report notification modes). Learn more about notification modes here. Default: 24
info

For enhanced security, we recommend running Subgraph Radio with an independent Graphcast ID linked to your Indexer account. This Graphcast ID is an Ethereum account authorized to sign Graphcast messages on behalf of your Indexer. By default, Subgraph Radio validates messages received from any signer that can be resolved to an Indexer address, regardless of whether or not they are registered on the Graphcast registry (though this behavior can be altered by setting the ID_VALIDATION config variable). Learn how to register a Graphcast ID here.

Configurations explained

COVERAGE (topic)

COVERAGE is used to specify the topic coverage level. It controls the range of topics (subgraph ipfs hashes) the Indexer subscribes to in order to process data and participate in the network.

There are three coverage levels available:

  • comprehensive: Subscribe to on-chain topics, user-defined static topics, and subgraph deployments syncing on graph node. This level is useful for Indexers who want to compare public POIs for all deployments syncing on their graph node even if they don't have active allocations open (their stake will not be taken into account in attestation).
  • on-chain: Subscribe to on-chain topics and user-defined static topics. This is the default coverage level and is suitable for indexers who only want to compare data for deployments with active allocations.
  • minimal: Only subscribe to user-defined static topics. This level is for Indexers who want to limit their participation to specific topics of interest.

Identity validation

ID_VALIDATION is used to define the level of validation for message signers used during radio operation. We recommend registered-indexer for most strict identity validation, while indexer is a viable option for those who want to use the network before considering Graphcast ID registration. You can choose a sender identity validation mechanism for your radio, based on your use case and security preferences.

Available Options:

  • no-check: Does not perform check on the message signature and does not verify the signer. All messages should pass the sender check.
  • valid-address: Requires the signer to be a valid Ethereum address. Messages should be traceable to an Ethers wallet.
  • graphcast-registered: Requires the signer to be registered on the Graphcast Registry.
  • graph-network-account: signer must be a Graph account.
  • registered-indexer: signer must be registered at Graphcast Registry and correspond to an Indexer satisfying the indexer minimum stake requirement.
  • indexer: signer must be registered at Graphcast Registry or be a Graph Account, and correspond to an Indexer satisfying the indexer minimum stake requirement.

Gossip protocol

WAKU_HOST and WAKU_PORT specify where the bundled Waku node runs. If you want to run multiple Radios, or multiple instances of the same Radio, you should run them on different ports.

If you want to customize the log level, you can toggle RUST_LOG environment variable. Here's an example configuration to get more verbose logging:

RUST_LOG="warn,hyper=warn,graphcast_sdk=debug,subgraph_radio=debug"

Discv5 is an ambient node discovery network for establishing a decentralized network of interconnected Graphcast Radios. Discv5, when used in Graphcast Radios, serves as a dedicated peer-to-peer discovery protocol that empowers Radios to form an efficient, decentralized network. Without Discv5, the traffic within the Graphcast network would largely rely on centrally hosted boot nodes, leading to a less distributed architecture. However, with Discv5, Radios are capable of directly routing messages among themselves, significantly enhancing network decentralization and reducing reliance on the central nodes. If you want to learn more about Discv5, check out the official spec.

Protocol network

Available Options:

  • goerli
  • mainnet
  • arbitrum-one
  • arbitrum-goerli

State management

PERSISTENCE_FILE_PATH configuration variable allows the Radio to maintain operational continuity across sessions. When the file path is set, it triggers the Radio to periodically store its state, including local attestations, remote messages and POI comparison results in a JSON-formatted file at the specified path. This facilitates seamless session transitions and minimizes data loss. In the event of a system disruption, the state can be reloaded from this file, ensuring the Radio can resume operation effectively.

Subgraph Upgrade Pre-sync feature configuration variables

The subgraph upgrade pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. If the Radio operator has set up the notification system, they will get notified whenever a new subgraph upgrade intent message is received.

If the INDEXER_MANAGEMENT_SERVER_ENDPOINT configuration variable has been set, the Radio will send a request to the Indexer Agent to start offchain syncing the new Subgraph deployment.

The AUTO_UPGRADE variable can be toggled to change the coverage level of subgraphs for which the Radio will send offchain syncing commands to the indexer management server.

Configuration options

To configure Subgraph Radio, you can use the following methods:

Using Environment Variables

Example .env file:

PRIVATE_KEY="a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m"
GRAPH_NODE_STATUS_ENDPOINT="http://127.0.0.42:8030/graphql"
REGISTRY_SUBGRAPH="https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet"
NETWORK_SUBGRAPH="https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet"
GRAPHCAST_NETWORK=mainnet
INDEXER_ADDRESS="0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"

Using CLI arguments

Pass the configuration options directly as command-line arguments.

docker run ghcr.io/graphops/subgraph-radio \
--private-key "a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m" \
--graph-node-status-endpoint "http://127.0.0.42:8030/graphql" \
--registry-subgraph "https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet" \
--network-subgraph "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet" \
--graphcast-network mainnet \
--indexer-address "0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"

Using a TOML/YAML file

Example TOML configuration file (config.toml):

[graph_stack]
graph_node_status_endpoint = 'http://127.0.0.42:8030/graphql'
indexer_address = '0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6'
registry_subgraph = 'https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet'
network_subgraph = 'https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet'
private_key = 'a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m'

Then you just need to have the CONFIG_FILE set, either as an env variable - CONFIG_FILE=path/to/config.toml or passed as a CLI arg - --config-file path/to/config.toml.

Example YAML configuration file (config.yaml):

graph_stack:
graph_node_status_endpoint: "http://127.0.0.42:8030/graphql"
indexer_address: "0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"
registry_subgraph: "https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet"
network_subgraph: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet"
private_key: "a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m"

Then you just need to have the CONFIG_FILE set, either as an env variable - CONFIG_FILE=path/to/config.yaml or passed as a CLI arg - --config-file path/to/config.yaml.

We also have an extensive configuration file template in the repo.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/http-server.html b/graphcast/radios/subgraph-radio/http-server.html index 179ea908..20b1b37f 100644 --- a/graphcast/radios/subgraph-radio/http-server.html +++ b/graphcast/radios/subgraph-radio/http-server.html @@ -5,13 +5,13 @@ HTTP Server | GraphOps Docs - +

HTTP Server

The Radio spins up an HTTP server with a GraphQL API when SERVER_HOST and SERVER_PORT environment variables are set. The supported routes are:

  • /health for health status
  • /api/v1/graphql for GET and POST requests with GraphQL playground interface

The GraphQL API now includes several advanced queries:

  • radioPayloadMessages
  • localAttestations
  • upgradeIntentMessages
  • comparisonResults
  • comparisonRatio

Below are some example queries:

query {
radioPayloadMessages {
identifier
nonce
signature
graphAccount
payload {
identifier
content
}
}
localAttestations {
deployment
blockNumber
attestation {
ppoi
}
}
comparisonResults(identifier: "Qm...") {
deployment
blockNumber
resultType
localAttestation {
ppoi
}
attestations {
senders
stakeWeight
ppoi
}
}
comparisonRatio {
deployment
blockNumber
stakeRatio
}
upgradeIntentMessages {
subgraphId
newHash
nonce
graphAccount
}
}

You can customize the returned data from the comparisonRatio query by providing optional arguments - deployment, block and resultType.

query {
comparisonRatio(deployment: "Qm...", block: 17887350, resultType: MATCH) {
deployment
blockNumber
stakeRatio
}
}

In this example, the comparisonRatio query will return the stake ratios only for attestations from deployment "Qm..." and block number 17887350, and only for the specified result type.

Note: The result_type field of the filter corresponds to the resultType field in the comparisonResults query. This field represents the type of comparison result.

stakeRatio orders the attestations by stake weight, then computes the ratio of unique senders.

To understand more about the format of the ratio results, check out this section.

These queries provide a clear aggregation of the attestations from remote messages, giving a concise understanding of the Radio's state. The optional filters - deployment, block, and resultType - can be used to refine the results.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/intro.html b/graphcast/radios/subgraph-radio/intro.html index ec0669e1..bdd80d6f 100644 --- a/graphcast/radios/subgraph-radio/intro.html +++ b/graphcast/radios/subgraph-radio/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Subgraph Radio

Subgraph Radio is an optional component of the Graph Protocol Indexer Stack. It uses the Graphcast Network to facilitate the exchange of data among Indexers and other participants about Subgraphs.

The source code for the Subgraph Radio is available on GitHub and Docker builds are automatically published as GitHub Packages. Subgraph Radio is also published as a crate on crates.io.

Basic Configuration

The Subgraph Radio can be configured using environment variables, CLI arguments, as well as a .toml or .yaml configuration file. Take a look at the configuration options to learn more. In all cases, users will need to prepare the following configuration variables:

NameDescription and examples
PRIVATE_KEYPrivate key of the Graphcast ID wallet or the Indexer Operator wallet (precedence over MNEMONIC).
Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
INDEXER_ADDRESSIndexer address for Graphcast message verification, in all lowercase.
Example: 0xabcdcabdabcdabcdcabdabcdabcdcabdabcdabcd
GRAPH_NODE_STATUS_ENDPOINTURL to a Graph Node Indexing Status endpoint.
Example: http://index-node:8030/graphql
INDEXER_MANAGEMENT_SERVER_ENDPOINTURL to the Indexer management server of Indexer Agent. Example: http://localhost:18000
REGISTRY_SUBGRAPHURL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network
NETWORK_SUBGRAPHURL to the Graph Network subgraph. Check APIs for your preferred network
GRAPHCAST_NETWORKThe Graphcast Messaging fleet and pubsub namespace to use.
Mainnet: mainnet
Goerli: testnet

Run with Docker

  1. Pull the Subgraph Radio image
docker pull ghcr.io/graphops/subgraph-radio:latest
  2. Run the image, providing the required environment variables. Here's a sample mainnet configuration:
docker run \
-e GRAPHCAST_NETWORK="mainnet" \
-e REGISTRY_SUBGRAPH="https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet" \
-e NETWORK_SUBGRAPH="https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet" \
-e PRIVATE_KEY="PRIVATE_KEY" \
-e GRAPH_NODE_STATUS_ENDPOINT="http://graph-node:8030/graphql" \
-e RUST_LOG="warn,hyper=warn,graphcast_sdk=info,subgraph_radio=info" \
-e INDEXER_ADDRESS="INDEXER_ADDRESS" \
ghcr.io/graphops/subgraph-radio:latest

(or) Run with docker-compose

You can append this service definition to your docker-compose manifest and customise the definitions:

services:
# ... your other service definitions
subgraph-radio:
image: ghcr.io/graphops/subgraph-radio:latest
container_name: subgraph-radio
restart: unless-stopped
environment:
GRAPHCAST_NETWORK: "mainnet"
REGISTRY_SUBGRAPH: "https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet"
NETWORK_SUBGRAPH: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet"
PRIVATE_KEY: "PRIVATE_KEY"
GRAPH_NODE_STATUS_ENDPOINT: "http://graph-node:8030/graphql"
RUST_LOG: "warn,hyper=warn,graphcast_sdk=info,subgraph_radio=info"
INDEXER_ADDRESS: "INDEXER_ADDRESS"
logging:
driver: local

(or) Run as part of StakeSquid's docker-compose setup

Subgraph Radio is included as an optional component in both the mainnet and testnet versions of StakeSquid's guide.

(or) Run using a pre-built binary

We also provide pre-built binaries for Ubuntu and MacOS, which you can find in the Assets section on each release in the releases page on Github. Simply download the binary, make it executable (chmod a+x ./subgraph-radio-{TAG}-{SYSTEM}) and then run it (using ./subgraph-radio-{TAG}-{SYSTEM}).

Developing the Subgraph Radio

Building the image using the Dockerfile locally

If you want to make any changes to the Subgraph Radio codebase, you can use this option.

Prerequisites
  1. Clone this repo and cd into it
  2. Create a .env file that includes at least the required environment variables. To see the full list of environment variables you can provide, check out the Configuration section.
Running the Subgraph Radio inside a Docker container
docker-compose up -d

Building Subgraph Radio locally

To have full control over the Subgraph Radio code and run it directly on your machine (without Docker) you can use this option.

Prerequisites

  1. Clone this repo and cd into it
  2. Make sure you have the following installed:
  • Rust
  • Go
  • Build tools (e.g. the build-essentials package for Debian-based Linux distributions or Xcode Command Line Tools for MacOS)
  • C compiler (e.g. the clang package for Debian-based Linux distribution or Xcode Command Line Tools for MacOS)
  • OpenSSL (e.g. the libssl-dev package for Debian-based Linux distribution or openssl for MacOS)
  • PostgreSQL libraries and headers (e.g. the libpq-dev package for Debian-based Linux distribution or postgresql for MacOS)
  3. You have Graph Node syncing your indexer's on-chain allocations.
  4. You have created a .env file that includes at least the required environment variables. To see the full list of environment variables you can provide, check out the Configuration section.

Running the Subgraph Radio natively

cargo run --release
- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/monitoring.html b/graphcast/radios/subgraph-radio/monitoring.html index be38e835..cdf6955ca 100644 --- a/graphcast/radios/subgraph-radio/monitoring.html +++ b/graphcast/radios/subgraph-radio/monitoring.html @@ -5,13 +5,13 @@ Notifications and Monitoring | GraphOps Docs - +

Notifications and Monitoring

Notifications

If the Radio operator has set up a Slack, Discord and/or Telegram bot integration and the Radio finds a POI mismatch, it sends alerts to the designated channels. The operator can also inspect the logs to see if the Radio is functioning properly, if it's sending and receiving messages, if it's comparing normalised POIs, if there is a found POI mismatch, etc.

Notification modes

Subgraph Radio supports three modes of notification, based on the user's preference for how often they'd like to get notified, and what data the notifications contain:

  • live - the Radio sends a notification as soon as it finds a divergence, providing the Subgraph deployment and the block.
  • periodic-update - the Radio sends a notification on a specified interval (default is 24 hours) containing any updates to comparison results that have happened since the previous notification (the notification message format is the same as the one using live mode). If there are no updates it will not send a notification.
  • periodic-report - the Radio sends a notification on a specified interval (default is 24 hours) with a summary of total subgraphs being cross-checked, number of matched subgraphs, number of diverged subgraphs, and a list of the divergent subgraphs and the blocks where the divergence was caught.

The default notification mode if there's Slack/Discord/Telegram integration in place is live.

The notification mode can be toggled using the NOTIFICATION_MODE and NOTIFICATION_INTERVAL configuration variables.

See more information on how to configure notifications, as well as how to set up Slack, Discord and Telegram in the advanced configuration section.

Prometheus & Grafana

The Subgraph Radio exposes metrics that can then be scraped by a Prometheus server and displayed in Grafana. In order to use them you have to have a local Prometheus server running and scraping metrics on the provided port. You can specify the metrics host and port by using the environment variables METRICS_PORT and METRICS_HOST.

Setting up the Grafana dashboard

The Subgraph Radio Grafana dashboard is included by default in Stakesquid's docker-compose stack. If you're not using the stack, below is a walk-through of how you can set it up.

There is a Grafana dashboard config JSON file provided in the repo, which you can import and use to visualise the metrics in Grafana. When importing the dashboard, it will require you to specify two data sources - a Prometheus one and a GraphQL one. For Prometheus you should select the Prometheus instance that you've set up to scrape metrics from Subgraph Radio's metrics host and port. For GraphQL, you'd need to install the GraphQL data source plugin, if you don't have it already installed. Then you need to create a new GraphQL data source that points to the GraphQL API of the Radio's integrated HTTP server. For instance, if you've set SERVER_HOST to 0.0.0.0 and SERVER_PORT to 3012, your GraphQL data source would need to point at http://0.0.0.0:3012/api/v1/graphql. You can learn more about the HTTP server in the next section.

Reading the Grafana dashboard

Grafana Dashboard

When the Subgraph Radio Grafana dashboard has been set up, it offers 6 panels:

POI Comparison Overview

At a glance, you can see the number of matching and diverging subgraphs. These two gauges update to reflect the results continuously after each comparison. The reason these are gauges and not counters is that a subgraph's comparison result can change between POI comparison events, for instance you might have a diverging public POI for a given subgraph on block X, but then at block Y it could be matching with the consensus public POI, in that case it would change groups, the number of divergent subgraphs would decrement and the number of matching subgraphs would increment.

Message stats

This includes the validated messages per minute, as well as the total cached messages in the store.

Number of Gossiping Indexers per Subgraph

This panel shows how many Indexers are actively sending public POIs for the subgraphs that you're interested in. This view can be filtered by a specific subgraph.

POI Comparison Results

This is the most insightful and important panel. The data in it is coming directly from the HTTP server's GraphQL endpoint. It shows the most recent comparison results for each subgraph that is being actively cross-checked, as well as the block for which that comparison happened.

The Count Ratio shows the ratio of unique senders that have attested to a public POI for that subgraph on that block. For instance 3:1:1* means that there are three distinct public POIs that were compared. It also means there are four Indexers attesting to public POIs that are different from the locally generated public POI; three of them attest to the same POI and the remaining Indexer attests to a different one, but neither of those two POIs matches the locally generated one. If it's 3*:1 it means that the local POI matches with the most often attested POI (highest sender count), meaning that the local Indexer is in that group of three Indexers, and there is one other Indexer who has sent a different POI. If it's 4* it means that there are four Indexers attesting to a given POI and all four POIs are the same (the local one included). The count that has a * sign is where the local attestation is.

Another possible ratio value is 3:0*, the 0* here represents that there is no local public POI, generated for this subgraph on this block (this might happen due to a lot of reasons, one of them being that the subgraph isn't fully synced).

The Stake Ratio is similar to the Count Ratio, but POIs are grouped by stake, so 11686531* means that that is the combined stake backing the public POI for that subgraph on that block (the local Indexer stake is included) whereas for example 44141361*:651361 would mean that there are two distinct POIs and hence two different sender groups, and these two stake values are the aggregated stake values behind each of those POIs. The * on the first one means that the local Indexer's stake is attesting to the same public POI and the local stake is included in that value. Similar to the Count Ratio, if there's a 0*, for instance - 44141361:0*, it means that there is no local public POI, generated for this subgraph on this block (therefore there is no attesting stake from the local Indexer).

Function Call Stats

Shows insights into the frequency of different functions running in the Radio, it helps convey a sense of how often/how many times certain events have happened, like POI comparison, processing a validated message, sending a message, and more.

Number of diverged subgraphs

Count of diverged subgraphs and how it's changed over time.

Locally tracked Public POIs

Number of locally generated public POIs for all of the subgraphs that are actively being cross-checked.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/poi-cross-checking.html b/graphcast/radios/subgraph-radio/poi-cross-checking.html index f1ea5fa2..e5f0ef29 100644 --- a/graphcast/radios/subgraph-radio/poi-cross-checking.html +++ b/graphcast/radios/subgraph-radio/poi-cross-checking.html @@ -5,13 +5,13 @@ POI Cross-checking | GraphOps Docs - +

POI Cross-checking

An essential aspect of earning indexing rewards as an Indexer is the generation of valid Proof of Indexing hashes (POIs). These POIs provide evidence of the Indexer's possession of correct data. Submitting invalid POIs could lead to a Dispute and possible slashing by the protocol. With Subgraph Radio's POI feature, Indexers gain confidence knowing that their POIs are continually cross-verified against those of other participating Indexers. Should there be a discrepancy in POIs, Subgraph Radio functions as an early warning system, alerting the Indexer within minutes.

All POIs generated through Subgraph Radio are public (normalized), meaning they are hashed with a 0x0 Indexer Address and can be compared between Indexers. However, these public POIs are not valid for on-chain reward submission. Subgraph Radio groups and weighs public POIs according to the aggregate stake in GRT attesting to each. The normalized POI with the most substantial aggregate attesting stake is deemed canonical and used for comparisons with your local Indexer POIs.

POI Cross-checking

Determining which Subgraphs to gossip about

Subgraph Radio will gossip about different subgraphs depending on the COVERAGE configuration (see more). By default, the Radio will gossip about all healthy subgraphs, whether they are allocated to or not.

Subgraph Radio periodically polls the Graph Node for new blocks on all relevant networks and constructs Graphcast topics on each allocation identified by subgraph deployment IPFS hash. Chainheads for these networks are updated with data from the Graph Node, and the Radio ensures that it is always using the latest chainhead when processing messages.

Gathering and comparing normalised POIs

At a given interval, the Radio fetches the normalised POI for each deployment. This interval is defined in blocks and differs for each network. It then saves those public POIs, and as other Indexers running the Radio start doing the same, messages start propagating through the network. The Radio saves each message and processes them on a given interval.

The messages include a nonce (UNIX timestamp), block number, signature (used to derive the sender's on-chain Indexer address) and network. Before saving an entry to the map, the Radio operator verifies through the Graph network subgraph for the sender's on-chain identity and amount of tokens staked, which is used during comparisons later on.

At another interval, the Radio compares the local public POIs with the collected remote ones. The remote POIs are sorted so that for each subgraph (on each block), the POI that is backed by the most on-chain stake is selected. This means that the combined stake of all Indexers that attested to it is considered, not just the highest staking Indexer. The top POI is then compared with the local POIs for that subgraph at that block to determine consensus.

If there is a mismatch and if the Radio operator has set up a Slack, Discord and/or Telegram bot integration, the Radio will send alerts to the designated channels.

After a successful comparison, the attestations that have been checked are removed from the store.

Sequence Diagram

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html b/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html index 8ce062eb..504c7111 100644 --- a/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html +++ b/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html @@ -5,13 +5,13 @@ Subgraph Upgrade Pre-syncing | GraphOps Docs - +

Subgraph Upgrade Pre-syncing

The Subgraph Upgrade Pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. Subgraph Developers can use the Graphcast CLI to send a message to all Indexers interested in the given subgraph.

Upgrade Presyncing

As an Indexer running Subgraph Radio

As long as there is a valid configuration for AUTO_UPGRADE and INDEXER_MANAGEMENT_SERVER_ENDPOINT (see Advanced Configuration), Subgraph Radio will process Upgrade Intent Messages and automatically begin offchain syncing new Subgraph Deployments.

Rate Limits

In order to prevent spam, Subgraph Radio implements a rate limit on Upgrade Intent Messages. By default, Subgraph Radio will permit one upgrade for an existing Subgraph Deployment per day.

As a Subgraph Developer

Send an Upgrade Intent Message

Refer to the usage section of Graphcast CLI to learn more about different ways to send a UpgradeIntentMessage, as well as the different configurations options available.

Example:

docker run ghcr.io/graphops/graphcast-cli \
# pass the address for subgraph deployer
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
# pass the pk for the subgraph deployer
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
# specify we want to send an upgrade presync message
upgrade-presync \
# specify the subgraph ID
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3" \
# specify the new subgraph deployment hash
--new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x"

This is what the final log should look like after successfully sending the message:

INFO graphcast_cli::operator::operation: Sent message, msg_id: "0x126c76b7a5e9a30b3834807e0e02f9858191d153746ae7aebdef90bd4bae9b7a"
at src/operator/operation.rs:37

Check Indexing Status

After sending an UpgradeIntentMessage, a Subgraph Developer can periodically check the indexing status of the new subgraph deployment using the public API of the Indexers who actively allocate on the current version of the subgraph.

The same arguments used for UpgradeIntentMessage can be used here. However, gossiping is not involved in this operation; instead, the indexing status is checked through deterministic queries.

Command for querying for the indexing status:

docker run ghcr.io/graphops/graphcast-cli \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
indexing-status --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

Sequence Diagram

- + \ No newline at end of file diff --git a/graphcast/sdk/intro.html b/graphcast/sdk/intro.html index 1b58edc3..d15a7ea5 100644 --- a/graphcast/sdk/intro.html +++ b/graphcast/sdk/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Graphcast SDK is a decentralized, distributed peer-to-peer (P2P) communication tool that enables users across the network to exchange information in real-time. It is designed to overcome the high cost of signaling or coordination between blockchain participants by enabling off-chain communication (gossip/cheap talk). This is particularly useful for applications where real-time communication is essential but the cost of on-chain transactions is prohibitive.

How it Works

The SDK serves as a base layer for Radio developers, providing essential components to build their applications without starting from scratch. These components include:

  1. Connection to the Graphcast network: Forms a communication network and provides an interface to subscribe to receive messages on specific topics and to broadcast messages onto the network. Allows for real-time communication between different nodes in the network.

  2. Interactions with Graph entities: This allows for necessary interactions with Graph node, Graph network subgraph, Graphcast registry.

An example of a ping-pong Radio is provided in the examples folder, which leverages the base layer and defines the specific logic around constructing and sending messages, as well as receiving and handling them. This example can serve as a starting point for developers looking to build their own Radios.

Network Configurations

A Graphcast radio can interact with many parts of The Graph network modularly. The network configurations actively supported by the team include mainnet (Ethereum mainnet and Arbitrum One) and testnet (Goerli and Arbitrum Goerli). You are free to define and use your own Graphcast Network and Graphcast Registry. This flexibility allows for a wide range of applications and use cases.

Contributing

Contributions are welcome and appreciated! Please refer to the Contributor Guide, Code Of Conduct, and Security Notes for this repository. These documents provide guidelines for how to contribute to the project in a way that is beneficial to all parties involved.

Upgrading and Testing

Updates to the SDK will be merged into the main branch once their release PR has been approved. For testing, it is recommended to use nextest as your test runner. You can run the suite using the command cargo nextest run. Regular testing is crucial to ensure the stability and reliability of the software.

Resources

- + \ No newline at end of file diff --git a/graphcast/sdk/radio-dev.html b/graphcast/sdk/radio-dev.html index 8eefd8ab..89d73e95 100644 --- a/graphcast/sdk/radio-dev.html +++ b/graphcast/sdk/radio-dev.html @@ -5,13 +5,13 @@ Radio Development | GraphOps Docs - +

Radio Development

Do you want to build robust, peer-to-peer messaging apps that automatically exchange valuable data with other Indexers in real time? Do you have an idea for what data could be useful to share that could lead to greater communication efficiency in The Graph network as a whole? Then you want to build a Radio on top of the Graphcast network.

For a more complex and full example of the Graphcast SDK being used to create a Subgraph Radio, take a look at this repo.

A simple ping pong example

Let's take a look at the simplest possible example of a Radio, built on top of Graphcast - a ping pong app. When one participant sends Ping, all the others in the network that are listening on the ping pong topic will send Pong back. Pretty straightforward.

Register a Graphcast ID

We recommend that you register a Graphcast ID for your on-chain Indexer address. You can learn what a Graphcast ID is and how to register one here.

Once you complete those steps you will have a Graphcast ID that is authorized to sign messages on behalf of your Indexer.

Populate your .env file

You now need to export a few environment variables:

NameDescription and examples
PRIVATE_KEYPrivate key to the Graphcast ID wallet or Indexer Operator wallet (Precedence over MNEMONIC).
Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
REGISTRY_SUBGRAPHURL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network
NETWORK_SUBGRAPHURL to the Graph Network subgraph. Check APIs for your preferred network
GRAPHCAST_NETWORKThe Graphcast Messaging fleet and pubsub namespace to use. For this example you should use testnet

A few dependencies

Make sure you have the following installed:

  • Rust
  • Go
  • Build tools (e.g. the build-essentials package for Debian-based Linux distributions or Xcode Command Line Tools for MacOS)
  • C compiler (e.g. the clang package for Debian-based Linux distribution or Xcode Command Line Tools for MacOS)
  • OpenSSL (e.g. the libssl-dev package for Debian-based Linux distribution or openssl for MacOS)
  • PostgreSQL libraries and headers (e.g. the libpq-dev package for Debian-based Linux distribution or postgresql for MacOS)

Start off with a new Rust project (cargo new ping-pong). Then add the following dependencies to your Cargo.toml file:

[dependencies]
graphcast-sdk = "0.4.0"
once_cell = "1.15"
tokio = { version = "1.1.1", features = ["full"] }
anyhow = "1.0.39"
ethers = "1.0.0"
dotenv = "0.15.0"
tracing = "0.1"
ethers-contract = "1.0.0"
ethers-core = "1.0.0"
ethers-derive-eip712 = "1.0.0"
prost = "0.11"
serde = "1.0.147"
serde_derive = "1.0.114"

The imports

Open your main.rs file and add the following imports:

// For date and time utils
use chrono::Utc;

// Load environment variables from .env file
use dotenv::dotenv;

// Import Arc and Mutex for thread-safe sharing of data across threads
use std::sync::{Arc, Mutex};

// Import Graphcast SDK types and functions for agent configuration, message handling, and more
use graphcast_sdk::graphcast_agent::{GraphcastAgent, GraphcastAgentConfig};

// Import sleep and Duration for handling time intervals and thread delays
use std::{thread::sleep, time::Duration};

// Import AsyncMutex for asynchronous mutual exclusion of shared resources
use tokio::sync::Mutex as AsyncMutex;

// Import tracing macros for logging and diagnostic purposes
use tracing::{debug, error, info, trace};

// Import SimpleMessage from the crate's types module
use types::SimpleMessage;

// Import Config from the crate's config module
use config::Config;

use crate::types::{GRAPHCAST_AGENT, MESSAGES};

// Include the local config and types modules
mod config;
mod types;

Structure

Everything we need will be inside the main() function. And since we'll be using async code we have to annotate it with #[tokio::main], we can start off with something as simple as:

#[tokio::main]
async fn main() {
// TODO: Radio logic
}

Before diving into the contents of the main function, let's quickly populate the other two files we need - config.rs and types.rs.

Let's take a look at types.rs first:

use async_graphql::SimpleObject;
use ethers_contract::EthAbiType;
use ethers_core::types::transaction::eip712::Eip712;
use ethers_derive_eip712::*;
use graphcast_sdk::graphcast_agent::GraphcastAgent;
use prost::Message;
use serde::{Deserialize, Serialize};

// Import the OnceCell container for lazy initialization of global/static data
use once_cell::sync::OnceCell;
use std::sync::{Arc, Mutex};

/// A global static (singleton) instance of A GraphcastMessage vector.
/// It is used to save incoming messages after they've been validated, in order
/// defer their processing for later, because async code is required for the processing but
/// it is not allowed in the handler itself.
pub static MESSAGES: OnceCell<Arc<Mutex<Vec<SimpleMessage>>>> = OnceCell::new();

/// The Graphcast Agent instance must be a global static variable (for the time being).
/// This is because the Radio handler requires a static immutable context and
/// the handler itself is being passed into the Graphcast Agent, so it needs to be static as well.
pub static GRAPHCAST_AGENT: OnceCell<GraphcastAgent> = OnceCell::new();

/// Make a test radio type
#[derive(Eip712, EthAbiType, Clone, Message, Serialize, Deserialize, SimpleObject)]
#[eip712(
name = "Graphcast Ping-Pong Radio",
version = "0",
chain_id = 1,
verifying_contract = "0xc944e90c64b2c07662a292be6244bdf05cda44a7"
)]
pub struct SimpleMessage {
#[prost(string, tag = "1")]
pub identifier: String,
#[prost(string, tag = "2")]
pub content: String,
}

impl SimpleMessage {
pub fn new(identifier: String, content: String) -> Self {
SimpleMessage {
identifier,
content,
}
}

pub fn radio_handler(&self) {
MESSAGES
.get()
.expect("Could not retrieve messages")
.lock()
.expect("Could not get lock on messages")
.push(self.clone());
}
}

SimpleMessage defines the structure that all messages for this Radio must follow.

SimpleMessage is decorated with several macros - #[derive(Eip712, EthAbiType, Clone, Message, Serialize, Deserialize)], which automatically implement certain traits that are required by the SDK.

The #[eip712] macro is used to define information that is used in EIP-712, a standard for structuring typed data in Ethereum transactions.

Now let's see the config.rs file:

use clap::Parser;
use ethers::signers::WalletError;
use graphcast_sdk::build_wallet;
use graphcast_sdk::graphcast_agent::message_typing::IdentityValidation;
use graphcast_sdk::init_tracing;
use graphcast_sdk::wallet_address;
use serde::{Deserialize, Serialize};
use tracing::info;

#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
#[clap(
name = "ping-pong-radio",
about = "A simple example for using the Graphcast SDK to build Radios",
author = "GraphOps"
)]
pub struct Config {
#[clap(
long,
value_name = "ENDPOINT",
env = "GRAPH_NODE_STATUS_ENDPOINT",
help = "API endpoint to the Graph Node Status Endpoint"
)]
pub graph_node_endpoint: Option<String>,
#[clap(
long,
value_name = "KEY",
value_parser = Config::parse_key,
env = "PRIVATE_KEY",
hide_env_values = true,
help = "Private key to the Graphcast ID wallet (Precendence over mnemonics)",
)]
pub private_key: Option<String>,
#[clap(
long,
value_name = "KEY",
value_parser = Config::parse_key,
env = "MNEMONIC",
hide_env_values = true,
help = "Mnemonic to the Graphcast ID wallet (first address of the wallet is used; Only one of private key or mnemonic is needed)",
)]
pub mnemonic: Option<String>,
#[clap(
long,
value_name = "SUBGRAPH",
env = "REGISTRY_SUBGRAPH",
help = "Subgraph endpoint to the Graphcast Registry",
default_value = "https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli"
)]
pub registry_subgraph: String,
#[clap(
long,
value_name = "INDEXER_ADDRESS",
env = "INDEXER_ADDRESS",
help = "Graph account corresponding to Graphcast operator"
)]
pub indexer_address: String,
#[clap(
long,
value_name = "SUBGRAPH",
env = "NETWORK_SUBGRAPH",
help = "Subgraph endpoint to The Graph network subgraph",
default_value = "https://gateway.testnet.thegraph.com/network"
)]
pub network_subgraph: String,
#[clap(
long,
value_name = "LOG_FORMAT",
env = "LOG_FORMAT",
help = "Support logging formats: pretty, json, full, compact",
long_help = "pretty: verbose and human readable; json: not verbose and parsable; compact: not verbose and not parsable; full: verbose and not parsible",
possible_values = ["pretty", "json", "full", "compact"],
default_value = "full"
)]
pub log_format: String,
#[clap(
long,
value_name = "ID_VALIDATION",
value_enum,
env = "ID_VALIDATION",
default_value = "valid-address",
help = "Identity validation mechanism for senders (message signers)",
long_help = "Identity validation mechanism for senders (message signers)\n
no-check: all messages signer is valid, \n
valid-address: signer needs to be an valid Eth address, \n
graphcast-registered: must be registered at Graphcast Registry, \n
graph-network-account: must be a Graph account, \n
registered-indexer: must be registered at Graphcast Registry, correspond to and Indexer statisfying indexer minimum stake requirement, \n
indexer: must be registered at Graphcast Registry or is a Graph Account, correspond to and Indexer statisfying indexer minimum stake requirement"
)]
pub id_validation: IdentityValidation,
}

impl Config {
/// Parse config arguments
pub fn args() -> Self {
// TODO: load config file before parse (maybe add new level of subcommands)
let config = Config::parse();
init_tracing(config.log_format.clone()).expect("Could not set up global default subscriber for logger, check environmental variable `RUST_LOG` or the CLI input `log-level`");
config
}

/// Validate that private key as an Eth wallet
fn parse_key(value: &str) -> Result<String, WalletError> {
// The wallet can be stored instead of the original private key
let wallet = build_wallet(value)?;
let addr = wallet_address(&wallet);
info!(address = addr, "Resolved Graphcast id");
Ok(String::from(value))
}
}

This file defines the Config struct and its associated methods for handling configuration options of our Radio. This outlines the basic configuration that all Radios have to define.

The configuration options can be provided through command-line arguments, environment variables, or a combination of both. The Config struct parses and validates these options, it also initializes the tracing system for logging purposes.

Methods

  • args(): Parses and returns the configuration options from command-line arguments and environment variables.
  • parse_key(value: &str): Validates a given private key by attempting to create an Ethereum wallet with it. Returns the private key as a string if successful.

Instantiate the essentials

From here on, all following code will be in the main function. To start off, we define a name for our Radio, read the provided environment variables and instantiate our configuration struct.

// This can be any string
let radio_name = "ping-pong".to_string();
// Loads the environment variables from .env
dotenv().ok();

// Instantiates the configuration struct based on provided environment variables or CLI args
let config = Config::args();
let _parent_span = tracing::info_span!("main").entered();

Now let's instantiate a few variables that will do all the heavy lifting for us.

// Subtopics are optionally provided and used as the content topic identifier of the message subject,
// if not provided then they are usually generated based on indexer allocations
let subtopics: Vec<String> = vec!["ping-pong-content-topic".to_string()];

// GraphcastAgentConfig defines the configuration that the SDK expects from all Radios, regardless of their specific functionality
let graphcast_agent_config = GraphcastAgentConfig::new(
config.private_key.expect("No private key provided"),
config.indexer_address,
radio_name,
config.registry_subgraph,
config.network_subgraph,
config.id_validation.clone(),
config.graph_node_endpoint,
None,
Some("testnet".to_string()),
Some(subtopics),
None,
None,
None,
None,
Some(true),
// Example ENR address
Some(vec![String::from("enr:-JK4QBcfVXu2YDeSKdjF2xE5EDM5f5E_1Akpkv_yw_byn1adESxDXVLVjapjDvS_ujx6MgWDu9hqO_Az_CbKLJ8azbMBgmlkgnY0gmlwhAVOUWOJc2VjcDI1NmsxoQOUZIqKLk5xkiH0RAFaMGrziGeGxypJ03kOod1-7Pum3oN0Y3CCfJyDdWRwgiMohXdha3UyDQ")]),
None,
)
.await
.unwrap_or_else(|e| panic!("Could not create GraphcastAgentConfig: {e}"));

GraphcastAgentConfig takes in an optional vector for content topics. Here we explicitly provide a singleton vector of "ping-pong-content-topic", but you can define topics based on the radio's use case needs. If you leave the field as None, then the agent will automatically fetch your indexer's active allocations and create a list of topics in the format of radio application name + the allocated subgraph deployments' IPFS hash.

Next, we will instantiate a GraphcastAgent:

debug!("Initializing the Graphcast Agent");
let (graphcast_agent, waku_msg_receiver) = GraphcastAgent::new(graphcast_agent_config)
.await
.expect("Could not create Graphcast agent");

GraphcastAgent is the main struct through which the Radios communicate with the SDK.

And lastly for the setup part, we need to run two one-off setters for GraphcastAgent and for the incoming messages store:

// A one-off setter to load the Graphcast Agent into the global static variable
_ = GRAPHCAST_AGENT.set(graphcast_agent);

// A one-off setter to instantiate an empty vec before populating it with incoming messages
_ = MESSAGES.set(Arc::new(Mutex::new(vec![])));

Awesome, we're all set to start with the actual Radio logic now!

Sending messages

We'll define a helper function that holds the logic of sending messages to the Graphcast network:

// Helper function to reuse message sending code
async fn send_message(payload: SimpleMessage) {
if let Err(e) = GRAPHCAST_AGENT
.get()
.expect("Could not retrieve Graphcast agent")
.send_message(
// The identifier can be any string that suits your Radio logic
// If it doesn't matter for your Radio logic (like in this case), you can just use a UUID or a hardcoded string
"ping-pong-content-topic",
payload,
Utc::now().timestamp(),
)
.await
{
error!(error = tracing::field::debug(&e), "Failed to send message");
};
}

Again, the identifier that we define as ping-pong-content-topic can be any string that suits your Radio logic, if it doesn't really matter for your use case (like in the ping-pong Radio case) you can just use a UUID or a hardcoded string.

Receiving and handling messages

We now know how to send messages, but how do we receive and handle messages from other network participants?

After GossipAgent validates the incoming messages, we provide a custom callback handler that specifies what to do with the message. In this handler we cache the message for later aggregation and processing, but depending on your Radio use case you are free to use any data storage option - a database, a custom data structure or a simple vector.

Here is a simple handler that does just that:

// The handler specifies what to do with incoming messages.
// This is where you can define multiple message types and how they get handled by the radio
// by chaining radio payload typed decode and handler functions
tokio::spawn(async move {
for msg in waku_msg_receiver {
trace!(
"Radio operator received a Waku message from Graphcast agent, now try to fit it to Graphcast Message with Radio specified payload"
);
let _ = GRAPHCAST_AGENT
.get()
.expect("Could not retrieve Graphcast agent")
.decoder::<SimpleMessage>(msg.payload())
.await
.map(|msg| {
msg.payload.radio_handler();
})
.map_err(|err| {
error!(
error = tracing::field::debug(&err),
"Failed to handle Waku signal"
);
err
});
}
});

GRAPHCAST_AGENT
.get()
.expect("Could not retrieve Graphcast agent")
.register_handler()
.expect("Could not register handler");

The main loop

Great, we're almost there! We have a way to pass messages back and forth 🏓. But sending a one-off message is no fun, we want to create some sort of scheduled and continuous logic of message exchange, and perhaps the easiest way to do that is to use a block number as a cue.

We'll start listening to Ethereum blocks coming from the Graph Node and on each block we'll do a simple check - if the block number is even we'll send a "Ping" message, and if it's odd we'll process the messages we've received. After processing the messages we'll clear our store.

let mut block_number = 0;

loop {
block_number += 1;
info!(block = block_number, "🔗 Block number");
if block_number % 2 == 0 {
// If block number is even, send ping message
let msg = SimpleMessage::new(
"table".to_string(),
std::env::args().nth(1).unwrap_or("Ping".to_string()),
);
send_message(msg).await;
} else {
// If block number is odd, process received messages
let messages = AsyncMutex::new(
MESSAGES
.get()
.expect("Could not retrieve messages")
.lock()
.expect("Could not get lock on messages"),
);
for msg in messages.lock().await.iter() {
if msg.content == *"Ping" {
let replay_msg = SimpleMessage::new("table".to_string(), "Pong".to_string());
send_message(replay_msg).await;
};
}

// Clear message store after processing
messages.lock().await.clear();
}

// Wait before next block check
sleep(Duration::from_secs(5));
}

The finished Radio

Congratulations, you've now written your first full Graphcast Radio! The finished code is also available in this repo, the only important difference is in the dependencies.

That's awesome. But how do we run it?

You can start up the ping-pong Radio using cargo run.

You can spawn more instances of the ping-pong Radio and examine how they interact with each other in the terminal logs.

Now there's just one more thing to do - have fun examining the logs & be proud of yourself - you made it! 🥂 From here on out, the only limit to the Radios you can build is your own imagination.

- + \ No newline at end of file diff --git a/graphcast/sdk/registry.html b/graphcast/sdk/registry.html index 6abd7308..5598f05e 100644 --- a/graphcast/sdk/registry.html +++ b/graphcast/sdk/registry.html @@ -5,13 +5,13 @@ Registry Contract | GraphOps Docs - +

Registry Contract

The Graphcast Registry contracts allow an address to set a GraphcastID by calling setGraphcastID(indexer_address, graphcastID_address) as either an Indexer or an Indexer operator, or calling setGraphcastID(graphcastID_address) as the Indexer address. The relationship between an Indexer address to its GraphcastID is limited to 1:1, and cannot be set to itself. This restriction provides consistency and security for the Indexer identity to operate on Graphcast as one GraphcastID operating across Radio applications. To learn more about the registry, you can check out the Github repository.

There are also subgraphs for these registry contracts. They provide information on both the Indexer registration status and the GraphcastID registration status, specifically mapping the indexer registered on The Graph service registry contract to GraphcastID registered on the Graphcast registry contract.

Register a Graphcast ID

The Graphcast Registry contract maps Graphcast IDs to Indexers in the Graph Protocol. With a unique Graphcast ID, an Indexer can sign messages for the Radio, eliminating the need to expose their private Indexer (or Indexer Operator) key or mnemonic. This provides an added layer of security, protecting Indexers' sensitive information while enabling participation in the Graphcast Network.

Here is a brief overview of the accounts you'll be interacting with:

Account NameDescription
Indexer AccountThe existing account associated with your Graph Protocol Indexer. This may be a Token Lock Contract address, or a multisig or EOA address.
Indexer Operator AccountAn account you have registered as an Operator for your Indexer. You can use the Operator account that you pass to indexer-agent.
Graphcast ID AccountA new account that you will create that is used by Graphcast Radio instances to sign messages on behalf of your Indexer.

You'll need to use a registered Indexer Operator account for your Indexer to register a Graphcast ID.

tip

You can register multiple Operators for your Indexer in parallel. If you would prefer not to import the Operator account that you use with indexer-agent into your wallet in order to register your Graphcast ID, you can generate and register a dedicated operator account for this purpose. After you have registered your Graphcast ID, you can deregister the dedicated operator if you desire.

  1. Generate a new Ethereum account to act as your Graphcast ID, keeping the details safe. Be sure to select the Ethereum network, and save the mnemonic, as well as the address and private key for the first account. This is your Graphcast ID.
  2. Import your Indexer Operator private key into your wallet (e.g. MetaMask or Frame) in order to send a transaction to register your Graphcast ID.
  3. Navigate to the Graphcast registry contract for your preferred network and register your Graphcast ID.
  4. Call setGraphcastIDFor(indexer_address, graphcast_id), passing in your Indexer Address and Graphcast ID. Neither address should be your Indexer Operator address that is being used to sign the transaction.
  5. Submit your transaction and wait for it to be included in a block.

Registry endpoints

NetworkRegistry ContractSubgraph API
Ethereum-mainnet0x89f97698d6006f25570cd2e31737d3d22aedcbcfhttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet
Ethereum-goerli0x26ebbA649FAa7b56FDB8DE9Ea17aF3504B76BFA0https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli
Arbitrum-one0xfae79e8cb8fbac2408e5baf89262bd92b6ca464ahttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-one
Arbitrum-goerli0x50c2d70a41ecefe4cc54a331457ea204ecf97292https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arbitrum-go
info

Each Graphcast ID can be associated with a single Indexer. To revoke a Graphcast ID for your Indexer, call setGraphcastIDFor(indexer_address, graphcast_id) with a Graphcast ID of 0x0 using a registered Indexer Operator Account.

Subgraph APIs

Here we list the APIs the team actively supports. For the network subgraph endpoint, we recommend exposing your indexer-service's endpoint at /network with authentication. You can also index and serve the registry subgraphs, but they are not currently deployed on the decentralized network.

Here are the endpoints available on the hosted service.

Protocol NetworkGraphcast NetworkRegistry Subgraph EndpointNetwork Subgraph Endpoint
Ethereum Mainnetmainnethttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnethttps://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet
Goerlitestnethttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerlihttps://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-goerli
Arbitrum-Onemainnethttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-onehttps://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum
Arbitrum-Goerlitestnethttps://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arbitrum-gohttps://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-goerli
- + \ No newline at end of file diff --git a/img/launchpad-release-channels.svg b/img/launchpad-release-channels.svg index b788c14f..e0998533 100644 --- a/img/launchpad-release-channels.svg +++ b/img/launchpad-release-channels.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/index.html b/index.html index 0931b89b..a2f7b7d6 100644 --- a/index.html +++ b/index.html @@ -5,13 +5,13 @@ GraphOps Docs | GraphOps Docs - +
Image copyright Eko Purnomo, courtesy of the Noun Project

Deploy, monitor and scale your Indexer on Kubernetes using Launchpad

Launchpad provides a toolbox for smoothly operating your Graph Protocol Indexer on Kubernetes

Image copyright Eko Purnomo, courtesy of the Noun Project

Join Graphcast to coordinate with other Indexers using Radios

Run Radios (P2P apps) in your stack to coordinate with other Indexers via the Graphcast Network

- + \ No newline at end of file diff --git a/launchpad/advanced/advanced-kubernetes.html b/launchpad/advanced/advanced-kubernetes.html index 318e7321..861dd413 100644 --- a/launchpad/advanced/advanced-kubernetes.html +++ b/launchpad/advanced/advanced-kubernetes.html @@ -5,13 +5,13 @@ Considerations for Kubernetes installation using FCOS | GraphOps Docs - +

Considerations for Kubernetes installation using FCOS

This guide provides a general walkthrough for installing Kubernetes using Fedora CoreOS (FCOS) as the base operating system.

Prerequisites

Before proceeding with this guide, ensure you have a solid understanding of how FCOS works and the steps required to install and enable FCOS as detailed in Install FCOS Guide.

Additionally, a clear grasp of the fundamental Kubernetes architecture will greatly aid in navigating the guidance outlined ahead.

Key components for Kubernetes Installation

To set up Kubernetes on any node, you will require the kubeadm tool and a compatible container runtime.

Key features of kubeadm include:

  • Cluster Initialization: kubeadm helps you initialize the control plane node of a Kubernetes cluster. It handles tasks like generating TLS certificates, creating the Kubernetes configuration files, and setting up the initial control plane components.

  • Node Joining: You can use kubeadm to add worker nodes (also known as minion nodes) to the cluster. This involves generating the necessary credentials and configurations for nodes to communicate with the control plane.

  • Upgrades: kubeadm assists in upgrading a Kubernetes cluster to a newer version by providing commands to perform version-specific upgrade tasks.

  • Configurations: The tool helps generate the necessary Kubernetes configuration files (e.g., kubeconfig) that enable communication between different components of the cluster.

  • Networking: While kubeadm itself does not handle networking directly, it can help you integrate with various networking solutions, such as Calico, Flannel, or others.

  • Token Management: kubeadm uses tokens for securely joining nodes to the cluster. It manages the generation and distribution of these tokens.

  • Certificate Management: kubeadm manages TLS certificates required for secure communication between cluster components.

  • Configuration Validation: kubeadm performs preflight checks to validate whether the host system is ready for cluster creation or joining.

note

If you opt for a multi-node Kubernetes cluster, your Butane configurations will differ based on the specific role each node plays, whether it's a control plane or a worker node.

Butane config for Kubernetes control-planes

Running kubeadm init is the first step in setting up the Kubernetes control plane, but there are several additional tasks you need to perform to ensure that the control plane is fully functional and secure:

  1. Install kubectl: After running kubeadm init, you'll receive instructions on how to set up the kubectl command-line tool, which will be used to interact with the Kubernetes cluster.

  2. Set Up Network Plugin: Kubernetes requires a network plugin to enable communication between pods and nodes. Choose a network plugin that suits your needs (e.g., Calico, Flannel, Cilium) and install it on the cluster.

  3. Secure the Control Plane: Apply security best practices to the control plane components. For example, you can restrict access to the API server, enable RBAC (Role-Based Access Control), and set up authentication and authorization mechanisms.

  4. Back Up Certificates: Back up the Kubernetes certificates generated during the kubeadm init process. These certificates are critical for secure communication within the cluster.

  5. Configure Load Balancing: If you're setting up a high-availability control plane, you might need to configure load balancing for the API server to distribute traffic among multiple control plane nodes.

Remember that this list provides a general overview of the tasks you need to complete after running kubeadm init. The specific steps may vary depending on your cluster's requirements and the components you choose to install.

Butane config for Kubernetes worker nodes

On a worker node you need to perform the following steps for installing Kubernetes:

  1. Install the Container Runtime of your choice. This runtime is responsible for managing and running containers.

  2. Install the kubelet: The kubelet is the primary node agent responsible for managing containers on the node and ensuring they match the desired state described in the Kubernetes manifest files.

  3. Run kubeadm join: Once the container runtime and kubelet are installed and properly configured on the worker node, you can run the kubeadm join command to connect the node to the cluster's control plane.

  4. Network Configuration: After the node is joined to the cluster, you might need to configure network plugins (e.g., Calico, Flannel) to enable communication between nodes and pods.

- + \ No newline at end of file diff --git a/launchpad/client-side-tooling.html b/launchpad/client-side-tooling.html index 975a6f28..cccd3809 100644 --- a/launchpad/client-side-tooling.html +++ b/launchpad/client-side-tooling.html @@ -5,13 +5,13 @@ Client Side Tooling | GraphOps Docs - +

Client Side Tooling

Launchpad comes with an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your cluster software stack.

Client Side Stack

These tools do not run on your servers, but on your local machine. They form the command & control center that you use to send instructions to your cluster.

Installing on your local machine

Launchpad comes with a task to install local dependencies on your machine. See the Quick Start Guide for more information.

Understanding the tools in the client-side stack

Taskfile

Taskfile is a simple task runner for automation and devops tasks. It allows you to define tasks in a single file, Taskfile.yml, and run them in a consistent, cross-platform way. It can be used to automate anything from building and deploying applications to running tests and linting code. Taskfile is written in Go and is easy to install and use.

Launchpad uses task as the primary command line interface. You can also define your own tasks!

Helm

Helm is a package manager for Kubernetes that helps you manage and automate the deployment of your applications. It allows you to define, install, and upgrade Kubernetes resources in a consistent, versioned way. Helm uses a simple templating syntax to allow you to parameterize your deployments and create reusable chart templates. Helm also provides a variety of pre-built charts for popular software.

Launchpad uses Helm to deploy packages (Helm Charts) into your cluster.

Helmfile

Helmfile is a tool for managing multiple Helm charts together using a single file. It allows you to define a set of Helm releases together in a file, and then use a single command to install, upgrade, or delete all of the releases at once. This makes it easy to manage complex, multi-chart applications. Helmfile is written in Go and is easy to install and use.

Launchpad uses Helmfile to declare and manage sets of related Helm releases.

Kustomize

Kustomize lets you customize raw, template-free YAML files for multiple purposes, leaving the original YAML untouched and usable as is. It is used by helmfile for some of its features.

Kubectl

Kubectl is the command-line interface for Kubernetes that allows you to deploy, scale, and manage applications on a Kubernetes cluster. It provides a simple, easy-to-use command-line interface for performing common Kubernetes tasks such as creating and managing pods, services, and deployments.

Launchpad uses Kubectl to interact with your Kubernetes cluster.

- + \ No newline at end of file diff --git a/launchpad/design-principles.html b/launchpad/design-principles.html index 11a0ad69..856eed4c 100644 --- a/launchpad/design-principles.html +++ b/launchpad/design-principles.html @@ -5,13 +5,13 @@ Design Principles | GraphOps Docs - + - + \ No newline at end of file diff --git a/launchpad/faq.html b/launchpad/faq.html index fad751a9..5573d8dd 100644 --- a/launchpad/faq.html +++ b/launchpad/faq.html @@ -5,13 +5,13 @@ Frequently Asked Questions (FAQs) | GraphOps Docs - +

Frequently Asked Questions (FAQs)

Here are answers to some commonly asked questions. If you have a question that is not covered here, feel free to ask.


Table of Contents


Do I need a server for launchpad-starter?

Q: Do I need a server for launchpad-starter?

A: No! The Client Side Tooling that comes with Launchpad should be run on your local machine. These tools are only used to instruct your cluster what to do.


When you setup postgres, how do you configure the zfs storage parameters?

Q: When you setup postgres, how do you configure the zfs storage parameters (eg the block size, compression, etc) ?

A: Persistent workloads consume Persistent Volumes that use some specific StorageClass (an abstraction). Storage Providers in Kubernetes (like openebs/zfs-localpv), do the operational work of "implementing" those Storage Classes. It is the StorageClass object/resource that would have that particular ZFS setup controlled by parameters. Here's an example of a zfs StorageClass that sets some parameters:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
meta.helm.sh/release-name: openebs-zfs-storageclass
meta.helm.sh/release-namespace: storage
labels:
app.kubernetes.io/managed-by: Helm
launchpad.graphops.xyz/layer: base
launchpad.graphops.xyz/namespace: storage
name: openebs-zfs-localpv-compressed-128k
parameters:
compression: "on"
fstype: zfs
poolname: zpool
recordsize: "128k"
provisioner: zfs.csi.openebs.io
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true

Is there a way to inject a pretuned postgres config into the chart?

Q: Is there a way to inject a pretuned postgres config into the chart? Or is that a post deployment step?

A: Yes. The resource-injector chart allows us to inject a pre-tuned postgres db. This postgresql is a CRD (custom-resource-definition) that is consumed by the postgres-operator (which does the whole operational work of creating the database, setting up users, etc., as well as replication; backups are also a possibility). It really allows a great deal of flexibility in terms of what/how it sets up the database!


Why are my stateful pods in Pending state and their expected pvc showing WaitForFirstConsumer errors?

Q: Why are my stateful pods in Pending state and their expected pvc showing WaitForFirstConsumer errors?

  Normal  WaitForPodScheduled   26m (x19 over 31m)   persistentvolume-controller  waiting for pod kube-prometheus-stack-grafana-75b74df8fb-2vwbr to be scheduled
Normal WaitForPodScheduled 47s (x102 over 26m) persistentvolume-controller waiting for pod kube-prometheus-stack-grafana-75b74df8fb-2vwbr to be scheduled

or

Normal  WaitForFirstConsumer  6m52s                   persistentvolume-controller  waiting for first consumer to be created before binding     

A: Using volumeBindingMode: WaitForFirstConsumer although needed for both openebs-rawfile-localpv and openebs-zfs-localpv seems to misbehave when there is a storageClass set as default in the cluster (the storageClass definition has the following annotation: storageclass.kubernetes.io/is-default-class: "true"). Making sure there is no default storageClass should fix this issue.


Do I need to use Cilium for Launchpad?

Q: Do I need a specific CNI (Cilium, Calico etc) in order to use Launchpad?

A: The Launchpad stack will work regardless of CNI used and in more general terms should work with all Kubernetes clusters - so you can customize your cluster how you prefer. In our Kubernetes guide we use Cilium due to its use of eBPF technology. This advanced approach offers a significant boost in efficiency, especially noticeable when managing a large number of nodes. It scales well and ensures lower latency, which is crucial for high-performance requirements. While Calico does enjoy a broader base of community support and is a strong choice with its iptables routing, Cilium has the upper advantage due to its performance and its more expansive set of features.

It's important to acknowledge that while Cilium has better performance and features than Calico, it is a bit trickier to set up. Our decision isn't influenced by Launchpad; it's purely a preference based on the operational benefits that Cilium brings to our infrastructure.

Need More Help?

If your question is not answered here, you can message us on the #kubernetes-launchpad channel on graphprotocol Discord or you can open an issue on our launchpad-namespaces or launchpad-charts repos.

- + \ No newline at end of file diff --git a/launchpad/guides/arbitrum-archive-kubernetes-guide.html b/launchpad/guides/arbitrum-archive-kubernetes-guide.html index f35ef11d..07c2074a 100644 --- a/launchpad/guides/arbitrum-archive-kubernetes-guide.html +++ b/launchpad/guides/arbitrum-archive-kubernetes-guide.html @@ -5,14 +5,14 @@ Arbitrum Archive Mainnet Node Guide | GraphOps Docs - +

Arbitrum Archive Mainnet Node Guide

danger

This Quick Start guide has not yet been updated for Launchpad V2.

This guide is intended to be an end to end walk-through of running an Arbitrum Archive Mainnet Node in an existing Kubernetes cluster. Sync times are reported to be in the range of 1 week on dedicated hardware. The node consists of 2 parts, the classic part and the nitro hardfork. The classic part is only required to request archive data for blocks before the hardfork and takes the aforementioned 1 week to sync from scratch. The nitro history is shorter and can be quickly synced within 3 days.

Arbitrum Nitro has a built-in proxy to redirect queries with block numbers below its genesis block (they’re sent to the Arbitrum Classic node)

Prerequisites

All the Launchpad Prerequisites apply if running a Kubernetes cluster using Launchpad, so be sure to read them first. This guide can be used with existing Kubernetes clusters as well.

You will need:

  • an ethereum-mainnet RPC endpoint
  • CPU: 4 Cores / 8 Threads
  • RAM: 16 GiB
  • Storage: 3 TiB NVMe SSD

If running a Kubernetes cluster using Launchpad

  1. Check that the cluster is running and healthy - review Quick Start guide for more info.
  2. In your private infra repo pull in latest launchpad-starter changes
task launchpad:pull-upstream-starter
  1. Pull in latest-core changes
task launchpad:update-core
  1. blockchain node data snapshot Arbitrum Classic provides functionality to download data from a snapshot. Review all files in [<your-private-copy-of-launchpad-starter>/helmfiles/release-names/arbitrum-mainnet/](https://github.com/graphops/launchpad-starter/blob/main/helmfiles/release-values/arbitrum-mainnet/) before deploying the chart
arbitrum:
restoreSnapshot:
enable: true
snapshotUrl: https://a-link-to-your-snapshot-archive.tar.gz
mode: streaming # or multipart depending on chain
  1. connect to eth-mainnet-rpc-node Both Arbitrum Classic and Arbitrum Nitro connect to l1 via the following commands:
arbitrum:
extraArgs:
- --node.chain-id=42161 # determines Arbitrum network - 42161 mainnet
- --l1.url=http://a-link-to-your-eth-mainnet-url:8545
nitro:
extraArgs:
- --http.api=net,web3,eth,debug
- --l2.chain-id=42161 # determines Arbitrum network - 42161 mainnet
- --l1.url=http://a-link-to-your-eth-mainnet-url:8545
- --node.rpc.classic-redirect=http://arbitrum-classic-archive-trace-mainnet-0:8547/
- --init.url=https://snapshot.arbitrum.io/mainnet/nitro.tar

Deploying with helm in a Kubernetes cluster outside Launchpad

You can find blockchain related helm packages 'here'

Given that Arbitrum needs both Nitro and classic to run use the following commands:

Deploy Arbitrum Classic

We'll first deploy Arbitrum Classic as Arbitrum Nitro needs to connect to the Classic endpoint.

Create a values arbitrum-classic.yaml file with the following contents

arbitrum:
extraArgs:
- --node.chain-id=42161 # determines Arbitrum network - 42161 mainnet
- --l1.url=http://a-link-to-your-eth-mainnet-url:8545
restoreSnapshot:
enable: true
snapshotUrl: https://a-link-to-your-snapshot-archive.tar.gz
mode: streaming # or multipart depending on chain

Deploy helm-chart:

helm repo add graphops http://graphops.github.io/launchpad-charts
helm install --dry-run arbitrum-classic graphops/arbitrum-classic:latest --namespace arbitrum-mainnet --values arbitrum-classic.yaml

Deploy Arbitrum Nitro

Create a values arbitrum-nitro.yaml file with the following contents

nitro:
extraArgs:
- --http.api=net,web3,eth,debug
- --l2.chain-id=42161 # determines Arbitrum network - 42161 mainnet
- --l1.url=http://a-link-to-your-eth-mainnet-url:8545
- --node.rpc.classic-redirect=http://arbitrum-classic:8547/ # replace `arbitrum-classic` with the name of your arbitrum-classic release deployed at the previous step
- --init.url=https://snapshot.arbitrum.io/mainnet/nitro.tar

Deploy helm-chart:

helm install --dry-run arbitrum-nitro graphops/arbitrum-nitro:latest --namespace arbitrum-mainnet --values arbitrum-nitro.yaml
- + \ No newline at end of file diff --git a/launchpad/guides/avalanche-archive-kubernetes.html b/launchpad/guides/avalanche-archive-kubernetes.html index 686fbbb8..8d10223a 100644 --- a/launchpad/guides/avalanche-archive-kubernetes.html +++ b/launchpad/guides/avalanche-archive-kubernetes.html @@ -5,14 +5,14 @@ Avalanche Archive Mainnet Node Guide | GraphOps Docs - +

Avalanche Archive Mainnet Node Guide

danger

This Quick Start guide has not yet been updated for Launchpad V2.

This guide is intended to be an end to end walk-through of running an Avalanche Archive Mainnet Node in an existing Kubernetes cluster. Sync times are reported to be in the range of 3 weeks on dedicated hardware.

Prerequisites

All the Launchpad Prerequisites apply if running a Kubernetes cluster using Launchpad, so be sure to read them first. This guide can be used with existing Kubernetes clusters as well.

For avalanche workload you will need:

  • CPU: 4 Cores / 8 Threads
  • RAM: 16 GiB
  • Storage: 3 TiB NVMe SSD

If running a Kubernetes cluster using Launchpad

  1. Check that the cluster is running and healthy - review Quick Start guide for more info.
  2. In your private infra repo pull in latest launchpad-starter changes
task launchpad:pull-upstream-starter
  1. Pull in latest-core changes
task launchpad:update-core
  1. Check default values- double-check values and update as needed in <your-private-copy-of-launchpad-starter>/helmfiles/release-names/avalanche-mainnet/avalanche-archive-trace-mainnet-0.yaml

  2. Deploy avalanche-mainnet namespace

task releases:apply avalanche-mainnet

Deploying with helm in a Kubernetes cluster outside Launchpad

You can find blockchain related helm packages here

By default avalanche is told what type of node to run by the following default toml config:

configTemplate: |
# Store configuration in toml format
snowman-api-enabled = "false"
eth-apis = [
"eth",
"eth-filter",
"net",
"web3",
"internal-eth",
"internal-blockchain",
"internal-transaction",
"internal-tx-pool",
"internal-account"
]
metrics-enabled = "true"
pruning-enabled = "false"
state-sync-enabled = "false"

Override the above config by providing a new one in a values file and deploy:

helm repo add graphops http://graphops.github.io/launchpad-charts
helm install --dry-run avalanche graphops/avalanche:latest --namespace avalanche-mainnet --values avalanche-mainnet.yaml
- + \ No newline at end of file diff --git a/launchpad/guides/celo-archive-kubernetes-guide.html b/launchpad/guides/celo-archive-kubernetes-guide.html index cb125792..0c8417d6 100644 --- a/launchpad/guides/celo-archive-kubernetes-guide.html +++ b/launchpad/guides/celo-archive-kubernetes-guide.html @@ -5,14 +5,14 @@ Celo Archive Mainnet Node Guide | GraphOps Docs - +

Celo Archive Mainnet Node Guide

danger

This Quick Start guide has not yet been updated for Launchpad V2.

This guide is intended to be an end to end walk-through of running a Celo Archive Mainnet Node in an existing Kubernetes cluster. Sync times are reported to be in the range of 4 days on dedicated hardware.

Prerequisites

All the Launchpad Prerequisites apply if running a Kubernetes cluster using Launchpad, so be sure to read them first. This guide can be used with existing Kubernetes clusters as well.

For Celo workload you will need:

  • CPU: 4 Cores / 8 Threads
  • RAM: 16 GiB
  • Storage: 3 TiB NVMe SSD

If running a Kubernetes cluster using Launchpad

  1. Check that the cluster is running and healthy - review Quick Start guide for more info.
  2. In your private infra repo pull in latest launchpad-starter changes
task launchpad:pull-upstream-starter
  1. Pull in latest-core changes
task launchpad:update-core
  1. Check default values- double-check values and update as needed in <your-private-copy-of-launchpad-starter>/helmfiles/release-names/celo-mainnet/celo-archive-trace-mainnet-0.yaml

  2. Deploy celo-mainnet namespace

task releases:apply celo-mainnet

Deploying with helm in a Kubernetes cluster outside Launchpad

You can find blockchain related helm packages here

Create a values celo-mainnet.yaml file with the following contents or similar:

celo:
extraArgs:
- --verbosity 3
- --syncmode full
- --gcmode archive
- --txlookuplimit=0
- --cache.preimages
- --http.corsdomain=*
- --ws # enable ws
- --http.api=eth,net,web3,debug,admin,personal

Deploy helm-chart:

helm repo add graphops http://graphops.github.io/launchpad-charts
helm install --dry-run celo graphops/celo:latest --namespace celo-mainnet --values celo-mainnet.yaml
- + \ No newline at end of file diff --git a/launchpad/guides/goerli-indexer-guide.html b/launchpad/guides/goerli-indexer-guide.html index 416038c0..4369ec32 100644 --- a/launchpad/guides/goerli-indexer-guide.html +++ b/launchpad/guides/goerli-indexer-guide.html @@ -5,13 +5,13 @@ Goerli Indexer Guide | GraphOps Docs - +

Goerli Indexer Guide

This guide is intended to be an end to end walk-through of setting up an Indexer running on the Graph Protocol Testnet on the Ethereum Goerli network.

Prerequisites

All the Launchpad Prerequisites apply, so be sure to read them first.

You will need:

  • At least one server running Ubuntu 22.04 with keypair authenticated SSH access
  • A web browser with MetaMask installed

What we're going to do

  1. Create all the relevant Ethereum accounts, fund them, register them on-chain with the protocol
  2. Follow the Quick Start guide to set up our local machine, and then set up our cluster of servers, and finally deploy the Graph Stack
  3. Configure DNS, verify ingress and TLS is working
  4. Allocate to subgraph deployments
  5. Verify that we are serving queries

Create the Indexer and Operator Ethereum accounts

We will need to set up two new Ethereum accounts:

  1. The Indexer account: this account is your Indexer's identity, and is used to stake GRT into the protocol. This account owns your in-protocol GRT. This key should be kept very safe!
  2. The Operator account: this account is authorised to perform some operational actions (like managing allocations) on behalf of your Indexer. The key for this account will live on your server(s). The Indexer Software uses this account to automate interactions with the protocol. This account does not own your GRT, but can take actions that put your GRT at risk (e.g. submitting a bad POI could make you liable to slashing). You can replace the Operator account with a new one at any time.

Generating mnemonics for your new accounts

Ian Coleman's BIP39 generator is great for quickly generating new mnemonics and their derived accounts.

  1. Set the "Coin" to "ETH - Ethereum".
  2. Click Generate
  3. Take note of: the mnemonic and (optionally, if you follow this guide) all details for the first derived address

Generate two new mnemonics and save their details.

tip

When setting up your Indexer account for mainnet, use a more secure method, like a hardware wallet, for generating your Indexer account.

Funding our new accounts

Both our new accounts will need ETH in order to pay for transaction costs. The Operator account will be used for all automated protocol interactions, so it will need a lot more ETH than the Indexer account. The Indexer account will need to pay for gas to stake GRT into the protocol, and set various metadata and parameters.

For the Goerli testnet, there are a number of ETH faucets that can be used to fund your new accounts. You can find various options here: https://faucetlink.to/goerli

Our Indexer account will also need at least 100,000 Goerli GRT in order to stake in the protocol. If you are a MIPs participant, you should follow the relevant instructions to get that GRT. Otherwise there is a Goerli GRT faucet available in the Graph Protocol Discord.

Registering our Indexer and Operator accounts

We will use The Graph's Testnet Network app to register our new accounts with the protocol on-chain.

  1. Use MetaMask to import the private key for your Indexer mnemonic's first derived account. This will allow us to transact as our Indexer.
  2. Navigate to "Indexing" under your profile dropdown menu
  3. Click the Stake button and Stake at least 100k GRT (first allowing GRT token access with an approval transaction, followed by the stake transaction)
  4. Navigate to "Settings" under your profile dropdown menu, and then to "Operators" in the left hand menu
  5. Click the plus symbol, paste in your Operator mnemonic's first derived address, click Add and submit the transaction
  6. (optionally) Navigate to other settings to configure profile details

Launching off the pad!

Now that our accounts are ready, let's follow the Quick Start guide to:

  1. Create a new repository for our infrastructure
  2. Configure our local machine to command our cluster
  3. Configure our servers into a Kubernetes cluster
  4. Deploy core cluster services and the Graph Indexing stack into our cluster
- + \ No newline at end of file diff --git a/launchpad/guides/install-fcos.html b/launchpad/guides/install-fcos.html index 7d921342..2bac8337 100644 --- a/launchpad/guides/install-fcos.html +++ b/launchpad/guides/install-fcos.html @@ -5,14 +5,14 @@ FCOS Installation | GraphOps Docs - +

FCOS Installation

Fedora CoreOS (FCOS) is an open-source container-focused operating system that is:

  • minimal
  • automatically updated
  • designed for clusters but can also be used standalone.

It is optimized for Kubernetes and includes technology from CoreOS Container Linux and Fedora Atomic Host, providing a secure and scalable container host for workloads.

Here are key differences between FCOS and traditional operating systems:

  • Package management: FCOS uses rpm-ostree for atomic updates, while traditional OSes use package managers like apt or yum.
  • Security: FCOS includes SELinux for enhanced security, while traditional OSes may require additional security configurations.
  • Containerization: FCOS is designed for container workloads, while traditional OSes may need extra setup for containers.
  • Automatic updates: FCOS provides automatic updates, while traditional OSes may require manual updates.
  • Minimal footprint: FCOS is optimized for running containers at scale, while traditional OSes have a broader range of software and features.

This guide takes you through the different considerations required to install and configure FCOS. NOTE the following instructions are for guidance only and do not represent step by step instructions.

Picking the right installation method

To install and configure FCOS, you need to use the coreos-installer tool. The following options for booting the OS are available:

  • Installing on bare metal:

    • Booting from live ISO using a KVM
    • Booting from PXE or iPXE
    • Booting from a container
    • Installing coreos-installer using cargo (not officially documented) but a good option for anyone running Hetzner servers or any other provider that doesn't offer PXE/iPXE boot and is not officially supporting FCOS images. The officially supported alternative for this option would be booting from live ISO. Example of coreos-installer install using cargo:
      # install packages necessary for coreos-installer
      apt update && apt upgrade
      apt install pkg-config libssl-dev libzstd-dev
      # install cargo
      curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
      source "$HOME/.cargo/env"
      cargo install coreos-installer
      # install butane
      wget https://github.com/coreos/butane/releases/download/$YOUR_BUTANE_VERSION/butane-x86_64-unknown-linux-gnu
      chmod +x butane-x86_64-unknown-linux-gnu
  • Installing on cloud servers/VMs:

    • official FCOS images can be used to provision new servers for AWS and GCP - can be found under Cloud Launchable section on downloads page

Once an installation image is picked, time to customize the system.

Create a configuration

FedoraCoreOS follows the principles of immutable infrastructure, where the operating system and application components are treated as immutable, meaning they are not modified after deployment. Updates are delivered through "automatic updates" managed by the OS , following a rolling update strategy. New instances with updated images are provisioned, and old instances are replaced.

Treating the operating system as immutable:

  • reduces configuration drift
  • enhances system reliability
  • stateful components or data can still exist outside the operating system and have their own mechanisms for persistence and updates

To customize a Fedora CoreOS (FCOS) system, a configuration file needs to be provided which will be used by Ignition to provision the system.

This file will be used to customize various aspects of the system, such as creating a user, adding a trusted SSH key, enabling systemd services, and more.

To create an ignition file:

  • define a butane config in YAML format using the specification. Your butane file should contain the following minimum sections:
    • Ignition Version: Specify the version of the Ignition configuration format to use
    • Storage Configuration: Define the disk layout and filesystems for your FCOS installation. This includes partitioning, formatting, and mounting options.
    • Passwd Users: Set up user accounts for logging into the FCOS instance.
    • Systemd Units: Configure systemd units to manage services and perform system-level tasks.
    • Networkd Units: Configure network settings, including network interfaces, IP addressing, and DNS as required.
  • These are just the basic building blocks for a Butane configuration file. Depending on your specific requirements, you may need to include additional configuration options such as users, SSH keys, systemd units, networking, etc. You can refer to the Butane documentation and the FCOS documentation for more details and advanced configuration options.
  • An example of a butane file you can get started with containing the minimum requirement:
    variant: fcos
    version: 1.4.0
    storage:
    disks:
    - device: /dev/sda
    partitions:
    - number: 1
    size: 512MiB
    label: root
    filesystem: ext4
    should_exist: true
    filesystems:
    - name: root
    mount:
    path: /
    device: /dev/disk/by-partlabel/root
    format: true
    passwd:
    users:
    - name: myuser
    ssh_authorized_keys:
    - ssh-rsa AAAA...
    systemd:
    units:
    - name: my-service.service
    enable: true
    contents: |
    [Unit]
    Description=My Service

    [Service]
    ExecStart=/usr/bin/my-service
    networkd:
    units:
    - name: 00-eth0.network
    contents: |
    [Match]
    Name=eth0

    [Network]
    DHCP=ipv4
  • use the butane cli (formerly Fedora CoreOS Config Transpiler (fcct)) to convert the YAML config into a valid ignition configuration (JSON format).
    butane --pretty --strict < /tmp/config.bu > /tmp/config.ign
    # or if using podman
    sudo podman run --interactive --rm [quay.io/coreos/butane:release](http://quay.io/coreos/butane:release) --pretty --strict < /tmp/config.bu > /tmp/config.ign

Install new OS with coreos-installer

Next pass the config.ign file to coreos-installer.

coreos-installer install /dev/sda -i config.ign /tmp/config.ign

If you've run the above command, the following will happen on the host

  1. The CoreOS Installer will install the Fedora CoreOS operating system onto the specified device (in this case, /dev/sda) using the provided Ignition configuration file (/tmp/config.ign).
  2. The installation process will partition and format the device, copy necessary files, and configure the bootloader.
  3. At this point user should reboot the system.
  4. Upon reboot, the system will start up with the newly installed Fedora CoreOS.
  5. After the initial boot, Fedora CoreOS will automatically manage updates using the rpm-ostree tool. It will fetch and apply updates in an atomic manner, ensuring a consistent and reliable system.
  6. You can log in to the system and start using Fedora CoreOS. As an immutable operating system, any modifications to the system outside of automatic updates are typically done by updating the Ignition configuration file and performing a reboot to apply the changes.

Next steps

The outlined steps mark the initial phase of grasping the workings of FCOS. For the different components that you'd need to include in your butane config to install Kubernetes, follow Advanced Kubernetes.

- + \ No newline at end of file diff --git a/launchpad/guides/kubeadm-upgrade-cluster-config.html b/launchpad/guides/kubeadm-upgrade-cluster-config.html index 6ff29a03..2e767c4b 100644 --- a/launchpad/guides/kubeadm-upgrade-cluster-config.html +++ b/launchpad/guides/kubeadm-upgrade-cluster-config.html @@ -5,13 +5,13 @@ Upgrading Kubernetes ClusterConfig with kubeadm | GraphOps Docs - +

Upgrading Kubernetes ClusterConfig with kubeadm

When managing a Kubernetes cluster with kubeadm, there could be scenarios where you need to update the ClusterConfiguration independently of performing version upgrades. This guide walks you through those steps.

Kubeadm maintains the cluster configuration within a ConfigMap (kubeadm-config) in the kube-system namespace. If you modify this ConfigMap, the changes won’t be applied automatically to the running control plane components.

To apply the changes to a control-plane node, you will have to perform a kubeadm upgrade after editing the kubeadm-config ConfigMap. The general steps would look like this:

Pick a control-plane node to be the first to upgrade, followed by:

1: Edit kubeadm-config ConfigMap with desired changes:

kubectl edit cm -o yaml kubeadm-config -n kube-system

2: Verify the upgrade plan:

kubeadm upgrade plan

3: Perform the upgrade:

kubeadm upgrade apply v1.28.3

Note: When using kubeadm upgrade apply, a version must be specified. If you do not intend to upgrade the Kubernetes version, simply specify the currently installed version. This allows you to apply changes without altering the Kubernetes version.

Steps 2 and 3 will need to be performed against every single node, both control-planes and worker nodes, as applicable depending on the changes. Once those steps are performed you should see the etcd and kube-apiserver pods restarted. After you perform these steps, the changes you made in the kubeadm-config ConfigMap will be active.

Note: When making modifications that affect etcd, it’s crucial to confirm that the changes have been successfully applied. Ensure that the new etcd pod is integrated into the cluster and maintains a minimum quorum of two before proceeding to apply changes to the subsequent control plane. This step is vital for sustaining the stability and resilience of the cluster during the update process.

- + \ No newline at end of file diff --git a/launchpad/guides/kubeadm-upgrade-nodes.html b/launchpad/guides/kubeadm-upgrade-nodes.html index f0f5bb9f..02c59e22 100644 --- a/launchpad/guides/kubeadm-upgrade-nodes.html +++ b/launchpad/guides/kubeadm-upgrade-nodes.html @@ -5,13 +5,13 @@ Upgrading Kubernetes with kubeadm | GraphOps Docs - +

Upgrading Kubernetes with kubeadm

In this guide we will use as an example upgrading from kubernetes v1.28.1 to v1.28.3

The control-plane nodes must be upgraded first, followed by the worker nodes.

Upgrade Control-Plane Nodes

Pick a control-plane node to be the first to upgrade, followed by:

1: Upgrading kubeadm and kubectl to the latest patch version of the desired major version:

apt-get update
apt-mark unhold kubeadm
apt-get install -y kubeadm='1.28.3-*'
apt-mark hold kubeadm

2: Verify the upgrade plan:

kubeadm upgrade plan v1.28.3

3: Drain the node:

kubectl drain <node-name> --ignore-daemonsets

4: Perform the upgrade:

kubeadm upgrade apply v1.28.3

5: Upgrade the node's CRI-O or other container runtime to an appropriate version if need be. For CRI-O that would be changing the minor version in the repositories added to /etc/apt/sources.list.d and then running:

apt-get update
apt-get install cri-o cri-o-runc
systemctl daemon-reload
systemctl restart crio

6: Upgrade kubelet and kubectl

apt-get update
apt-mark unhold kubelet
apt-mark unhold kubectl
apt-get install -y kubelet='1.28.3-*'
apt-get install -y kubectl='1.28.3-*'
apt-mark hold kubelet
apt-mark hold kubectl

7: Restart kubelet

systemctl daemon-reload
systemctl restart kubelet

8: Uncordon the node

kubectl uncordon <node-name>

9: Possibly, upgrade CNI. Particularly if it's a minor version upgrade there may be a need to update the CNI to a new version as well according to the vendor's release notes for the upgrade process

Upgrade remaining Control-Plane Nodes

For the remaining control-plane nodes, execute steps 1 to 8, one at a time but:

  • step 2 is skipped, no need to plan the upgrade anymore
  • step 4 is replaced by:
kubeadm upgrade node

Upgrade Worker Nodes

After all the control-plane nodes are upgraded, it's time to upgrade your worker nodes by following the previous steps from 1 to 8 but:

  • step 2 is skipped
  • step 4 is replaced by:
kubeadm upgrade node

Note: You can upgrade as many worker nodes in parallel as you see fit and/or find adequate to your availability requirements, as the nodes being upgraded will be drained from workloads.

- + \ No newline at end of file diff --git a/launchpad/guides/kubernetes-create-cluster-with-kubeadm.html b/launchpad/guides/kubernetes-create-cluster-with-kubeadm.html index 2f708ef2..d07d17d7 100644 --- a/launchpad/guides/kubernetes-create-cluster-with-kubeadm.html +++ b/launchpad/guides/kubernetes-create-cluster-with-kubeadm.html @@ -5,7 +5,7 @@ Kubernetes Guide - Bootstrapping with Kubeadm | GraphOps Docs - + @@ -15,7 +15,7 @@ Kubeadm always bootstraps a cluster as a single control-plane node, and other nodes are added after the bootstrapping.

We're going to create a YAML file instead of passing all the options as flags to kubeadm. Create a cluster-config.yaml file as the following:

apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
cgroup-driver: systemd
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
serviceSubnet: "10.96.0.0/16"
podSubnet: "10.10.0.0/16"
kubernetesVersion: "v1.25.9"
controlPlaneEndpoint: <endpoint_ip_or_dns>

where you must replace <endpoint_ip_or_dns> by the control-plane's endpoint and, optionally, choose a different podSubnet and/or serviceSubnet. Documentation on all the many configuration options available can be found here.

next, you can use kubeadm to bootstrap the cluster with:

kubeadm init --upload-certs --config cluster-config.yaml

after which, if all goes well, one should see output similar to this:

[root@demo /]# kubeadm init
I0515 19:48:51.424146 1642628 version.go:256] remote version is much newer: v1.27.1; falling back to: stable-1.25
[init] Using Kubernetes version: v1.25.9
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [demo kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 134.177.177.107]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [demo localhost] and IPs [134.177.177.107 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [demo localhost] and IPs [134.177.177.107 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 4.502328 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node demo as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node demo as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 4y3umx.fnuv7v9pgp4jn74b
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane node by running the following command on each as a root:

kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866

(Note: Save these kubeadm join commands presented in this output, as they contain secrets that will be required to add more nodes in future steps.)

this being a control-plane node, kubeadm will have created a kubeconfig file in /etc/kubernetes/admin.conf. A kubeconfig file is a YAML file that contains the required metadata and credentials to talk to the cluster, such as certificates/tokens and endpoint specification. Kubectl will use whatever kubeconfig file is pointed at by the KUBECONFIG environment variable, or, by default, the file in ~/.kube/config. So, as suggested in the output, we should do:

export KUBECONFIG=/etc/kubernetes/admin.conf

now kubectl should be set up to interact with your cluster. Try it by running the following command:

[root@demo /]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
demo Ready control-plane 10s v1.25.9

Installing a CNI

Kubernetes follows a very modular API interface based design. Some of those components, like the CSI (https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/ ) CNI (https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) or Ingress controller, come together to form the core of most kubernetes platform setups.

The CNI is the module that will take care of enabling networking between containers and services in different nodes, or setup each container's networking properties. As such, it is a critical next step in order to add more nodes to the cluster, or even run workload containers.

We have chosen to use cilium as a CNI solution, but there are many options to choose from.

We'll go ahead and fetch the cilium binary from upstream by running the following script:

CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
cd /usr/local/bin
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}

and then proceed to install cilium with default options by running:

/usr/local/bin/cilium install

Adding more control-plane nodes

If you have gone with the default topology setup, kubeadm should be instantiating etcd instances co-located with your control-plane nodes. Given that and the fact that etcd is a majority quorum based system, it's especially important that for a high-availability setup, you'll keep an odd (i.e: one, three, five, ...) number of control-plane nodes. As such, the minimum number of control-plane nodes that can offer high-availability would be three.

To add more control-plane nodes you need to first get the hosts ready for such by:

  • preparing the node OS as required
  • provisioning the required tools and software as in the first bootstrapping node (container runtime engine, kubelet, kubeadm, kubectl, ...)

and then execute, on that node, the appropriate kubeadm join command as shown in the previous kubeadm init output. For a control-plane node, that takes the form:

kubeadm join <endpoint> --token <secret> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <secret>

Note: the kubeadm join commands shown after bootstrapping the cluster or, rather, the secrets uploaded and displayed are temporary and expire after a certain time. In case you lost them or they've expired, you can re-upload new certificates and display the new ones, on the bootstrapping control-plane node, by running:

kubeadm init phase upload-certs --upload-certs
kubeadm token create --print-join-command

Adding worker nodes

To add worker nodes to your cluster, first get them ready by:

  • preparing the node OS as required
  • provisioning the required tools and software as in the first bootstrapping node (container runtime engine, kubelet, kubeadm, kubectl, ...)

Next, you can run the appropriate kubeadm join command that was displayed at cluster bootstrap. It has the form:

kubeadm join <endpoint> --token <secret> --discovery-token-ca-cert-hash sha256:<hash>

In case you haven't saved that output, you can run (on one of the existing control-plane cluster members) the following command:

kubeadm token create --print-join-command

which will display the appropriate kubeadm join command and the relevant secrets, again.

QuickStart on Ubuntu 22.04 with CRI-O

Note: This guide assumes you'll be running these commands as root.

Prerequisites

1: Enable the required kernel modules on boot:

cat <<EOF > /etc/modules-load.d/crio-network.conf
overlay
br_netfilter
EOF

and load them now:

modprobe overlay
modprobe br_netfilter

2: Set appropriate networking sysctl toggles:

cat <<EOF > /etc/sysctl.d/99-kubernetes.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

and apply them immediately:

sysctl --system

3: Disable swap:

swapoff -a

and take care to disable swap setup on boot, in case it is enabled (maybe on /etc/fstab)

Install packages

4: Install dependencies:

apt-get update
apt-get install -y apt-transport-https ca-certificates curl gpg

5: Set variables for CRI-O commands:

export OS="xUbuntu_22.04"
export VERSION="1.28"

6: Install CRI-O:

echo "deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
echo "deb [signed-by=/usr/share/keyrings/libcontainers-crio-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list

mkdir -p /usr/share/keyrings
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-archive-keyring.gpg
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg

apt-get update
apt-get install cri-o cri-o-runc

systemctl daemon-reload
systemctl enable --now crio

7: Install kubernetes packages:

curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl

8: Hold package versions so they don't auto-update:

apt-mark hold kubelet kubeadm kubectl

Initialize the Cluster

9: Create a kubeadm config for initializing the Cluster:

cat << EOF > /tmp/cluster-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
cgroup-driver: systemd
node-ip: 10.110.0.2
taints: []
skipPhases:
- addon/kube-proxy
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
serviceSubnet: "10.96.0.0/16"
podSubnet: "10.10.0.0/16"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
node-cidr-mask-size: "20"
kubernetesVersion: "v1.28.3"
controlPlaneEndpoint: 10.110.0.2
EOF

Note: If you intend to setup a HA Cluster, you should take care of setting up the VIP beforehand (be it by creating a Load Balancer in a Cloud Provider, or using a bare-metal solution based on something like Keepalived). That VIP (or DNS) should go into the controlPlaneEndpoint, as changing this after creating the Cluster is an elaborate endeavour.

We are specifying a particular node-IP to ensure usage of the internal interface, as our node has multiple interfaces/IPs. We are also skipping the kube-proxy installation because we plan to use Cilium CNI, which will replace kube-proxy.

10: Initialize the Cluster:

kubeadm init --upload-certs --config /tmp/cluster-config.yaml

11: Copy kubeconfig to ~/.kube/config:

mkdir ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config

12: Verify the cluster is online and ready with kubectl get nodes:

NAME                             STATUS   ROLES           AGE   VERSION
ubuntu-s-2vcpu-4gb-amd-ams3-01 Ready control-plane 85m v1.28.3

Install Cilium CNI

13: Install cilium binary

CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
cd /usr/local/bin
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}

14: Install cilium CNI with cilium install:

ℹ  Using Cilium version 1.14.2
🔮 Auto-detected cluster name: kubernetes
🔮 Auto-detected kube-proxy has not been installed
ℹ Cilium will fully replace all functionalities of kube-proxy

15: Wait a minute and verify it has been deployed successfully with cilium status:

    /¯¯\
/¯¯\__/¯¯\ Cilium: OK
\__/¯¯\__/ Operator: OK
/¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode)
\__/¯¯\__/ Hubble Relay: disabled
\__/ ClusterMesh: disabled

Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
Containers: cilium Running: 1
cilium-operator Running: 1
Cluster Pods: 2/2 managed by Cilium
Helm chart version: 1.14.2
Image versions cilium quay.io/cilium/cilium:v1.14.2@sha256:6263f3a3d5d63b267b538298dbeb5ae87da3efacf09a2c620446c873ba807d35: 1
cilium-operator quay.io/cilium/operator-generic:v1.14.2@sha256:52f70250dea22e506959439a7c4ea31b10fe8375db62f5c27ab746e3a2af866d: 1

Congratulations! 🎉

Add more nodes

Control-Plane nodes

1: On each node, repeat the previous steps for prerequisites and package installs (steps 1 to 8)

2: Create a kubeadm join config:

cat << EOF > /tmp/join-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
bootstrapToken:
token: <token>
apiServerEndpoint: <control plane endpoint>
caCertHashes:
- <ca cert hash>
nodeRegistration:
kubeletExtraArgs:
cgroup-driver: systemd
node-ip: 10.110.0.5
controlPlane:
certificateKey: <ca certificate key>
EOF

The <token>, <ca cert hash> and <ca certificate key> will have been output by kubeadm at the initialization step (previous step 10). If you don't have them anymore or the token has expired, you can get a new certificateKey with:

kubeadm init phase upload-certs --upload-certs

and obtain the token and certificate hash with:

kubeadm token create --print-join-command

We're setting node-ip here because our nodes have multiple IPs and we want to specify which interface the services should listen on.

3: On each node, provided each join-config.yaml has been adjusted if required, join the node with:

kubeadm join --config /tmp/join-config.yaml

Worker nodes

1: On each node, repeat the previous steps for prerequisites and package installs (steps 1 to 8)

2: Create a kubeadm join config:

cat << EOF > /tmp/join-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
bootstrapToken:
token: <token>
apiServerEndpoint: 10.110.0.2:6443
caCertHashes:
- <ca cert hash>
nodeRegistration:
kubeletExtraArgs:
cgroup-driver: systemd
node-ip: 10.110.0.7
taints: []
EOF

The <token> and <ca cert hash> will have been output by kubeadm at the initialization step (previous step 10). If you don't have them anymore or the token has expired, you can obtain them again by running on a control-plane node:

kubeadm token create --print-join-command

We're setting node-ip here because our nodes have multiple IPs and we want to specify which interface the services should listen on.

3: On each node, provided each join-config.yaml has been adjusted if required, join the node with:

kubeadm join --config /tmp/join-config.yaml

4: Label the new worker nodes, by running on a control-plane node:

kubectl label node <node_name> node-role.kubernetes.io/worker=""
- + \ No newline at end of file diff --git a/launchpad/intro.html b/launchpad/intro.html index ac9e275e..4852575e 100644 --- a/launchpad/intro.html +++ b/launchpad/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack.

There are three major components to be aware of:

  1. Launchpad Starter (graphops/launchpad-starter): A starting point for every new Launchpad deployment
  2. Launchpad Charts (graphops/launchpad-charts): A collection of Helm Charts for blockchains and web3 apps
  3. Launchpad Namespaces (graphops/launchpad-namespaces): A collection of preconfigured Kubernetes Namespaces using Helmfile

Launchpad components

Features

  • Actively maintained by GraphOps and contributors
  • An opinionated starter (launchpad-starter) to define and manage your stack in a declarative, version controlled manner
  • A collection of Helm Charts for deploying and monitoring blockchain nodes and Graph Protocol Indexers in Kubernetes, with P2P NodePort support
  • Preconfigured namespaces for core cluster functions (logging, monitoring, etc) and major blockchains
  • An automated dependency update pipeline for graphops/launchpad-charts and graphops/launchpad-namespaces

Next Steps

  • Read the Prerequisites section to understand what you need to bring
  • Read the Quick Start guide to get up and running
  • Look at the repositories above on GitHub to understand how they work
  • Review Advanced Topics to understand more advanced behavior
- + \ No newline at end of file diff --git a/launchpad/modularity.html b/launchpad/modularity.html index b254291b..979ef9ac 100644 --- a/launchpad/modularity.html +++ b/launchpad/modularity.html @@ -5,13 +5,13 @@ Modularity | GraphOps Docs - +

Modularity

The full Launchpad stack contains:

  1. Launchpad Starter (graphops/launchpad-starter): A starting point for new Launchpad deployments
  2. Launchpad Charts (graphops/launchpad-charts): A collection of Helm Charts for blockchains and web3 apps
  3. Launchpad Namespaces (graphops/launchpad-namespaces): A collection of preconfigured Kubernetes Namespaces using Helmfile

We have designed Launchpad with modularity in mind therefore users can pick what elements of the stack to utilise in their own infrastructure. As such below you can find some options.

Using launchpad-starter

Using launchpad-starter as a starter repo for your own IaC (Infrastructure as Code) repo is the recommended approach. launchpad-starter comes with a sane set of defaults and leverages Helmfile to declaratively specify and orchestrate releases of software in your Kubernetes cluster.

See our Quick Start guide and the launchpad-starter repo for more information.

Using launchpad-namespaces without launchpad-starter

As a user, you have the flexibility to choose whether or not to utilize the launchpad-starter repository.

If you decide not to use it, you can create your own repository that includes a straightforward helmfile.yaml file, which will orchestrate the execution of various launchpad-namespaces that align with your specific requirements. An illustrative example can be found in sample.helmfile.yaml.

By opting out of launchpad-starter, you are essentially choosing not to leverage:

  • Taskfile definitions that encompass commonly utilized tasks
  • The automated process that installs all essential local tool dependencies on your personal machine
  • The regularly refreshed sample.helmfile.yaml configuration

Using launchpad-charts without launchpad-namespaces or launchpad-starter

Users also have the choice to exclusively utilise launchpad-charts only.

For example if you wanted to run one of our charts manually without utilising helmfile:

helm repo add graphops https://graphops.github.io/launchpad-charts
helm install erigon graphops/erigon

Another option could be to utilise ArgoCD as a GitOps continuous delivery tool for managing Kubernetes applications. In this case the user would not need launchpad-starter or launchpad-namespaces and instead could use the launchpad-charts in conjunction with ArgoCD. An example of how to configure Argo with helm-charts can be found here.

- + \ No newline at end of file diff --git a/launchpad/other-resources.html b/launchpad/other-resources.html index 0e629511..38d86ff3 100644 --- a/launchpad/other-resources.html +++ b/launchpad/other-resources.html @@ -5,13 +5,13 @@ Other Resources | GraphOps Docs - + - + \ No newline at end of file diff --git a/launchpad/prerequisites.html b/launchpad/prerequisites.html index 50d0c6b5..018039d3 100644 --- a/launchpad/prerequisites.html +++ b/launchpad/prerequisites.html @@ -5,13 +5,13 @@ Prerequisites | GraphOps Docs - +

Prerequisites

You will need some things to use Launchpad for your infrastructure:

A basic understanding of infrastructure

We expect that you are familiar with infrastructure basics, including:

  • Linux
  • Networking, DNS
  • SSH and authentication
  • Storage fundamentals
  • Basic system administration

A basic, functional knowledge of git

The Launchpad stack advocates for declarative, version controlled infrastructure. This means the declarative state of your infrastructure will be committed into a private git repo as it evolves over time. You will need to be able to perform basic git workflows like:

  • Staging files (e.g. git add .)
  • Committing changes and pushing code (e.g. git push origin main)
  • Viewing the repo history (e.g. git show, git log, or using GitHub)

More advanced users will benefit from understanding how to pull and rebase, but this is not a requirement.

A basic understanding of operating a Graph Protocol Indexer

We will assume a basic understanding of the Graph Protocol Indexing stack, as well as some of the operational requirements of Indexing.

See Other Resources for links to helpful resources.

A client machine

Launchpad comes with a series of tools that should run on a client device. This is most likely your local machine. These tools should not run on your servers. Instead, they help you instruct your cluster of servers to do what you want.

Currently, Launchpad comes with support for Linux and MacOS clients. Windows is currently not supported, though you may be able to use Launchpad using the Windows Subsystem for Linux.

Knowledge of Kubernetes and a Kubernetes cluster

The Launchpad project requires a certain level of familiarity with Kubernetes and its intricacies. The extent of this Kubernetes expertise depends on your choice of cluster. Opting for a managed cluster from a leading Cloud Provider requires less intensive Kubernetes knowledge, as operating such a cluster is more straightforward, necessitating only a fundamental grasp of different Kubernetes resource types.

However, it's essential to note that managed clusters can be very costly when running blockchains. In contrast, selecting a self-managed cluster demands a deeper understanding, encompassing all components necessary for cluster provisioning and management - for more details on this please checkout our Kubernetes guide. Regardless of your choice, you'll need to create a Kubernetes cluster. This can involve setting up a self-managed cluster, as outlined in our Fedora CoreOS guide, or opting for a managed cluster provided by a major Cloud Provider like AWS or GCP.

Operational knowledge of Helm

Launchpad operates in tandem with Helm and Helm Charts. However, no need to worry if you're new to Helm or chart authoring – we've got you covered. Launchpad leverages a combination of widely used and publicly available charts (ie. grafana/helm-charts), along with our in-house helm-charts, launchpad-charts. This ensures a seamless experience without the need for in-depth Helm expertise.

In addition, we abstracted some of the Helm usage by using tasks ( ie. task releases:apply or task releases:delete) as outlined in our Quick Start guide. As such, all you need is a basic understanding of Helm's core functions and release management. Writing helm-charts is not a prerequisite for most users, as we provide the necessary charts to streamline your experience.

Willingness to learn and contribute

Launchpad is a collaborative effort to create the best UX for Graph Protocol Indexers on Kubernetes. The Launchpad stack provides an opinionated set of defaults and recipes for success, but to be an advanced operator you will need to learn Kubernetes and many of the other tools in the stack. With Launchpad, you have guard rails to guide you in your journey towards mastering operating your Indexer on Kubernetes.

Please contribute back when you are able!

- + \ No newline at end of file diff --git a/launchpad/quick-start.html b/launchpad/quick-start.html index 7ac6a77f..f8c02d4d 100644 --- a/launchpad/quick-start.html +++ b/launchpad/quick-start.html @@ -5,7 +5,7 @@ Quick Start | GraphOps Docs - + @@ -16,7 +16,7 @@ The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2

pinning to an exact version:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-v1.2.2. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2.2

following the latest canary:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-canary/latest. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-canary/latest

We would recommend that you either follow the latest stable releases, or pin to a specific version.

note

For full implementation details and other comprehensive notes about launchpad-namespaces please visit the github repo.

Pulling in starter changes

From time to time, you may want to update your infra repo with the latest changes from our starter.

Launchpad comes with a built-in task to do this:

task launchpad:pull-upstream-starter
- + \ No newline at end of file diff --git a/launchpad/release-channels.html b/launchpad/release-channels.html index 46d5c869..6693a539 100644 --- a/launchpad/release-channels.html +++ b/launchpad/release-channels.html @@ -5,13 +5,13 @@ Release Channels | GraphOps Docs - +
-

Release Channels

Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.

For a reminder of the various components within Launchpad and their intricate connections, we recommend revisiting our Intro.

This guide offers a comprehensive walkthrough, outlining the steps, automated and manual, required to introduce a new version release of an application, e.g. Erigon, into the 'launchpad-charts' repository as a canary release and ultimately transitioning it to a stable state within its designated 'launchpad-namespace,' such as Ethereum.

The diagram below provides a visual representation illustrating the interdependence and impact of various components and workflows.

Release Channels Flow

From new version to launchpad-namespaces stable

Below you can find a more comprehensive breakdown of the process, divided into automated workflows within launchpad-charts and launchpad-namespaces, as well as manual operator steps. This process guides the transition of a new application version from the initial launchpad-charts canary release to its eventual stability within the corresponding launchpad-namespaces. For this walkthrough we will use Erigon as an example.

launchpad-charts

  • On each run, the bot looks up Erigon tags and, upon finding a new version, opens a PR into launchpad-charts/charts/erigon
  • The new PR triggers a workflow that publishes a new pre-release into the repo.
  • Another workflow runs and adds the newly released canary chart to the canary Helm repo index

launchpad-namespaces

  • On each run, bot checks for new chart releases and upon finding one, pushes an update branch and opens a new PR to namespaces
  • Bot runs again, auto-merges the PR and creates a tag
  • Workflow runs, updates semver tags

operator

  • Tests the new canary chart release to verify it is working properly; if it is, adds a commit to the PR to set the stable chart release version
  • Updates their helmfile reference to point at new namespace reference and runs changes against eth-goerli namespace using task releases:apply -- eth-goerli
  • If the previous task runs successfully and workloads appear healthy, the operator updates their helmfile reference for eth-mainnet namespace and runs task releases:apply -- eth-mainnet
  • If task releases:apply -- eth-mainnet succeeds and all workloads are healthy, operator manually tags the ethereum namespace as stable
note

Manually tagging a namespace as stable is an intentional process. Our aim is to ensure that workloads undergo comprehensive testing before being tagged as stable, which signals to users that they are ready for running on mainnet.

Alongside the ability to choose between canary or stable releases based on user risk preferences, we've also enabled the capability to manually override a specific chart version during namespace deployment.

  - path: git::https://github.com/graphops/launchpad-namespaces.git@ethereum/helmfile.yaml?ref=ethereum-stable/latest
selectorsInherited: true
values:
- helmDefaults:
<<: *helmDefaults
flavor: "goerli"
erigon:
chartVersion: "0.8.1" # to override the chart version the namespace is setup with
values:
statefulNode:
jwt:
existingSecret:
name: jwt
key: jwt
nimbus:
values:
nimbus:
jwt:
existingSecret:
name: jwt
key: jwt

Similarly to being able to override chartVersion, users have the ability to override chartUrl to specify a self-maintained chart, or a chart maintained by a different organisation.

- +

Release Channels

Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.

For a reminder of the various components within Launchpad and their intricate connections, we recommend revisiting our Intro.

This guide offers a comprehensive walkthrough, outlining the steps, automated and manual, required to introduce a new version release of an application, e.g. Erigon, into the launchpad-charts repository as a canary release and ultimately transitioning it to a stable state within its designated launchpad-namespace, such as Ethereum.

The diagram below provides a visual representation illustrating the interdependence and impact of various components and workflows.

Release Channels Flow

From new version to launchpad-namespaces stable

Below you can find a more comprehensive breakdown of the process, divided into automated workflows within launchpad-charts and launchpad-namespaces, as well as manual operator steps. This process guides the transition of a new application version from the initial launchpad-charts canary release to its eventual stability within the corresponding launchpad-namespaces. For this walkthrough we will use Erigon as an example.

launchpad-charts

  • On each run, the bot looks up Erigon tags and, upon finding a new version, opens a PR into launchpad-charts/charts/erigon
  • The new PR triggers a workflow that publishes a new pre-release into the repo.
  • Another workflow runs and adds the newly released canary chart to the canary Helm repo index

launchpad-namespaces

  • On each run, bot checks for new chart releases and upon finding one, pushes an update branch and opens a new PR to namespaces
  • Bot runs again, auto-merges the PR and creates a tag
  • Workflow runs, updates semver tags

operator

  • Tests the new canary chart release to verify it is working properly; if it is, adds a commit to the PR to set the stable chart release version. Following the merge of this PR, the new stable chart release is automatically issued in draft mode. This step provides the operator with an opportunity to review and manually publish the final release, ensuring precise control and quality assurance in the deployment process.
  • Run task releases:apply -- eth-goerli which should pick changes from latest ethereum canary tag that would contain new erigon canary chart version (after renovate has run and has picked those up, which it does in 15m intervals).
  • If the previous task runs successfully and workloads appear healthy, the operator updates their helmfile reference to ethereum-canary/latest for eth-mainnet namespace and runs task releases:apply -- eth-mainnet.
  • If task releases:apply -- eth-mainnet succeeds and all workloads are healthy, operator manually tags the ethereum namespace as stable
note

Manually tagging a namespace as stable is an intentional process. Our aim is to ensure that workloads undergo comprehensive testing before being tagged as stable, which signals to users that they are ready for running on mainnet.

Alongside the ability to choose between canary or stable releases based on user risk preferences, we've also enabled the capability to manually override a specific chart version during namespace deployment.

  - path: git::https://github.com/graphops/launchpad-namespaces.git@ethereum/helmfile.yaml?ref=ethereum-stable/latest
selectorsInherited: true
values:
- helmDefaults:
<<: *helmDefaults
flavor: "goerli"
erigon:
chartVersion: "0.8.1" # to override the chart version the namespace is setup with
values:
statefulNode:
jwt:
existingSecret:
name: jwt
key: jwt
nimbus:
values:
nimbus:
jwt:
existingSecret:
name: jwt
key: jwt

Similarly to being able to override chartVersion, users have the ability to override chartUrl to specify a self-maintained chart, or a chart maintained by a different organisation.

+ \ No newline at end of file diff --git a/launchpad/server-side-stack.html b/launchpad/server-side-stack.html index 3ce0e51b..6a07149f 100644 --- a/launchpad/server-side-stack.html +++ b/launchpad/server-side-stack.html @@ -5,13 +5,13 @@ Server Side Stack | GraphOps Docs - +

Server Side Stack

Server Side Stack

Your Kubernetes Cluster

Launchpad V2 requires users to bring their own Kubernetes cluster. This approach ensures that users are not tied to a specific Kubernetes distribution or mode of installation.

For users seeking more detailed guidance with regards to a Kubernetes setup, we have created guides that outline the steps one should consider when installing and deploying Fedora CoreOS (FCOS) - an auto-updating, minimal, container-focused OS, designed for clusters or standalone use and optimized for Kubernetes. These guides are designed to assist you in getting started with the process of setting up and guide you through the management of your Kubernetes cluster. For guidance on Fedora CoreOS setup and considerations, skip to Install FCOS Guide.

Once your Kubernetes cluster is ready, head over to Quick Start.

Launchpad Namespaces

Server Side Stack

See the Launchpad Namespaces repository for details about which namespaces are available, as well as which Helm Releases are specified within each one.

- + \ No newline at end of file diff --git a/launchpad/supported-namespaces.html b/launchpad/supported-namespaces.html index ab44d125..b105f807 100644 --- a/launchpad/supported-namespaces.html +++ b/launchpad/supported-namespaces.html @@ -5,13 +5,13 @@ Supported Namespaces | GraphOps Docs - +

Supported Namespaces

Launchpad includes a number of prepackaged Kubernetes namespaces (see Launchpad Namespaces repo), which in turn reference Helm Charts in the Launchpad Charts repository, as well as third-party Charts. GraphOps maintains support for these namespaces, meaning that we:

  • Track upstream releases and test them
  • Move these releases through canary and stable release channels for both launchpad-charts and launchpad-namespaces
  • Evolve the Launchpad stack to meet the evolving operational needs of these applications
  • Offer support for operators experiencing challenges with these namespaces

This strategy is rooted in GraphOps' active usage of these namespaces and the applications within them. For more details on how a new application makes it from a canary release all the way to a stable launchpad-namespace, please check out our release-channels guide.

We welcome third-party contributors to add support for additional namespaces and applications.

Using custom releases and deploying sets of applications not defined in launchpad-namespaces

Launchpad's architecture is designed to be highly flexible and does not constrain you to deploying launchpad-namespaces.

To incorporate releases not covered within a namespace, you can utilize the helmfile.yaml that you generated during the Quick Start process.

For instance, if you required the implementation of kafka-operator for specific workloads, you would add the following code to the repositories and releases sections:

repositories:
- name: strimzi
url: https://strimzi.io/charts/

releases:
- name: strimzi
namespace: kafka
createNamespace: true
chart: strimzi/strimzi-kafka-operator
missingFileHandler: Warn
values:
- watchAnyNamespace: true
note

If you're considering the integration of a blockchain that currently falls outside the scope of Launchpad's Supported Namespaces, it's worth noting that including a new release in your helmfile.yaml might require an extra step of creating a custom Helm Chart. While certain publicly available charts (e.g. Teku, Lighthouse) might be regularly maintained by external contributors, you might encounter cases where other charts are not readily supported.

- + \ No newline at end of file diff --git a/mips-resources/intro.html b/mips-resources/intro.html index 27656f48..594d8e51 100644 --- a/mips-resources/intro.html +++ b/mips-resources/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

It's an exciting time to be participating in The Graph ecosystem! During Graph Day 2022 Yaniv Tal announced the sunsetting of the hosted service, a moment The Graph ecosystem has been working towards for many years.

To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the Migration Infrastructure Providers (MIPs) program.

The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer.

The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to migration grants for subgraph developers using multi-chain subgraphs.

Useful Resources

- + \ No newline at end of file diff --git a/mips-resources/mips-faq.html b/mips-resources/mips-faq.html index 429cec11..65074b7d 100644 --- a/mips-resources/mips-faq.html +++ b/mips-resources/mips-faq.html @@ -5,13 +5,13 @@ MIPs FAQs | GraphOps Docs - +

MIPs FAQs

1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed?

Yes, it is indeed.

For context, the arbitration charter (learn more about the charter here) specifies the methodology for generating a POI for a failed subgraph.

A community member, SunTzu, has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo here.

2. Which chain will the MIPs program incentivise first?

The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3.

3. How will new chains be added to the MIPs program?

New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support.

4. How will we know when the network is ready for a new chain?

The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs.

5. How are rewards divided per chain?

Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network.

6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that?

You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See The MIPs notion page to learn more about the phases.

7. When will rewards be distributed?

MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle.

8. How does scoring work?

Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on:

Subgraph Coverage

  • Are you providing maximal support for subgraphs per chain?

  • During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support.

Quality Of Service

  • Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)?

  • Is the Indexer supporting dapp developers being reactive to their needs?

Is Indexer allocating efficiently, contributing to the overall health of the network?

Community Support

  • Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain?

  • Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum?

9. How will the Discord role be assigned?

Moderators will assign the roles in the next few days.

10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards?

Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet.

11. At what point do you expect participants to add a mainnet deployment?

There will be a requirement to have a mainnet indexer during phase 3. More information on this will be shared in this notion page soon.

12. Will rewards be subject to vesting?

The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement.

13. For teams with more than one member, will all the team members be given a MIPs Discord role?

Yes

14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet?

Yes

15. During the MIPs program, will there be a period to dispute invalid POI?

To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation

17. Can we combine two vesting contracts?

No. The options are: you can delegate one to the other one or run two separate indexers.

18. KYC Questions?

Please email info@thegraph.foundation

19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready?

Yes

20. Are there any recommendations on which regions to run infrastructure in?

We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies.

21. What is “handler gas cost”?

It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains.

- + \ No newline at end of file