diff --git a/.github/workflows/test-docs.yaml b/.github/workflows/test-docs.yaml new file mode 100644 index 0000000000..6f1d6eba18 --- /dev/null +++ b/.github/workflows/test-docs.yaml @@ -0,0 +1,37 @@ +# This is a GitHub workflow defining a set of jobs with a set of steps. +# ref: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions +# +name: Test docs + +on: + pull_request: + paths: + - "docs/**" + - "**/test-docs.yaml" + push: + paths: + - "docs/**" + - "**/test-docs.yaml" + branches-ignore: + - "dependabot/**" + - "pre-commit-ci-update-config" + workflow_dispatch: + +jobs: + test-docs: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install deps + run: pip install -r docs/requirements.txt + + # linkcheck is done separately from this in doc-links.yml, scheduled to + # run every day and open/update an issue + - name: make dirhtml + run: | + cd docs + make dirhtml SPHINXOPTS='--color -W --keep-going' diff --git a/docs/environment.yml b/docs/environment.yml index 94f1bffdab..0b5c2c2028 100644 --- a/docs/environment.yml +++ b/docs/environment.yml @@ -2,6 +2,6 @@ name: infrastructure-docs channels: - conda-forge dependencies: - - python=3.10 + - python=3.11 - pip: - -r requirements.txt diff --git a/docs/howto/features/cloud-access.md b/docs/howto/features/cloud-access.md index 7d3c07114d..a28e1f861e 100644 --- a/docs/howto/features/cloud-access.md +++ b/docs/howto/features/cloud-access.md @@ -47,9 +47,9 @@ This AWS IAM Role is managed via terraform. `````{tab-set} ````{tab-item} GCP :sync: gcp-key - ```yaml + ``` hub_cloud_permissions = { - "": { + "" : { allow_access_to_external_requester_pays_buckets : true, bucket_admin_access : ["bucket-1", "bucket-2"] hub_namespace : "" @@ -60,9 +60,9 @@ This AWS IAM Role is managed via terraform. 
````{tab-item} AWS :sync: aws-key - ```bash + ``` hub_cloud_permissions = { - "": { + "" : { bucket_admin_access : ["bucket-1", "bucket-2"] hub_namespace : "" } diff --git a/docs/howto/features/dask.md b/docs/howto/features/dask.md index e987d53325..b714b5940d 100644 --- a/docs/howto/features/dask.md +++ b/docs/howto/features/dask.md @@ -1,11 +1,11 @@ -(howto:features:daskhubs)= +(howto:features:daskhub)= # Add support for daskhubs in an existing cluster ## GCP Setting up dask nodepools with terraform can be done by adding the following to the cluster's terraform config file: -```terraform +``` # Setup a single node pool for dask workers. # # A not yet fully established policy is being developed about using a single diff --git a/docs/howto/features/dedicated-nodepool.md b/docs/howto/features/dedicated-nodepool.md index e41bd41f40..66e22198a1 100644 --- a/docs/howto/features/dedicated-nodepool.md +++ b/docs/howto/features/dedicated-nodepool.md @@ -13,9 +13,9 @@ Some hubs on shared clusters require dedicated nodepools, for a few reasons: 1. Setup a new nodepool in terraform, via the `.tfvars` for the cluster. Add the new nodepool to `notebook_nodes`: - ```terraform + ``` notebook_nodes = { - "": { + "" : { min: 0, max: 100, machine_type: "", @@ -25,16 +25,16 @@ Some hubs on shared clusters require dedicated nodepools, for a few reasons: taints: [{ key: "2i2c.org/community", value: "", - effect: "NO_SCHEDULE" + effect: "NO_SCHEDULE", }], gpu: { enabled: false, type: "", - count: 0 + count: 0, }, resource_labels: { - "community": "" - } + "community": "", + }, } } ``` diff --git a/docs/howto/features/gpu.md b/docs/howto/features/gpu.md index c279e888b8..3bac96fd4d 100644 --- a/docs/howto/features/gpu.md +++ b/docs/howto/features/gpu.md @@ -40,7 +40,7 @@ The `notebook_nodes` variable for our GCP terraform accepts a `gpu` parameter, which can be used to provision a GPU nodepool. 
An example would look like: -```terraform +``` notebook_nodes = { "gpu-t4": { min: 0, @@ -49,16 +49,16 @@ notebook_nodes = { gpu: { enabled: true, type: "nvidia-tesla-t4", - count: 1 + count: 1, }, # Optional, in case we run into resource exhaustion in the main zone zones: [ "us-central1-a", "us-central1-b", "us-central1-c", - "us-central1-f" - ] - } + "us-central1-f", + ], + }, } ``` diff --git a/docs/howto/upgrade-cluster/aws.md b/docs/howto/upgrade-cluster/aws.md index 11251368cc..e707e1a13b 100644 --- a/docs/howto/upgrade-cluster/aws.md +++ b/docs/howto/upgrade-cluster/aws.md @@ -18,7 +18,7 @@ cluster is unused or that the maintenance is communicated ahead of time. 1. *Install or upgrade CLI tools* - Install required tools as documented in [](new-cluster:aws-required-tools), + Install required tools as documented in [](new-cluster:prerequisites), and ensure you have a recent version of eksctl. ```{warning} @@ -35,7 +35,7 @@ cluster is unused or that the maintenance is communicated ahead of time. Before upgrading an EKS cluster, it could be a good time to consider changes to `eksctl/template.jsonnet` since this cluster's jsonnet template was last generated, which it was initially according to - [](new-cluster:aws:generate-cluster-files). + [](new-cluster:generate-cluster-files). To do this first ensure `git status` reports no changes, then generate new cluster files using the deployer script, then restore changes to everything diff --git a/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/new-url.md b/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/new-url.md index 23aea2edbe..ba1c46feb7 100644 --- a/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/new-url.md +++ b/docs/hub-deployment-guide/hubs/other-hub-ops/move-hubs/new-url.md @@ -35,16 +35,27 @@ we have deployed, e.g., renaming the previous 'researchdelight' hub to 'showcase judgment call to make. ``` -1. [Add a redirect](domain-redirects) from the old URL to the new one +1. 
Add a redirect from the old URL to the new one -1. Open a Pull Request with the changes for review + In the `support.values.yaml` file for the cluster, set up automatic + redirection of users going to the old domain name to arrive at the new + domain name. -1. Once the PR has been approved: + ```yaml + redirects: + rules: + - from: + to: + ``` + +2. Open a Pull Request with the changes for review + +3. Once the PR has been approved: 1. Update A/CNAME records in Namecheap for the new URL - 1. Update the relevant OAuth app for the new URL - 1. Merge the PR + 2. Update the relevant OAuth app for the new URL + 3. Merge the PR -1. If you also changed the `name` field within the +4. If you also changed the `name` field within the `cluster.yaml` file, [delete the old hub namespace in helm](delete-a-hub). It is recommended to [migrate the data](copy-home-dirs) first. diff --git a/docs/hub-deployment-guide/new-cluster/new-cluster.md index f843bb3d2f..c35b1fc5e0 100644 --- a/docs/hub-deployment-guide/new-cluster/new-cluster.md +++ b/docs/hub-deployment-guide/new-cluster/new-cluster.md @@ -15,6 +15,7 @@ terraform to provision supporting infrastructure, such as storage buckets. This guide will assume you have already followed the guidance in [](/topic/infrastructure/cluster-design) to select the appropriate infrastructure. +(new-cluster:prerequisites)= ## Prerequisites `````{tab-set} @@ -87,6 +88,7 @@ N/A ```` ````` +(new-cluster:generate-cluster-files)= ### Generate cluster files We automatically generate the files required to setup a new cluster: @@ -426,6 +428,7 @@ terraform apply -var-file=projects/$CLUSTER_NAME.tfvars Congratulations, you've just deployed a new cluster! +(new-cluster:terraform:cluster-credentials)= ## Exporting and Encrypting the Cluster Access Credentials In the previous step, we will have created an IAM user with just enough permissions for automatic deployment of hubs from CI/CD.
Since these credentials are checked-in to our git repository and made public, they should have the least amount of permissions possible. @@ -694,7 +697,7 @@ the usernames of the 2i2c engineers on this particular AWS account, and run the following command to give them access: ```{note} -You can modify the command output by running `terraform output -raw eksctl_iam_command` as described in [](new-cluster:aws:terraform:cicd). +You can modify the command output by running `terraform output -raw eksctl_iam_command` as described in [](new-cluster:terraform:cluster-credentials). ``` ```bash diff --git a/docs/hub-deployment-guide/new-cluster/smce.md index bd585eff37..a3be2e7ae4 100644 --- a/docs/hub-deployment-guide/new-cluster/smce.md +++ b/docs/hub-deployment-guide/new-cluster/smce.md @@ -26,12 +26,12 @@ is handled for us by someone else. 4. This engineer should now create user accounts for all other 2i2c engineers, and make sure they are all part of the `SMCE-ProjectAdmins` group. -Once this is done, steps for the regular [AWS Cluster Setup](new-cluster:aws) can proceed, -until completion of [provisioning credentials for CI/CD](new-cluster:aws:terraform:cicd). +Once this is done, steps for the regular [AWS Cluster Setup](new-cluster:new-cluster) can proceed, +until completion of [provisioning credentials for CI/CD](new-cluster:terraform:cluster-credentials). ## Getting an MFA exemption for our `hub-continuous-deployer` user -At the completion of [provisioning credentials for CI/CD](new-cluster:aws:terraform:cicd), +At the completion of [provisioning credentials for CI/CD](new-cluster:terraform:cluster-credentials), we will have an IAM user named `hub-continuous-deployer` provisioned. This is what we use to deploy from GitHub actions, but also to deploy from our local machines. The MFA requirement needs to be exempted for this user before we can continue and actually deploy our hubs.
diff --git a/docs/hub-deployment-guide/runbooks/phase1/aws-external-account.md b/docs/hub-deployment-guide/runbooks/phase1/aws-external-account.md index c803887afc..6ac1a72c3b 100644 --- a/docs/hub-deployment-guide/runbooks/phase1/aws-external-account.md +++ b/docs/hub-deployment-guide/runbooks/phase1/aws-external-account.md @@ -21,5 +21,4 @@ There are some steps to do before the deploy: Create a **User group** with admin permissions. ``` -1. Continue with the cluster setup as usual (following [new cluster on AWS](aws)). - On the section [](new-cluster:aws-setup-credentials) follow the steps for "For accounts without AWS SSO". +1. Continue with the cluster setup as usual (following [new cluster runbook](hub-deployment-guide:runbooks:phase2)). diff --git a/docs/hub-deployment-guide/runbooks/phase1/new-aws-account.md b/docs/hub-deployment-guide/runbooks/phase1/new-aws-account.md index 3997ed9709..e62aee0779 100644 --- a/docs/hub-deployment-guide/runbooks/phase1/new-aws-account.md +++ b/docs/hub-deployment-guide/runbooks/phase1/new-aws-account.md @@ -44,7 +44,7 @@ More information on these terms can be found in [](cloud-access:aws). * On the "Review and submit assignments" page, click "Submit". You have successfully created a new AWS account and connected it to our AWS Organization's Management Account! -Now, [setup a new cluster](new-cluster:aws) inside it via Terraform. +Now, [setup a new cluster](new-cluster:new-cluster) inside it via Terraform. (hub-deployment-guide:cloud-accounts:aws-quotas)= ## Checking quotas and requesting increases diff --git a/docs/hub-deployment-guide/runbooks/phase2/index.md b/docs/hub-deployment-guide/runbooks/phase2/index.md index 6d9cd3e3b4..03dc411cbf 100644 --- a/docs/hub-deployment-guide/runbooks/phase2/index.md +++ b/docs/hub-deployment-guide/runbooks/phase2/index.md @@ -71,4 +71,4 @@ All of the following steps must be followed in order to consider phase 2 complet 4. 
**Register the new cluster with the central 2i2c Grafana** - Follow the steps in [](register-new-cluster-with-central-grafana) so that the cluster you just added will be findable from the 2i2c central Grafana. \ No newline at end of file + Follow the steps in [](register-new-cluster-with-central-grafana) so that the cluster you just added will be findable from the 2i2c central Grafana. diff --git a/docs/index.md b/docs/index.md index c902988784..655d03e246 100644 --- a/docs/index.md +++ b/docs/index.md @@ -42,7 +42,7 @@ sre-guide/common-problems-solutions.md These sections walk an engineer step-by-step through the workflow of setting up a new 2i2c-managed JupyterHub. -- If there isn't a cloud account setup yet, start with [](new-cloud-account) +- If there isn't a cloud account setup yet, start with [](hub-deployment-guide:runbooks:phase1) - If there isn't a Kubernetes cluster setup yet, start with [](new-cluster) - If all you need to do is deploy the hub, start with [](new-hub) diff --git a/docs/sre-guide/support/community-domains.md b/docs/sre-guide/support/community-domains.md index a633c62d2b..a5366c47ed 100644 --- a/docs/sre-guide/support/community-domains.md +++ b/docs/sre-guide/support/community-domains.md @@ -107,8 +107,8 @@ it does not, wait for about 15 minutes and try again - DNS propagation may take ```yaml redirects: rules: - - from: <2i2c-managed-domain> - to: + - from: <2i2c-managed-domain> + to: ``` 7. Make a PR with your changes. diff --git a/docs/topic/access-creds/cloud-auth.md b/docs/topic/access-creds/cloud-auth.md index ebeecaa4f0..b2a7656987 100644 --- a/docs/topic/access-creds/cloud-auth.md +++ b/docs/topic/access-creds/cloud-auth.md @@ -116,7 +116,7 @@ To do so, follow these steps: AWS accounts we create. 4. Create the account! They'll receive an email with appropriate instructions. 
-(cloud-access:aws-individual-accnts)= +(cloud-access:aws-individual-accounts)= ### Access individual AWS accounts For AWS accounts that are managed by clients, we use an individual AWS account for each team member, and ask the client to provide us access for each person. @@ -192,8 +192,8 @@ are used to provide access to the AWS account from your terminal. expire in 12 hours, and you will need to re-authenticate. ```{note} -Currently, the only accounts that enforce MFA are some [individual accounts](cloud-access:aws-individual-accnts) not under 2i2c's organisation SSO. -Though in the future, we may enforce MFA for our orgnisation as well. +Currently, the only accounts that enforce MFA are some [individual accounts](cloud-access:aws-individual-accounts) not under 2i2c's organisation SSO. +Though in the future, we may enforce MFA for our organisation as well. ``` % TODO: Add instructions for Azure as well. diff --git a/docs/topic/billing/chargeable-resources.md b/docs/topic/billing/chargeable-resources.md index 985fa85fde..d82fc0cbb5 100644 --- a/docs/topic/billing/chargeable-resources.md +++ b/docs/topic/billing/chargeable-resources.md @@ -476,9 +476,7 @@ exposed to the public internet at all, which theoretically adds another layer of security as well. ```{image} https://hackmd.io/_uploads/H1IYevYi6.png) -:alt: Dr. Who Meme about Cloud NAT - -One of the many memes about Cloud NAT being expensive, from [QuinnyPig](https://twitter.com/QuinnyPig/status/1357391731902341120). Many seem far more violent. See [this post](https://www.lastweekinaws.com/blog/the-aws-managed-nat-gateway-is-unpleasant-and-not-recommended/) for more information. +:alt: Dr. Who Meme about Cloud NAT. One of the many memes about Cloud NAT being expensive, from [QuinnyPig](https://twitter.com/QuinnyPig/status/1357391731902341120). Many seem far more violent. See [this post](https://www.lastweekinaws.com/blog/the-aws-managed-nat-gateway-is-unpleasant-and-not-recommended/) for more information. 
``` However, using a cloud NAT for outbound internet access is the **single most