Skip to content

Commit

Permalink
Merge pull request #333 from salasberryfin/e2e-add-nodegroup-validation-release-v2-8
Browse files Browse the repository at this point in the history

test: add node group test to e2e
  • Loading branch information
mjura committed Feb 6, 2024
2 parents d58d8f5 + c8b2236 commit 0350dd7
Show file tree
Hide file tree
Showing 2 changed files with 107 additions and 2 deletions.
105 changes: 105 additions & 0 deletions test/e2e/basic_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package e2e
import (
"fmt"

"github.com/aws/aws-sdk-go/aws"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
eksv1 "github.com/rancher/eks-operator/pkg/apis/eks.cattle.io/v1"
Expand Down Expand Up @@ -56,4 +57,108 @@ var _ = Describe("BasicCluster", func() {
return fmt.Errorf("cluster is not ready yet. Current phase: %s", currentCluster.Status.Phase)
}, waitLong, pollInterval).ShouldNot(HaveOccurred())
})

It("Successfully adds and removes a node group", func() {
	// Keep a copy of the original node groups so the cluster can be
	// restored to its initial state at the end of the test.
	initialNodeGroups := eksConfig.DeepCopy().Spec.NodeGroups

	// Refresh the cluster object before patching it so the merge patch is
	// computed against the latest resource version.
	Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed())
	patch := runtimeclient.MergeFrom(cluster.DeepCopy())

	nodeGroup := eksv1.NodeGroup{
		NodegroupName:        aws.String("ng1"),
		DiskSize:             aws.Int64(20),
		InstanceType:         aws.String("t3.medium"),
		DesiredSize:          aws.Int64(1),
		MaxSize:              aws.Int64(10),
		MinSize:              aws.Int64(1),
		RequestSpotInstances: aws.Bool(false),
	}

	cluster.Spec.EKSConfig.NodeGroups = append(cluster.Spec.EKSConfig.NodeGroups, nodeGroup)

	Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed())

	// waitForPhaseWithNodeGroups polls the EKSClusterConfig until it reports
	// the given phase AND the expected number of node groups; on each failed
	// poll it returns msg plus the observed state. This replaces four
	// copy-pasted Eventually blocks that differed only in these parameters.
	waitForPhaseWithNodeGroups := func(phase string, nodeGroupCount int, msg string) {
		Eventually(func() error {
			currentCluster := &eksv1.EKSClusterConfig{}

			if err := cl.Get(ctx, runtimeclient.ObjectKey{
				Name:      cluster.Name,
				Namespace: eksClusterConfigNamespace,
			}, currentCluster); err != nil {
				return err
			}

			if currentCluster.Status.Phase == phase && len(currentCluster.Spec.NodeGroups) == nodeGroupCount {
				return nil
			}

			return fmt.Errorf("%s. Current phase: %s, node group count %d", msg, currentCluster.Status.Phase, len(currentCluster.Spec.NodeGroups))
		}, waitLong, pollInterval).ShouldNot(HaveOccurred())
	}

	By("Waiting for cluster to start adding node group")
	// Fixed typo in the original message ("create new new node group").
	waitForPhaseWithNodeGroups("updating", 2, "cluster didn't create new node group")

	By("Waiting for cluster to finish adding node group")
	waitForPhaseWithNodeGroups("active", 2, "cluster didn't finish adding node group")

	By("Restoring initial node groups")

	Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed())
	patch = runtimeclient.MergeFrom(cluster.DeepCopy())

	cluster.Spec.EKSConfig.NodeGroups = initialNodeGroups

	Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed())

	By("Waiting for cluster to start removing node group")
	waitForPhaseWithNodeGroups("updating", 1, "cluster didn't start removing node group")

	By("Waiting for cluster to finish removing node group")
	waitForPhaseWithNodeGroups("active", 1, "cluster didn't finish removing node group")

	By("Done waiting for cluster to finish removing node group")
})
})
4 changes: 2 additions & 2 deletions test/e2e/templates/basic-cluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ spec:
amazonCredentialSecret: default:aws-credentials
imported: false
kmsKey: ""
kubernetesVersion: "1.25"
kubernetesVersion: "1.26"
loggingTypes: []
nodeGroups:
- desiredSize: 2
Expand All @@ -26,7 +26,7 @@ spec:
subnets: []
tags: {}
userData: ""
version: "1.25"
version: "1.26"
privateAccess: false
publicAccess: true
publicAccessSources: []
Expand Down

0 comments on commit 0350dd7

Please sign in to comment.