Refactor EKS P1 and Sync tests (#202)
* Move AfterEach to the top layer

Signed-off-by: Parthvi Vala <[email protected]>

* Fix BeforeEach and make k8sVersion & upgradeToVersion local

Signed-off-by: Parthvi Vala <[email protected]>

* More changes to suite file

Signed-off-by: Parthvi Vala <[email protected]>

* Add focus for debugging

Signed-off-by: Parthvi Vala <[email protected]>

* Fix failures

Signed-off-by: Parthvi Vala <[email protected]>

* Run tests on released version only

Signed-off-by: Parthvi Vala <[email protected]>

* changes post-rebase

Signed-off-by: Parthvi Vala <[email protected]>

---------

Signed-off-by: Parthvi Vala <[email protected]>
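Taken together, the bullets above describe one Ginkgo pattern: hoist cleanup into a single top-level AfterEach, keep k8sVersion and upgradeToVersion as locals of the Describe instead of suite-level variables, and pass the upgrade target into the shared check helpers. Below is a minimal sketch of that pattern, not code from this commit; getK8sVersion, deleteDownstreamCluster, and upgradeCheck are hypothetical stand-ins for the repo's helpers, and the version string is made up.

package p1_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestP1Sketch(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "P1 Sketch Suite")
}

// Hypothetical stand-ins for the repo's helper package.
func getK8sVersion(forUpgrade bool) string { return "1.30" } // flag mirrors helper.GetK8sVersion's bool
func deleteDownstreamCluster(name string)  {}
func upgradeCheck(current, upgradeTo string) {}

var _ = Describe("P1Import", func() {
	// Locals of the Describe rather than package-level variables, so
	// specs cannot leak versions into one another.
	var (
		k8sVersion       string
		upgradeToVersion string
	)

	BeforeEach(func() {
		k8sVersion = getK8sVersion(true)
		upgradeToVersion = getK8sVersion(false)
	})

	// One AfterEach at the top layer replaces per-Context cleanup blocks.
	AfterEach(func() {
		deleteDownstreamCluster("downstream-cluster")
	})

	It("upgrades the control plane", func() {
		// The upgrade target is handed to the check helper as a parameter.
		upgradeCheck(k8sVersion, upgradeToVersion)
	})
})

The effect, visible throughout the diff below, is that upgradeToVersion stops being shared mutable state in the suite file and becomes an explicit argument to helpers such as upgradeNodeKubernetesVersionGTCPCheck and upgradeCPAndAddNgCheck.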
valaparthvi authored Nov 29, 2024
1 parent 2656826 commit 086e2fd
Showing 6 changed files with 150 additions and 123 deletions.
6 changes: 4 additions & 2 deletions hosted/aks/p1/p1_import_test.go
@@ -13,8 +13,10 @@ import (
)

var _ = Describe("P1Import", func() {
var k8sVersion string
var cluster *management.Cluster
var (
cluster *management.Cluster
k8sVersion string
)
BeforeEach(func() {
var err error
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCredID, location, false)
67 changes: 34 additions & 33 deletions hosted/eks/p1/p1_import_test.go
@@ -13,7 +13,10 @@ import (
)

var _ = Describe("P1Import", func() {
var cluster *management.Cluster
var (
cluster *management.Cluster
k8sVersion string
)

BeforeEach(func() {
var err error
@@ -29,40 +32,48 @@ var _ = Describe("P1Import", func() {
err = helper.DeleteEKSClusterOnAWS(region, clusterName)
Expect(err).To(BeNil())
} else {
fmt.Println("Skipping downstream cluster deletion: ", clusterName)
GinkgoLogr.Info(fmt.Sprintf("Skipping downstream cluster deletion: %s", clusterName))
}
})

When("a cluster is imported for upgrade", func() {
Context("Upgrade Testing", func() {
var upgradeToVersion string

BeforeEach(func() {
var err error
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, true)
Expect(err).To(BeNil())
GinkgoLogr.Info(fmt.Sprintf("Using kubernetes version %s for cluster %s", k8sVersion, clusterName))

err = helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
upgradeToVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, false)
Expect(err).To(BeNil())
})

It("Upgrade version of node group only", func() {
testCaseID = 88
upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient)
})
When("a cluster is imported", func() {

// eks-operator/issues/752
XIt("should successfully update a cluster while it is still in updating state", func() {
testCaseID = 104
updateClusterInUpdatingState(cluster, ctx.RancherAdminClient)
})
BeforeEach(func() {
err := helper.CreateEKSClusterOnAWS(region, clusterName, k8sVersion, "1", helpers.GetCommonMetadataLabels())
Expect(err).To(BeNil())
cluster, err = helper.ImportEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, region)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
})

It("Update k8s version of cluster and add node groups", func() {
testCaseID = 90
upgradeCPAndAddNgCheck(cluster, ctx.RancherAdminClient)
It("Upgrade version of node group only", func() {
testCaseID = 88
upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient, upgradeToVersion)
})

// eks-operator/issues/752
XIt("should successfully update a cluster while it is still in updating state", func() {
testCaseID = 104
updateClusterInUpdatingState(cluster, ctx.RancherAdminClient, upgradeToVersion)
})

It("Update k8s version of cluster and add node groups", func() {
testCaseID = 90
upgradeCPAndAddNgCheck(cluster, ctx.RancherAdminClient, upgradeToVersion)
})
})
})

@@ -146,19 +157,9 @@ var _ = Describe("P1Import", func() {
})

It("Update cluster logging types", func() {
// https://github.com/rancher/eks-operator/issues/938
testCaseID = 77

var err error
loggingTypes := []string{"api", "audit", "authenticator", "controllerManager", "scheduler"}
By("Adding the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, ctx.RancherAdminClient, loggingTypes, true)
Expect(err).To(BeNil())
})

By("Removing the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, ctx.RancherAdminClient, []string{loggingTypes[0]}, true)
Expect(err).To(BeNil())
})
updateLoggingCheck(cluster, ctx.RancherAdminClient)
})

It("Update Tags and Labels", func() {
71 changes: 36 additions & 35 deletions hosted/eks/p1/p1_provisioning_test.go
@@ -17,7 +17,10 @@ import (
)

var _ = Describe("P1Provisioning", func() {
var cluster *management.Cluster
var (
cluster *management.Cluster
k8sVersion string
)

var _ = BeforeEach(func() {
var err error
@@ -31,7 +34,7 @@ var _ = Describe("P1Provisioning", func() {
err := helper.DeleteEKSHostCluster(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
} else {
fmt.Println("Skipping downstream cluster deletion: ", clusterName)
GinkgoLogr.Info(fmt.Sprintf("Skipping downstream cluster deletion: %s", clusterName))
}
})

@@ -77,7 +80,9 @@ var _ = Describe("P1Provisioning", func() {
Eventually(func() bool {
cluster, err := ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
return cluster.Transitioning == "error" && strings.Contains(cluster.TransitioningMessage, "is not unique within the cluster")
// Checking for both messages since different operator versions show different messages; to be removed once the message is updated.
// New message: NodePool names must be unique within the [c-dnzzk] cluster to avoid duplication
return cluster.Transitioning == "error" && (strings.Contains(cluster.TransitioningMessage, "is not unique within the cluster") || strings.Contains(cluster.TransitioningMessage, "NodePool names must be unique"))
}, "1m", "3s").Should(BeTrue())
})

@@ -162,7 +167,8 @@ var _ = Describe("P1Provisioning", func() {
Expect(amiID).To(Equal("AL2_x86_64_GPU"))
})

When("a cluster is created for upgrade", func() {
Context("Upgrade testing", func() {
var upgradeToVersion string

BeforeEach(func() {
var err error
@@ -171,27 +177,33 @@
upgradeToVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, false)
Expect(err).To(BeNil())
GinkgoLogr.Info(fmt.Sprintf("While provisioning, using kubernetes version %s for cluster %s", k8sVersion, clusterName))

cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, region, nil)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
})

It("Upgrade version of node group only", func() {
testCaseID = 126
upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient)
})
When("a cluster is created", func() {

It("Update k8s version of cluster and add node groups", func() {
testCaseID = 125
upgradeCPAndAddNgCheck(cluster, ctx.RancherAdminClient)
})
BeforeEach(func() {
var err error
cluster, err = helper.CreateEKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, region, nil)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
})

It("Upgrade version of node group only", func() {
testCaseID = 126
upgradeNodeKubernetesVersionGTCPCheck(cluster, ctx.RancherAdminClient, upgradeToVersion)
})

It("Update k8s version of cluster and add node groups", func() {
testCaseID = 125
upgradeCPAndAddNgCheck(cluster, ctx.RancherAdminClient, upgradeToVersion)
})

// eks-operator/issues/752
XIt("should successfully update a cluster while it is still in updating state", func() {
testCaseID = 148
updateClusterInUpdatingState(cluster, ctx.RancherAdminClient)
// eks-operator/issues/752
XIt("should successfully update a cluster while it is still in updating state", func() {
testCaseID = 148
updateClusterInUpdatingState(cluster, ctx.RancherAdminClient, upgradeToVersion)
})
})
})

@@ -205,21 +217,10 @@
Expect(err).To(BeNil())
})

// eks-operator/issues/938
XIt("Update cluster logging types", func() {
It("Update cluster logging types", func() {
// https://github.com/rancher/eks-operator/issues/938
testCaseID = 128

var err error
loggingTypes := []string{"api", "audit", "authenticator", "controllerManager", "scheduler"}
By("Adding the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, ctx.RancherAdminClient, loggingTypes, true)
Expect(err).To(BeNil())
})

By("Removing the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, ctx.RancherAdminClient, []string{loggingTypes[0]}, true)
Expect(err).To(BeNil())
})
updateLoggingCheck(cluster, ctx.RancherAdminClient)
})

It("Update Tags and Labels", func() {
68 changes: 42 additions & 26 deletions hosted/eks/p1/p1_suite_test.go
@@ -39,10 +39,10 @@ import (
)

var (
ctx helpers.Context
clusterName, k8sVersion, upgradeToVersion string
testCaseID int64
region = helpers.GetEKSRegion()
ctx helpers.Context
clusterName string
testCaseID int64
region = helpers.GetEKSRegion()
)

func TestP1(t *testing.T) {
@@ -72,11 +72,11 @@ var _ = ReportAfterEach(func(report SpecReport) {
})

// updateClusterInUpdatingState runs checks to ensure a cluster in an updating state can be updated
func updateClusterInUpdatingState(cluster *management.Cluster, client *rancher.Client) {
var exists bool
upgradeToVersion, err := helper.GetK8sVersion(client, false)
Expect(err).To(BeNil())

func updateClusterInUpdatingState(cluster *management.Cluster, client *rancher.Client, upgradeToVersion string) {
var (
exists bool
err error
)
cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeToVersion, client, false)
Expect(err).To(BeNil())
Expect(*cluster.EKSConfig.KubernetesVersion).To(Equal(upgradeToVersion))
@@ -85,7 +85,8 @@ func updateClusterInUpdatingState(cluster *management.Cluster, client *rancher.C
Expect(err).To(BeNil())

loggingTypes := []string{"api"}
helper.UpdateLogging(cluster, client, loggingTypes, false)
cluster, err = helper.UpdateLogging(cluster, client, loggingTypes, false)
Expect(err).To(BeNil())
Expect(*cluster.EKSConfig.LoggingTypes).Should(HaveExactElements(loggingTypes))

err = clusters.WaitClusterToBeUpgraded(client, cluster.ID)
@@ -103,7 +104,7 @@ func updateClusterInUpdatingState(cluster *management.Cluster, client *rancher.C
}, "15m", "30s").Should(BeTrue())
}

func syncK8sVersionUpgradeCheck(cluster *management.Cluster, client *rancher.Client, upgradeNodeGroup bool) {
func syncK8sVersionUpgradeCheck(cluster *management.Cluster, client *rancher.Client, upgradeNodeGroup bool, k8sVersion, upgradeToVersion string) {
var err error
GinkgoLogr.Info("Upgrading cluster to version:" + upgradeToVersion)

@@ -157,14 +158,14 @@ func syncK8sVersionUpgradeCheck(cluster *management.Cluster, client *rancher.Cli
}
}

func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client) {
func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client, k8sVersion, upgradeToVersion string) {
var err error
loggingTypes := []string{"api", "audit", "authenticator", "controllerManager", "scheduler"}
currentNodeGroupNumber := len(cluster.EKSConfig.NodeGroups)
initialNodeCount := *cluster.EKSConfig.NodeGroups[0].DesiredSize

By("upgrading control plane", func() {
syncK8sVersionUpgradeCheck(cluster, client, false)
syncK8sVersionUpgradeCheck(cluster, client, false, k8sVersion, upgradeToVersion)
})

By("scaling up the NodeGroup", func() {
@@ -178,7 +179,8 @@ func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client)
}

// Verify the new edits reflect in AWS and existing details do NOT change
out, err := helper.GetFromEKS(region, clusterName, "cluster", "'.[]|.Version'")
var out string
out, err = helper.GetFromEKS(region, clusterName, "cluster", "'.[]|.Version'")
Expect(err).To(BeNil())
Expect(out).To(Equal(upgradeToVersion))

@@ -200,7 +202,8 @@ func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client)
Expect(*cluster.EKSConfig.LoggingTypes).ShouldNot(HaveExactElements(loggingTypes))

// Verify the new edits reflect in AWS console and existing details do NOT change
out, err := helper.GetFromEKS(region, clusterName, "cluster", "'.[]|.Version'")
var out string
out, err = helper.GetFromEKS(region, clusterName, "cluster", "'.[]|.Version'")
Expect(err).To(BeNil())
Expect(out).To(Equal(upgradeToVersion))

@@ -218,7 +221,8 @@ func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client)
Expect(len(cluster.EKSConfig.NodeGroups)).To(Equal(currentNodeGroupNumber + 1))

// Verify the new edits reflect in AWS console and existing details do NOT change
out, err := helper.GetFromEKS(region, clusterName, "nodegroup", "'.|length'")
var out string
out, err = helper.GetFromEKS(region, clusterName, "nodegroup", "'.|length'")
Expect(err).To(BeNil())
Expect(strconv.Atoi(out)).To(Equal(currentNodeGroupNumber + 1))

@@ -230,19 +234,19 @@ func syncRancherToAWSCheck(cluster *management.Cluster, client *rancher.Client)
}

// upgradeNodeKubernetesVersionGTCPCheck upgrades the Nodegroup version to one greater than the Controlplane's and expects the cluster to report an error
func upgradeNodeKubernetesVersionGTCPCheck(cluster *management.Cluster, client *rancher.Client) {
var err error
upgradeToVersion, err = helper.GetK8sVersion(client, false)
Expect(err).To(BeNil())
func upgradeNodeKubernetesVersionGTCPCheck(cluster *management.Cluster, client *rancher.Client, upgradeToVersion string) {
GinkgoLogr.Info("Upgrading only Nodegroup's EKS version to: " + upgradeToVersion)
var err error
cluster, err = helper.UpgradeNodeKubernetesVersion(cluster, upgradeToVersion, client, false, false)
Expect(err).To(BeNil())

// wait until the error is visible on the cluster
Eventually(func() bool {
cluster, err := client.Management.Cluster.ByID(cluster.ID)
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).To(BeNil())
return cluster.Transitioning == "error" && strings.Contains(cluster.TransitioningMessage, "are not compatible: the node group version may only be up to three minor versions older than the cluster version")
// Checking for both messages since different operator versions show different messages; to be removed once the message is updated.
// New message: versions for cluster [1.29] and nodegroup [1.30] not compatible: all nodegroup kubernetes versions must be equal to or one minor version lower than the cluster kubernetes version
return cluster.Transitioning == "error" && strings.Contains(cluster.TransitioningMessage, "not compatible")
}, "1m", "3s").Should(BeTrue())
}

@@ -266,13 +270,10 @@ func invalidAccessValuesCheck(cluster *management.Cluster, client *rancher.Clien
Expect(err).To(MatchError(ContainSubstring("public access, private access, or both must be enabled")))
}

func upgradeCPAndAddNgCheck(cluster *management.Cluster, client *rancher.Client) {

func upgradeCPAndAddNgCheck(cluster *management.Cluster, client *rancher.Client, upgradeToVersion string) {
var err error
originalLen := len(cluster.EKSConfig.NodeGroups)
newNodeGroupName := pointer.String(namegen.AppendRandomString("ng"))
upgradeToVersion, err = helper.GetK8sVersion(client, false)
Expect(err).To(BeNil())
GinkgoLogr.Info("Upgrading control plane to version:" + upgradeToVersion)

By("upgrading the ControlPlane", func() {
@@ -374,3 +375,18 @@ func updateTagsAndLabels(cluster *management.Cluster, client *rancher.Client) {
}
})
}

// Automates Qase: 128 and 77
func updateLoggingCheck(cluster *management.Cluster, client *rancher.Client) {
var err error
loggingTypes := []string{"api", "audit", "authenticator", "controllerManager", "scheduler"}
By("Adding the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, client, loggingTypes, true)
Expect(err).To(BeNil())
})

By("Removing the LoggingTypes", func() {
cluster, err = helper.UpdateLogging(cluster, client, []string{loggingTypes[0]}, true)
Expect(err).To(BeNil())
})
}