From 18b24e02390a06371db680cf2fad3d08d3b864f2 Mon Sep 17 00:00:00 2001
From: Nootan Singh
Date: Fri, 5 Jan 2024 12:07:11 +0530
Subject: [PATCH 1/6] Added cluster tags to nodepool inputs for Terraform

Signed-off-by: Nootan Singh
---
 internal/resources/ekscluster/data_source.go     | 16 +++++++++++++++-
 internal/resources/ekscluster/helpers.go         | 14 ++++++++++++++
 .../resources/ekscluster/resource_ekscluster.go  | 10 ++++++++--
 .../resources/ekscluster/resource_nodepool.go    |  1 -
 4 files changed, 37 insertions(+), 4 deletions(-)

diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go
index ec6fe9da1..8b6c9cb9c 100644
--- a/internal/resources/ekscluster/data_source.go
+++ b/internal/resources/ekscluster/data_source.go
@@ -144,6 +144,9 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 	// see the explanation of this in the func doc of nodepoolPosMap
 	npPosMap := nodepoolPosMap(tfNodepools)
 
+	// Get a mapping of nodepool names to their definitions.
+	npDataMap := nodepoolDetailsMap(tfNodepools)
+
 	nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition, len(tfNodepools))
 
 	for _, np := range remoteNodepools {
@@ -157,6 +160,8 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 
 		if pos, ok := npPosMap[np.FullName.Name]; ok {
 			nodepools[pos] = npDef
+			// Keep the nodepool tags from the Terraform config so that the plan does not show a spurious diff.
+			nodepools[pos].Spec.Tags = npDataMap[np.FullName.Name].Spec.Tags
 		} else {
 			nodepools = append(nodepools, npDef)
 		}
@@ -173,7 +178,6 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 	if err := d.Set(specKey, spec); err != nil {
 		return errors.Wrapf(err, "Failed to set the spec for cluster %s", eksCluster.FullName.Name)
 	}
-
 	return nil
 }
 
@@ -191,3 +195,13 @@ func nodepoolPosMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolD
 
 	return ret
 }
+
+// nodepoolDetailsMap returns a mapping of nodepool names to their corresponding definitions.
+func nodepoolDetailsMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition) map[string]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition {
+	ret := map[string]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition{}
+	for _, np := range nps {
+		ret[np.Info.Name] = np
+	}
+
+	return ret
+}
diff --git a/internal/resources/ekscluster/helpers.go b/internal/resources/ekscluster/helpers.go
index 1b8ee218e..463985938 100644
--- a/internal/resources/ekscluster/helpers.go
+++ b/internal/resources/ekscluster/helpers.go
@@ -193,3 +193,17 @@ func setEquality(s1, s2 []string) bool {
 
 	return true
 }
+
+func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[string]string) map[string]string {
+	npTags := make(map[string]string)
+	if len(nodepoolTags) > 0 {
+		npTags = nodepoolTags
+	}
+	for tmcTag, tmcVal := range eksTags {
+		if _, ok := npTags[tmcTag]; !ok {
+			npTags[tmcTag] = tmcVal
+		}
+	}
+
+	return npTags
+}
diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go
index 2e21ad705..0c68a8c96 100644
--- a/internal/resources/ekscluster/resource_ekscluster.go
+++ b/internal/resources/ekscluster/resource_ekscluster.go
@@ -586,7 +586,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, m interf
 	clusterFn := constructFullname(d)
 	clusterSpec, nps := constructEksClusterSpec(d)
-
+	// Copy tags from cluster to nodepool
+	for _, npDefData := range nps {
+		npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+	}
 	clusterReq := &eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest{
 		EksCluster: &eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{
 			FullName: clusterFn,
@@ -676,6 +679,10 @@ func resourceClusterInPlaceUpdate(ctx context.Context, d *schema.ResourceData, m
 
 	clusterSpec, nodepools := constructEksClusterSpec(d)
 
+	// Copy tags from cluster to nodepool
+	for _, npDefData := range nodepools {
+		npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+	}
 	// EKS cluster update API on TMC side ignores nodepools passed to it.
 	// The nodepools have to be updated via separate nodepool API, hence we
 	// deal with them separately.
@@ -726,7 +733,6 @@ func resourceClusterImporter(ctx context.Context, d *schema.ResourceData, m inte
 	if err = d.Set(NameKey, resp.EksCluster.FullName.Name); err != nil {
 		return nil, errors.Wrapf(err, "Failed to set name for the cluster %s", resp.EksCluster.FullName.Name)
 	}
-
 	err = setResourceData(d, resp.EksCluster, npresp.Nodepools)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed to set resource data during import for %s", resp.EksCluster.FullName.Name)
diff --git a/internal/resources/ekscluster/resource_nodepool.go b/internal/resources/ekscluster/resource_nodepool.go
index d442f42cd..b8b72329b 100644
--- a/internal/resources/ekscluster/resource_nodepool.go
+++ b/internal/resources/ekscluster/resource_nodepool.go
@@ -350,7 +350,6 @@ func flattenSpec(item *eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec)
 	if len(item.SubnetIds) > 0 {
 		data[subnetIdsKey] = item.SubnetIds
 	}
-
 	data[tagsKey] = item.Tags
 
 	if len(item.Taints) > 0 {
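A note on the merge semantics this patch introduces: copyClusterTagsToNodepools copies every cluster-level tag into a nodepool's tag map, but a key the nodepool already sets keeps its nodepool value. A minimal, self-contained sketch of those semantics (illustrative tag keys and values, not the provider code itself):

package main

import "fmt"

// mergeClusterTags mirrors copyClusterTagsToNodepools from patch 1: cluster
// tags fill in missing keys, while a tag the nodepool already defines wins.
func mergeClusterTags(nodepoolTags, clusterTags map[string]string) map[string]string {
	merged := make(map[string]string)
	if len(nodepoolTags) > 0 {
		merged = nodepoolTags // aliases (and therefore mutates) the caller's map
	}

	for k, v := range clusterTags {
		if _, ok := merged[k]; !ok {
			merged[k] = v
		}
	}

	return merged
}

func main() {
	clusterTags := map[string]string{"env": "prod", "team": "infra"}
	npTags := map[string]string{"team": "db"} // nodepool override wins

	fmt.Println(mergeClusterTags(npTags, clusterTags)) // map[env:prod team:db]
}

Because a non-empty input map is aliased rather than copied, the helper mutates the caller's map in place; the call sites in this patch immediately reassign npDefData.Spec.Tags, so the aliasing is harmless there.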
From 212b9fc633e5d559f31e28b4f8e766315aebf85a Mon Sep 17 00:00:00 2001
From: Nootan Singh
Date: Tue, 9 Jan 2024 11:10:33 +0530
Subject: [PATCH 2/6] Resolved linting issues

Signed-off-by: Nootan Singh
---
 internal/resources/ekscluster/data_source.go         | 1 +
 internal/resources/ekscluster/helpers.go             | 1 +
 internal/resources/ekscluster/resource_ekscluster.go | 2 ++
 internal/resources/ekscluster/resource_nodepool.go   | 1 +
 4 files changed, 5 insertions(+)

diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go
index 8b6c9cb9c..f5c74f894 100644
--- a/internal/resources/ekscluster/data_source.go
+++ b/internal/resources/ekscluster/data_source.go
@@ -178,6 +178,7 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 	if err := d.Set(specKey, spec); err != nil {
 		return errors.Wrapf(err, "Failed to set the spec for cluster %s", eksCluster.FullName.Name)
 	}
+
 	return nil
 }
 
diff --git a/internal/resources/ekscluster/helpers.go b/internal/resources/ekscluster/helpers.go
index 463985938..2c3ce9866 100644
--- a/internal/resources/ekscluster/helpers.go
+++ b/internal/resources/ekscluster/helpers.go
@@ -199,6 +199,7 @@ func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[stri
 	if len(nodepoolTags) > 0 {
 		npTags = nodepoolTags
 	}
+
 	for tmcTag, tmcVal := range eksTags {
 		if _, ok := npTags[tmcTag]; !ok {
 			npTags[tmcTag] = tmcVal
diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go
index 0c68a8c96..f0e9f823f 100644
--- a/internal/resources/ekscluster/resource_ekscluster.go
+++ b/internal/resources/ekscluster/resource_ekscluster.go
@@ -590,6 +590,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, m interf
 	for _, npDefData := range nps {
 		npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
 	}
+
 	clusterReq := &eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest{
 		EksCluster: &eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{
 			FullName: clusterFn,
@@ -733,6 +734,7 @@ func resourceClusterImporter(ctx context.Context, d *schema.ResourceData, m inte
 	if err = d.Set(NameKey, resp.EksCluster.FullName.Name); err != nil {
 		return nil, errors.Wrapf(err, "Failed to set name for the cluster %s", resp.EksCluster.FullName.Name)
 	}
+
 	err = setResourceData(d, resp.EksCluster, npresp.Nodepools)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed to set resource data during import for %s", resp.EksCluster.FullName.Name)
diff --git a/internal/resources/ekscluster/resource_nodepool.go b/internal/resources/ekscluster/resource_nodepool.go
index b8b72329b..d442f42cd 100644
--- a/internal/resources/ekscluster/resource_nodepool.go
+++ b/internal/resources/ekscluster/resource_nodepool.go
@@ -350,6 +350,7 @@ func flattenSpec(item *eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec)
 	if len(item.SubnetIds) > 0 {
 		data[subnetIdsKey] = item.SubnetIds
 	}
+
 	data[tagsKey] = item.Tags
 
 	if len(item.Taints) > 0 {
From d31a14cf5e1e6424a8f022b4e5937c8c2826b07a Mon Sep 17 00:00:00 2001
From: Nootan Singh
Date: Mon, 29 Jan 2024 13:31:11 +0530
Subject: [PATCH 3/6] Filter out cluster tags from nodepool tags when showing
 the diff

Signed-off-by: Nootan Singh
---
 internal/resources/ekscluster/data_source.go | 24 ++++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go
index f5c74f894..84c1881b4 100644
--- a/internal/resources/ekscluster/data_source.go
+++ b/internal/resources/ekscluster/data_source.go
@@ -143,10 +143,6 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 	// see the explanation of this in the func doc of nodepoolPosMap
 	npPosMap := nodepoolPosMap(tfNodepools)
-
-	// Get a mapping of nodepool names to their definitions.
-	npDataMap := nodepoolDetailsMap(tfNodepools)
-
 	nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition, len(tfNodepools))
 
 	for _, np := range remoteNodepools {
@@ -157,11 +153,10 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
 			},
 			Spec: np.Spec,
 		}
+		npDef.Spec.Tags = filterOutClusterTags(np.Spec.Tags, eksCluster.Spec.Config.Tags)
 
 		if pos, ok := npPosMap[np.FullName.Name]; ok {
 			nodepools[pos] = npDef
-			// Keep the nodepool tags from the Terraform config so that the plan does not show a spurious diff.
-			nodepools[pos].Spec.Tags = npDataMap[np.FullName.Name].Spec.Tags
 		} else {
 			nodepools = append(nodepools, npDef)
 		}
@@ -197,12 +192,17 @@ func nodepoolPosMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolD
 
 	return ret
 }
 
-// nodepoolDetailsMap returns a mapping of nodepool names to their corresponding definitions.
-func nodepoolDetailsMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition) map[string]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition {
-	ret := map[string]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition{}
-	for _, np := range nps {
-		ret[np.Info.Name] = np
+// filterOutClusterTags removes tags inherited from the cluster (same key and same value) from a nodepool's tags when computing the diff.
+func filterOutClusterTags(npTags, clusterTags map[string]string) map[string]string {
+	npWithoutClusterTags := make(map[string]string)
+
+	for k, v := range npTags {
+		if val, ok := clusterTags[k]; !ok {
+			npWithoutClusterTags[k] = v
+		} else if v != val {
+			npWithoutClusterTags[k] = v
+		}
 	}
 
-	return ret
+	return npWithoutClusterTags
 }
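The filter above is the read-side counterpart of the merge from patch 1: when state is refreshed, a nodepool tag is dropped only if the cluster carries the same key with the same value (i.e. it was inherited), while nodepool-only keys and overridden values survive. A compact sketch with illustrative values (not the provider code itself):

package main

import "fmt"

// filterOutClusterTags: drop a nodepool tag only when the cluster has the
// same key with the same value; keep nodepool-specific keys and overrides.
func filterOutClusterTags(npTags, clusterTags map[string]string) map[string]string {
	out := make(map[string]string)

	for k, v := range npTags {
		if cv, ok := clusterTags[k]; !ok || cv != v {
			out[k] = v
		}
	}

	return out
}

func main() {
	clusterTags := map[string]string{"env": "prod", "owner": "platform"}
	npTags := map[string]string{
		"env":   "prod",    // inherited from the cluster -> dropped
		"owner": "db",      // overrides the cluster value -> kept
		"tier":  "backend", // nodepool-only -> kept
	}

	fmt.Println(filterOutClusterTags(npTags, clusterTags)) // map[owner:db tier:backend]
}

Together the two helpers form a round trip: filtering the merged tags against the cluster tags returns the nodepool's own tags, which is why the plan stops showing a diff. The one gap is a nodepool tag explicitly set to the same key and value as a cluster tag, which the filter would silently swallow; patch 5 below closes that gap with a validation error.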
From 229c50fcc18b2bad1297b848269e7956c4c80fc2 Mon Sep 17 00:00:00 2001
From: Nootan Singh
Date: Fri, 2 Feb 2024 00:03:28 +0530
Subject: [PATCH 4/6] Added nodepool tags to the test config and fixed the EKS
 cluster acceptance tests

Signed-off-by: Nootan Singh
---
 .../resources/ekscluster/resource_ekscluster.go      |  2 +-
 .../resources/ekscluster/resource_ekscluster_test.go | 13 ++++-
 internal/resources/testing/test_config.go            | 54 +++++++++++--------
 internal/resources/testing/test_helper.go            |  2 +-
 4 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go
index f0e9f823f..d4a43753f 100644
--- a/internal/resources/ekscluster/resource_ekscluster.go
+++ b/internal/resources/ekscluster/resource_ekscluster.go
@@ -495,7 +495,7 @@ func constructAddonsConfig(data []interface{}) *eksmodel.VmwareTanzuManageV1alph
 	addonsConfig := &eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig{}
 
 	if len(data) == 0 || data[0] == nil {
-		return addonsConfig
+		return nil
 	}
 
 	addonsConfigData, _ := data[0].(map[string]interface{})
diff --git a/internal/resources/ekscluster/resource_ekscluster_test.go b/internal/resources/ekscluster/resource_ekscluster_test.go
index 6e1a0e0c1..6afccc83b 100644
--- a/internal/resources/ekscluster/resource_ekscluster_test.go
+++ b/internal/resources/ekscluster/resource_ekscluster_test.go
@@ -422,6 +422,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 		RoleArn: controlPlaneRoleARN,
 		Tags: map[string]string{
 			"tmc.cloud.vmware.com/tmc-managed": "true",
+			"testclustertag":                   "testclustertagvalue",
+			"testingtag":                       "testingtagvalue",
+			"testsametag":                      "testsametagval",
 		},
 		KubernetesNetworkConfig: &eksmodel.VmwareTanzuManageV1alpha1EksclusterKubernetesNetworkConfig{
 			ServiceCidr: "10.100.0.0/16",
@@ -467,7 +470,10 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 			CapacityType: "ON_DEMAND",
 			RootDiskSize: 40,
 			Tags: map[string]string{
-				"testnptag": "testnptagvalue",
+				"testnptag":      "testnptagvalue",
+				"testingtag":     "testingnptagvalue",
+				"testsametag":    "testsametagval",
+				"testclustertag": "testclustertagvalue",
 			},
 			NodeLabels: map[string]string{
 				"testnplabelkey": "testnplabelvalue",
@@ -508,7 +514,10 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 		Spec: &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec{
 			RoleArn: workerRoleArn,
 			Tags: map[string]string{
-				"testnptag": "testnptagvalue",
+				"testnptag":      "testnptagvalue",
+				"testingtag":     "testingnptagvalue",
+				"testsametag":    "testsametagval",
+				"testclustertag": "testclustertagvalue",
 			},
 			NodeLabels: map[string]string{
 				"testnplabelkey": "testnplabelvalue",
diff --git a/internal/resources/testing/test_config.go b/internal/resources/testing/test_config.go
index caa44d3ec..72bf77782 100644
--- a/internal/resources/testing/test_config.go
+++ b/internal/resources/testing/test_config.go
@@ -18,6 +18,7 @@ const testDefaultCreateEksClusterScript = `
 		config {
 			kubernetes_version = "{{.KubernetesVersion}}"
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
+			tags = { "testclustertag" : "testclustertagvalue", "testingtag": "testingtagvalue", "testsametag":"testsametagval"}
 			kubernetes_network_config {
 				service_cidr = "10.100.0.0/16" // Forces new
 			}
@@ -35,13 +36,13 @@ const testDefaultCreateEksClusterScript = `
 				"0.0.0.0/0",
 			]
 			security_groups = [ // Forces new
-				"sg-0b77767aa25e20fec",
+				"sg-0a6768722e9716768",
 			]
 			subnet_ids = [ // Forces new
-				"subnet-0c285da60b373a4cc",
-				"subnet-0be854d94fa197cb7",
-				"subnet-04975d535cf761785",
-				"subnet-0d50aa17c694457c9",
+				"subnet-0a184f6302af32a86",
+				"subnet-0ed95d5c212ac62a1",
+				"subnet-0526ecaecde5b1bf7",
+				"subnet-06897e1063cc0cf4e",
 			]
 		}
 	}
@@ -54,22 +55,29 @@ const testDefaultCreateEksClusterScript = `
 		spec { // Refer to nodepool's schema
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
-			ami_type = "AL2_x86_64" // Forces New
+			ami_type = "CUSTOM" // Forces New
 			capacity_type = "ON_DEMAND"
-			root_disk_size = 20 // Default: 20GiB, forces New
-			tags = { "testnptag" : "testnptagvalue" }
+			ami_info {
+				ami_id = "ami-2qu8409oisdfj0qw"
+				override_bootstrap_cmd = "#!/bin/bash\n/etc/eks/bootstrap.sh tf-test-ami"
+			}
+			remote_access {
+				ssh_key = "anshulc"
+				security_groups = ["sg-0a6768722e9716768"]
+			}
+			root_disk_size = 40 // Default: 20GiB, forces New
+			tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
 			subnet_ids = [ // Required, forces new
-				"subnet-0c285da60b373a4cc",
-				"subnet-0be854d94fa197cb7",
-				"subnet-04975d535cf761785",
-				"subnet-0d50aa17c694457c9",
+				"subnet-0a184f6302af32a86",
+				"subnet-0ed95d5c212ac62a1",
+				"subnet-0526ecaecde5b1bf7",
+				"subnet-06897e1063cc0cf4e",
 			]
 			scaling_config {
-				desired_size = 2
-				max_size = 2
-				min_size = 2
+				desired_size = 4
+				max_size = 8
+				min_size = 1
 			}
 			update_config {
 				max_unavailable_nodes = "2"
@@ -89,13 +97,17 @@ const testDefaultCreateEksClusterScript = `
 		spec { // Refer to nodepool's schema
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
-			tags = { "testnptag" : "testnptagvalue" }
+			tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
+			launch_template {
+				name = "PLACE_HOLDER"
+				version = "PLACE_HOLDER"
+			}
 			subnet_ids = [ // Required, forces new
-				"subnet-0c285da60b373a4cc",
-				"subnet-0be854d94fa197cb7",
-				"subnet-04975d535cf761785",
-				"subnet-0d50aa17c694457c9",
+				"subnet-0a184f6302af32a86",
+				"subnet-0ed95d5c212ac62a1",
+				"subnet-0526ecaecde5b1bf7",
+				"subnet-06897e1063cc0cf4e",
 			]
 			scaling_config {
 				desired_size = 4
diff --git a/internal/resources/testing/test_helper.go b/internal/resources/testing/test_helper.go
index 4ef8f65c1..6b707ff82 100644
--- a/internal/resources/testing/test_helper.go
+++ b/internal/resources/testing/test_helper.go
@@ -202,7 +202,7 @@ func TestGetDefaultEksAcceptanceConfig() *TestAcceptanceConfig {
 		AWSAccountNumber:         "919197287370",
 		Region:                   "us-west-2",
 		ClusterGroupName:         "default",
-		KubernetesVersion:        "1.26",
+		KubernetesVersion:        "1.23",
 		CredentialName:           "PLACE_HOLDER",
 		CloudFormationTemplateID: "PLACE_HOLDER",
 	}
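The fixture changes above follow directly from the merge semantics: each nodepool's expected mock tags are its HCL tags plus every cluster tag it does not override. A sketch of that derivation using the fixture values from this patch (a hypothetical test assuming the single-return copyClusterTagsToNodepools signature from patch 1, not a test that exists in the repo):

package ekscluster

import (
	"reflect"
	"testing"
)

// TestNodepoolTagMergeFixture shows how the mock expectations are derived:
// nodepool tags from the config, plus every cluster tag not set on the
// nodepool itself.
func TestNodepoolTagMergeFixture(t *testing.T) {
	clusterTags := map[string]string{
		"testclustertag": "testclustertagvalue",
		"testingtag":     "testingtagvalue",
		"testsametag":    "testsametagval",
	}
	npConfigTags := map[string]string{
		"testnptag":  "testnptagvalue",
		"testingtag": "testingnptagvalue", // nodepool override wins
	}
	want := map[string]string{
		"testnptag":      "testnptagvalue",
		"testingtag":     "testingnptagvalue",
		"testsametag":    "testsametagval",
		"testclustertag": "testclustertagvalue",
	}

	if got := copyClusterTagsToNodepools(npConfigTags, clusterTags); !reflect.DeepEqual(got, want) {
		t.Fatalf("merged tags: got %v, want %v", got, want)
	}
}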
"arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com" + tags = { "testclustertag" : "testclustertagvalue", "testingtag": "testingtagvalue", "testsametag":"testsametagval"} kubernetes_network_config { service_cidr = "10.100.0.0/16" // Forces new } @@ -35,13 +36,13 @@ const testDefaultCreateEksClusterScript = ` "0.0.0.0/0", ] security_groups = [ // Forces new - "sg-0b77767aa25e20fec", + "sg-0a6768722e9716768", ] subnet_ids = [ // Forces new - "subnet-0c285da60b373a4cc", - "subnet-0be854d94fa197cb7", - "subnet-04975d535cf761785", - "subnet-0d50aa17c694457c9", + "subnet-0a184f6302af32a86", + "subnet-0ed95d5c212ac62a1", + "subnet-0526ecaecde5b1bf7", + "subnet-06897e1063cc0cf4e", ] } } @@ -54,22 +55,29 @@ const testDefaultCreateEksClusterScript = ` spec { // Refer to nodepool's schema role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com" - ami_type = "AL2_x86_64" // Forces New + ami_type = "CUSTOM" // Forces New capacity_type = "ON_DEMAND" - root_disk_size = 20 // Default: 20GiB, forces New - tags = { "testnptag" : "testnptagvalue" } + ami_info { + ami_id = "ami-2qu8409oisdfj0qw" + override_bootstrap_cmd = "#!/bin/bash\n/etc/eks/bootstrap.sh tf-test-ami" + } + remote_access { + ssh_key = "anshulc" + security_groups = ["sg-0a6768722e9716768"] + } + root_disk_size = 40 // Default: 20GiB, forces New + tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"} node_labels = { "testnplabelkey" : "testnplabelvalue" } subnet_ids = [ // Required, forces new - "subnet-0c285da60b373a4cc", - "subnet-0be854d94fa197cb7", - "subnet-04975d535cf761785", - "subnet-0d50aa17c694457c9", + "subnet-0a184f6302af32a86", + "subnet-0ed95d5c212ac62a1", + "subnet-0526ecaecde5b1bf7", + "subnet-06897e1063cc0cf4e", ] - scaling_config { - desired_size = 2 - max_size = 2 - min_size = 2 + desired_size = 4 + max_size = 8 + min_size = 1 } update_config { max_unavailable_nodes = "2" @@ -89,13 +97,17 @@ const testDefaultCreateEksClusterScript = ` spec { // Refer to nodepool's schema role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com" - tags = { "testnptag" : "testnptagvalue" } + tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"} node_labels = { "testnplabelkey" : "testnplabelvalue" } + launch_template { + name = "PLACE_HOLDER" + version = "PLACE_HOLDER" + } subnet_ids = [ // Required, forces new - "subnet-0c285da60b373a4cc", - "subnet-0be854d94fa197cb7", - "subnet-04975d535cf761785", - "subnet-0d50aa17c694457c9", + "subnet-0a184f6302af32a86", + "subnet-0ed95d5c212ac62a1", + "subnet-0526ecaecde5b1bf7", + "subnet-06897e1063cc0cf4e", ] scaling_config { desired_size = 4 diff --git a/internal/resources/testing/test_helper.go b/internal/resources/testing/test_helper.go index 4ef8f65c1..6b707ff82 100644 --- a/internal/resources/testing/test_helper.go +++ b/internal/resources/testing/test_helper.go @@ -202,7 +202,7 @@ func TestGetDefaultEksAcceptanceConfig() *TestAcceptanceConfig { AWSAccountNumber: "919197287370", Region: "us-west-2", ClusterGroupName: "default", - KubernetesVersion: "1.26", + KubernetesVersion: "1.23", CredentialName: "PLACE_HOLDER", CloudFormationTemplateID: "PLACE_HOLDER", } From 7ad4b6afbdce9c58273a9c0a44307306cd1626cf Mon Sep 17 00:00:00 2001 From: Nootan Singh Date: Fri, 2 Feb 2024 12:41:06 +0530 Subject: [PATCH 5/6] Added logic nodepool tags should not be same as cluster tags Signed-off-by: Nootan 
diff --git a/internal/resources/ekscluster/helpers.go b/internal/resources/ekscluster/helpers.go
index 2c3ce9866..3b4bb21af 100644
--- a/internal/resources/ekscluster/helpers.go
+++ b/internal/resources/ekscluster/helpers.go
@@ -7,6 +7,8 @@ package ekscluster
 import (
 	"reflect"
 
+	"github.com/pkg/errors"
+
 	eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster"
 )
 
@@ -194,17 +196,22 @@ func setEquality(s1, s2 []string) bool {
 	return true
 }
 
-func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[string]string) map[string]string {
+func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[string]string) (map[string]string, error) {
 	npTags := make(map[string]string)
+
+	var err error
+
 	if len(nodepoolTags) > 0 {
 		npTags = nodepoolTags
 	}
 
 	for tmcTag, tmcVal := range eksTags {
-		if _, ok := npTags[tmcTag]; !ok {
+		if val, ok := npTags[tmcTag]; !ok {
 			npTags[tmcTag] = tmcVal
+		} else if val == tmcVal {
+			err = errors.Errorf("key:%v, val:%v", tmcTag, val)
 		}
 	}
 
-	return npTags
+	return npTags, err
 }
diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go
index d4a43753f..4f78804bd 100644
--- a/internal/resources/ekscluster/resource_ekscluster.go
+++ b/internal/resources/ekscluster/resource_ekscluster.go
@@ -588,7 +588,12 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, m interf
 	clusterSpec, nps := constructEksClusterSpec(d)
 	// Copy tags from cluster to nodepool
 	for _, npDefData := range nps {
-		npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+		var err error
+
+		npDefData.Spec.Tags, err = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+
+		if err != nil {
+			return diag.FromErr(errors.Wrap(err, "Nodepool tags should not be the same as cluster tags"))
+		}
 	}
 
 	clusterReq := &eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest{
 		EksCluster: &eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{
 			FullName: clusterFn,
@@ -682,7 +687,10 @@ func resourceClusterInPlaceUpdate(ctx context.Context, d *schema.ResourceData, m
 
 	// Copy tags from cluster to nodepool
 	for _, npDefData := range nodepools {
-		npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+		npDefData.Spec.Tags, err = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
+		if err != nil {
+			return diag.FromErr(errors.Wrap(err, "Nodepool tags should not be the same as cluster tags"))
+		}
 	}
 	// EKS cluster update API on TMC side ignores nodepools passed to it.
 	// The nodepools have to be updated via separate nodepool API, hence we
diff --git a/internal/resources/ekscluster/resource_ekscluster_test.go b/internal/resources/ekscluster/resource_ekscluster_test.go
index 6afccc83b..c1cfb0c77 100644
--- a/internal/resources/ekscluster/resource_ekscluster_test.go
+++ b/internal/resources/ekscluster/resource_ekscluster_test.go
@@ -422,9 +422,8 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 		RoleArn: controlPlaneRoleARN,
 		Tags: map[string]string{
 			"tmc.cloud.vmware.com/tmc-managed": "true",
-			"testclustertag":                   "testclustertagvalue",
-			"testingtag":                       "testingtagvalue",
-			"testsametag":                      "testsametagval",
+			"testtag":                          "testval",
+			"newtesttag":                       "newtestval",
 		},
 		KubernetesNetworkConfig: &eksmodel.VmwareTanzuManageV1alpha1EksclusterKubernetesNetworkConfig{
 			ServiceCidr: "10.100.0.0/16",
@@ -470,10 +469,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 			CapacityType: "ON_DEMAND",
 			RootDiskSize: 40,
 			Tags: map[string]string{
-				"testnptag":      "testnptagvalue",
-				"testingtag":     "testingnptagvalue",
-				"testsametag":    "testsametagval",
-				"testclustertag": "testclustertagvalue",
+				"testnptag":  "testnptagvalue",
+				"newtesttag": "testingtagvalue",
+				"testtag":    "testval",
 			},
 			NodeLabels: map[string]string{
 				"testnplabelkey": "testnplabelvalue",
@@ -514,10 +512,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
 		Spec: &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec{
 			RoleArn: workerRoleArn,
 			Tags: map[string]string{
-				"testnptag":      "testnptagvalue",
-				"testingtag":     "testingnptagvalue",
-				"testsametag":    "testsametagval",
-				"testclustertag": "testclustertagvalue",
+				"testnptag":  "testnptagvalue",
+				"newtesttag": "testingtagvalue",
+				"testtag":    "testval",
 			},
 			NodeLabels: map[string]string{
 				"testnplabelkey": "testnplabelvalue",
diff --git a/internal/resources/testing/test_config.go b/internal/resources/testing/test_config.go
index 72bf77782..bc6cf3a0e 100644
--- a/internal/resources/testing/test_config.go
+++ b/internal/resources/testing/test_config.go
@@ -18,7 +18,7 @@ const testDefaultCreateEksClusterScript = `
 		config {
 			kubernetes_version = "{{.KubernetesVersion}}"
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
-			tags = { "testclustertag" : "testclustertagvalue", "testingtag": "testingtagvalue", "testsametag":"testsametagval"}
+			tags = { "testtag" : "testval", "newtesttag": "newtestval"}
 			kubernetes_network_config {
 				service_cidr = "10.100.0.0/16" // Forces new
 			}
@@ -66,7 +66,7 @@ const testDefaultCreateEksClusterScript = `
 				security_groups = ["sg-0a6768722e9716768"]
 			}
 			root_disk_size = 40 // Default: 20GiB, forces New
-			tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
+			tags = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
 			subnet_ids = [ // Required, forces new
@@ -97,7 +97,7 @@ const testDefaultCreateEksClusterScript = `
 		spec { // Refer to nodepool's schema
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
-			tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
+			tags = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
 			launch_template {
 				name = "PLACE_HOLDER"
 				version = "PLACE_HOLDER"
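The validation above exists because a nodepool tag that exactly duplicates a cluster tag (same key, same value) would be silently swallowed by filterOutClusterTags on the next refresh, making the configuration impossible to round-trip. A sketch of the failure mode the patched helper now rejects (illustrative values; the error format matches the patch):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// mergeWithValidation mirrors the patched copyClusterTagsToNodepools:
// cluster tags still fill in missing keys, but a nodepool tag equal to a
// cluster tag (same key and value) yields an error instead of being
// silently absorbed.
func mergeWithValidation(nodepoolTags, clusterTags map[string]string) (map[string]string, error) {
	npTags := make(map[string]string)

	var err error

	if len(nodepoolTags) > 0 {
		npTags = nodepoolTags
	}

	for k, v := range clusterTags {
		if val, ok := npTags[k]; !ok {
			npTags[k] = v
		} else if val == v {
			err = errors.Errorf("key:%v, val:%v", k, val)
		}
	}

	return npTags, err
}

func main() {
	_, err := mergeWithValidation(
		map[string]string{"env": "prod"}, // explicitly duplicates a cluster tag
		map[string]string{"env": "prod"},
	)
	fmt.Println(err) // key:env, val:prod
}

As written, the loop keeps iterating after a conflict, so when several tags collide only the last key visited is reported (and Go map iteration order is randomized); returning on the first conflict, or collecting all conflicts, would make the diagnostic more predictable.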
From c63ad8150e4c48c1854f71a7a4a269543e2b0c43 Mon Sep 17 00:00:00 2001
From: Nootan Singh
Date: Fri, 9 Feb 2024 14:22:07 +0530
Subject: [PATCH 6/6] Added changes to run the EKS acceptance tests

Signed-off-by: Nootan Singh
---
 internal/resources/testing/test_config.go | 31 ++++-------------------
 1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/internal/resources/testing/test_config.go b/internal/resources/testing/test_config.go
index bc6cf3a0e..ad302f1e7 100644
--- a/internal/resources/testing/test_config.go
+++ b/internal/resources/testing/test_config.go
@@ -36,13 +36,10 @@ const testDefaultCreateEksClusterScript = `
 				"0.0.0.0/0",
 			]
 			security_groups = [ // Forces new
-				"sg-0a6768722e9716768",
+				"sg-09247a89b01962bd9",
 			]
 			subnet_ids = [ // Forces new
-				"subnet-0a184f6302af32a86",
-				"subnet-0ed95d5c212ac62a1",
-				"subnet-0526ecaecde5b1bf7",
-				"subnet-06897e1063cc0cf4e",
+				"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
 			]
 		}
 	}
@@ -55,24 +52,13 @@ const testDefaultCreateEksClusterScript = `
 		spec { // Refer to nodepool's schema
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
-			ami_type = "CUSTOM" // Forces New
+			ami_type = "AL2_x86_64" // Forces New
 			capacity_type = "ON_DEMAND"
-			ami_info {
-				ami_id = "ami-2qu8409oisdfj0qw"
-				override_bootstrap_cmd = "#!/bin/bash\n/etc/eks/bootstrap.sh tf-test-ami"
-			}
-			remote_access {
-				ssh_key = "anshulc"
-				security_groups = ["sg-0a6768722e9716768"]
-			}
 			root_disk_size = 40 // Default: 20GiB, forces New
 			tags = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
 			subnet_ids = [ // Required, forces new
-				"subnet-0a184f6302af32a86",
-				"subnet-0ed95d5c212ac62a1",
-				"subnet-0526ecaecde5b1bf7",
-				"subnet-06897e1063cc0cf4e",
+				"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
 			]
 			scaling_config {
 				desired_size = 4
@@ -99,15 +85,8 @@ const testDefaultCreateEksClusterScript = `
 			role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
 			tags = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue"}
 			node_labels = { "testnplabelkey" : "testnplabelvalue" }
-			launch_template {
-				name = "PLACE_HOLDER"
-				version = "PLACE_HOLDER"
-			}
 			subnet_ids = [ // Required, forces new
-				"subnet-0a184f6302af32a86",
-				"subnet-0ed95d5c212ac62a1",
-				"subnet-0526ecaecde5b1bf7",
-				"subnet-06897e1063cc0cf4e",
+				"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
 			]
 			scaling_config {
 				desired_size = 4
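For context on how these fixtures are consumed: testDefaultCreateEksClusterScript is a Go template string, and placeholders such as {{.KubernetesVersion}} and {{.AWSAccountNumber}} are filled from the acceptance config built by TestGetDefaultEksAcceptanceConfig. A stripped-down sketch of that rendering step (presumably text/template; struct fields abbreviated to those used here):

package main

import (
	"os"
	"text/template"
)

type acceptanceConfig struct {
	AWSAccountNumber         string
	KubernetesVersion        string
	CloudFormationTemplateID string
}

func main() {
	// A fragment of the test script with the same placeholder syntax.
	tpl := template.Must(template.New("eks").Parse(
		`kubernetes_version = "{{.KubernetesVersion}}"
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
`))

	cfg := acceptanceConfig{
		AWSAccountNumber:         "919197287370",
		KubernetesVersion:        "1.23",
		CloudFormationTemplateID: "PLACE_HOLDER",
	}

	if err := tpl.Execute(os.Stdout, cfg); err != nil {
		panic(err)
	}
}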