
Commit

Merge pull request #369 from vmware/feature/support-tagging-from-cluster-to-nodepool

Apply cluster tags to nodepools; don't report the inherited tags as a Terraform change
snootan authored Feb 21, 2024
2 parents 61cf1f8 + c63ad81 commit ed4b47e
Showing 6 changed files with 75 additions and 25 deletions.
17 changes: 16 additions & 1 deletion internal/resources/ekscluster/data_source.go
@@ -205,7 +205,6 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan

// see the explanation of this in the func doc of nodepoolPosMap
npPosMap := nodepoolPosMap(tfNodepools)

nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition, len(tfNodepools))

for _, np := range remoteNodepools {
@@ -216,6 +215,7 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
},
Spec: np.Spec,
}
npDef.Spec.Tags = filterOutClusterTags(np.Spec.Tags, eksCluster.Spec.Config.Tags)

if pos, ok := npPosMap[np.FullName.Name]; ok {
nodepools[pos] = npDef
@@ -253,3 +253,18 @@ func nodepoolPosMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolD

return ret
}

// filterOutClusterTags removes tags inherited from the cluster from a nodepool's
// tag map, so the inherited tags are not reported as a diff.
func filterOutClusterTags(npTags, clusterTags map[string]string) map[string]string {
npWithoutClusterTags := make(map[string]string)

for k, v := range npTags {
if val, ok := clusterTags[k]; !ok {
npWithoutClusterTags[k] = v
} else if v != val {
npWithoutClusterTags[k] = v
}
}

return npWithoutClusterTags
}
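
In effect, a nodepool keeps a tag only when the cluster does not define that key at all, or defines it with a different value. A minimal sketch of the behavior (the tag keys and values here are invented for illustration):

    npTags := map[string]string{
        "env":     "dev",   // cluster defines "env" with a different value: kept
        "team":    "infra", // same key and value as the cluster: dropped
        "scratch": "true",  // not a cluster tag: kept
    }
    clusterTags := map[string]string{"env": "prod", "team": "infra"}

    fmt.Println(filterOutClusterTags(npTags, clusterTags))
    // map[env:dev scratch:true]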
22 changes: 22 additions & 0 deletions internal/resources/ekscluster/helpers.go
@@ -7,6 +7,8 @@ package ekscluster
import (
"reflect"

"github.com/pkg/errors"

eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster"
)

@@ -193,3 +195,23 @@ func setEquality(s1, s2 []string) bool {

return true
}

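// copyClusterTagsToNodepools merges the cluster's tags into a nodepool's tag
// map. A nodepool may override an inherited tag with a different value, but
// repeating a cluster tag with the identical value is rejected: such tags are
// filtered out of nodepool state (see filterOutClusterTags), so they would
// otherwise show up as a permanent diff.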
func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[string]string) (map[string]string, error) {
npTags := make(map[string]string)

var err error

if len(nodepoolTags) > 0 {
npTags = nodepoolTags
}

for tmcTag, tmcVal := range eksTags {
if val, ok := npTags[tmcTag]; !ok {
npTags[tmcTag] = tmcVal
} else if val == tmcVal {
err = errors.Errorf("key:%v, val:%v", tmcTag, val)
}
}

return npTags, err
}
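
A short sketch of the merge semantics (cluster and nodepool tag values invented for the example; note that when the nodepool already has tags, the returned map aliases the input rather than copying it):

    clusterTags := map[string]string{"env": "prod", "owner": "platform"}

    // overriding an inherited tag with a different value is allowed
    merged, err := copyClusterTagsToNodepools(map[string]string{"env": "dev"}, clusterTags)
    // merged == map[env:dev owner:platform], err == nil

    // repeating a cluster tag with the identical value is rejected
    _, err = copyClusterTagsToNodepools(map[string]string{"env": "prod"}, clusterTags)
    // err != nil: "key:env, val:prod"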
18 changes: 17 additions & 1 deletion internal/resources/ekscluster/resource_ekscluster.go
@@ -506,7 +506,7 @@ func constructAddonsConfig(data []interface{}) *eksmodel.VmwareTanzuManageV1alph
addonsConfig := &eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig{}

if len(data) == 0 || data[0] == nil {
-		return addonsConfig
+		return nil
}

addonsConfigData, _ := data[0].(map[string]interface{})
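
A plausible motivation for returning nil rather than the empty struct: an unset addons config comes back from the API as nil, and an empty struct never compares equal to nil, so an absent addons_config block would keep registering as a change. A sketch under that assumption (the comparison below is illustrative, not the provider's actual diff logic):

    var remote *eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig // nil when unset on the TMC side

    // old behavior: an empty struct, which never deep-equals a nil remote value
    fmt.Println(reflect.DeepEqual(remote, &eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig{})) // false

    // new behavior: a nil pointer of the same type, which does
    var local *eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig
    fmt.Println(reflect.DeepEqual(remote, local)) // true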
@@ -597,6 +597,15 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, m interf

clusterFn := constructFullname(d)
clusterSpec, nps := constructEksClusterSpec(d)
// Copy tags from cluster to nodepool
for _, npDefData := range nps {
var err error
npDefData.Spec.Tags, err = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)

if err != nil {
return diag.FromErr(errors.Wrap(err, "Nodepool tags should not be the same as cluster tags"))
}
}

clusterReq := &eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest{
EksCluster: &eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{
@@ -687,6 +696,13 @@ func resourceClusterInPlaceUpdate(ctx context.Context, d *schema.ResourceData, m

clusterSpec, nodepools := constructEksClusterSpec(d)

// Copy tags from cluster to nodepool
for _, npDefData := range nodepools {
npDefData.Spec.Tags, err = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
if err != nil {
return diag.FromErr(errors.Wrap(err, "Nodepool tags should not be the same as cluster tags"))
}
}
// EKS cluster update API on TMC side ignores nodepools passed to it.
// The nodepools have to be updated via separate nodepool API, hence we
// deal with them separately.
10 changes: 8 additions & 2 deletions internal/resources/ekscluster/resource_ekscluster_test.go
@@ -422,6 +422,8 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
RoleArn: controlPlaneRoleARN,
Tags: map[string]string{
"tmc.cloud.vmware.com/tmc-managed": "true",
"testtag": "testval",
"newtesttag": "newtestval",
},
KubernetesNetworkConfig: &eksmodel.VmwareTanzuManageV1alpha1EksclusterKubernetesNetworkConfig{
ServiceCidr: "10.100.0.0/16",
@@ -467,7 +469,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
CapacityType: "ON_DEMAND",
RootDiskSize: 40,
Tags: map[string]string{
"testnptag": "testnptagvalue",
"testnptag": "testnptagvalue",
"newtesttag": "testingtagvalue",
"testtag": "testval",
},
NodeLabels: map[string]string{
"testnplabelkey": "testnplabelvalue",
@@ -508,7 +512,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
Spec: &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec{
RoleArn: workerRoleArn,
Tags: map[string]string{
"testnptag": "testnptagvalue",
"testnptag": "testnptagvalue",
"newtesttag": "testingtagvalue",
"testtag": "testval",
},
NodeLabels: map[string]string{
"testnplabelkey": "testnplabelvalue",
31 changes: 11 additions & 20 deletions internal/resources/testing/test_config.go
@@ -18,6 +18,7 @@ const testDefaultCreateEksClusterScript = `
config {
kubernetes_version = "{{.KubernetesVersion}}"
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
tags = { "testtag" : "testval", "newtesttag": "newtestval"}
kubernetes_network_config {
service_cidr = "10.100.0.0/16" // Forces new
}
@@ -35,13 +36,10 @@ const testDefaultCreateEksClusterScript = `
"0.0.0.0/0",
]
security_groups = [ // Forces new
"sg-0b77767aa25e20fec",
"sg-09247a89b01962bd9",
]
subnet_ids = [ // Forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
]
}
}
@@ -56,20 +54,16 @@ const testDefaultCreateEksClusterScript = `
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
ami_type = "AL2_x86_64" // Forces New
capacity_type = "ON_DEMAND"
-			root_disk_size = 20 // Default: 20GiB, forces New
-			tags           = { "testnptag" : "testnptagvalue" }
+			root_disk_size = 40 // Default: 20GiB, forces New
+			tags           = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue" }
node_labels = { "testnplabelkey" : "testnplabelvalue" }
subnet_ids = [ // Required, forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
]
scaling_config {
-				desired_size = 2
-				max_size     = 2
-				min_size     = 2
+				desired_size = 4
+				max_size     = 8
+				min_size     = 1
}
update_config {
max_unavailable_nodes = "2"
@@ -89,13 +83,10 @@ const testDefaultCreateEksClusterScript = `
spec {
// Refer to nodepool's schema
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
tags = { "testnptag" : "testnptagvalue" }
tags = { "testnptag" : "testnptagvalue", "newtesttag": "testingtagvalue"}
node_labels = { "testnplabelkey" : "testnplabelvalue" }
subnet_ids = [ // Required, forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0e3bcd8e3c06a4bf0", "subnet-06427cefa730aeae7", "subnet-07c17b758e92356f6", "subnet-0a081ddc6ff1070d0"
]
scaling_config {
desired_size = 4
2 changes: 1 addition & 1 deletion internal/resources/testing/test_helper.go
@@ -202,7 +202,7 @@ func TestGetDefaultEksAcceptanceConfig() *TestAcceptanceConfig {
AWSAccountNumber: "919197287370",
Region: "us-west-2",
ClusterGroupName: "default",
-		KubernetesVersion:        "1.26",
+		KubernetesVersion:        "1.23",
CredentialName: "PLACE_HOLDER",
CloudFormationTemplateID: "PLACE_HOLDER",
}
