Applied cluster tags to nodepools; don't report this as a Terraform change #369

Merged · 6 commits · Feb 21, 2024 · Changes from 4 commits
17 changes: 16 additions & 1 deletion internal/resources/ekscluster/data_source.go
@@ -143,7 +143,6 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan

// see the explanation of this in the func doc of nodepoolPosMap
npPosMap := nodepoolPosMap(tfNodepools)

nodepools := make([]*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolDefinition, len(tfNodepools))

for _, np := range remoteNodepools {
@@ -154,6 +153,7 @@ func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuMan
},
Spec: np.Spec,
}
npDef.Spec.Tags = filterOutClusterTags(np.Spec.Tags, eksCluster.Spec.Config.Tags)

if pos, ok := npPosMap[np.FullName.Name]; ok {
nodepools[pos] = npDef
@@ -191,3 +191,18 @@ func nodepoolPosMap(nps []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolD

return ret
}

// filterOutClusterTags removes cluster tags from the nodepool's tags while checking the diff.
func filterOutClusterTags(npTags, clusterTags map[string]string) map[string]string {
npWithoutClusterTags := make(map[string]string)

for k, v := range npTags {
if val, ok := clusterTags[k]; !ok {
npWithoutClusterTags[k] = v
} else if v != val {
npWithoutClusterTags[k] = v
}
}

return npWithoutClusterTags
}
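
As a quick illustration of the filtering rule, here is a hypothetical test (not part of the PR, assuming it sits next to data_source.go in package ekscluster): a nodepool tag survives only if its key is absent from the cluster tags or carries a value different from the cluster's, which preserves deliberate per-nodepool overrides.

```go
package ekscluster

import (
	"reflect"
	"testing"
)

// Sketch of filterOutClusterTags behavior with made-up tags.
func TestFilterOutClusterTags(t *testing.T) {
	clusterTags := map[string]string{"env": "prod", "team": "infra"}
	npTags := map[string]string{
		"env":  "prod",     // same key and value as the cluster: filtered out
		"team": "platform", // same key, different value: kept as an override
		"role": "worker",   // nodepool-only tag: kept
	}

	got := filterOutClusterTags(npTags, clusterTags)
	want := map[string]string{"team": "platform", "role": "worker"}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("filterOutClusterTags() = %v, want %v", got, want)
	}
}
```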
15 changes: 15 additions & 0 deletions internal/resources/ekscluster/helpers.go
@@ -193,3 +193,18 @@ func setEquality(s1, s2 []string) bool {

return true
}

func copyClusterTagsToNodepools(nodepoolTags map[string]string, eksTags map[string]string) map[string]string {
npTags := make(map[string]string)
if len(nodepoolTags) > 0 {
npTags = nodepoolTags
}

for tmcTag, tmcVal := range eksTags {
if _, ok := npTags[tmcTag]; !ok {
npTags[tmcTag] = tmcVal
}
}

return npTags
}
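
The merge runs in the opposite direction on write. Another hypothetical test (not part of the PR) sketches it: every cluster tag is copied into the nodepool's tag map, but a key the nodepool already sets keeps the nodepool's value.

```go
package ekscluster

import (
	"reflect"
	"testing"
)

// Sketch of copyClusterTagsToNodepools with made-up tags.
func TestCopyClusterTagsToNodepools(t *testing.T) {
	clusterTags := map[string]string{"env": "prod", "owner": "platform"}
	npTags := map[string]string{"owner": "team-a"} // nodepool override wins

	got := copyClusterTagsToNodepools(npTags, clusterTags)
	want := map[string]string{"env": "prod", "owner": "team-a"}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("copyClusterTagsToNodepools() = %v, want %v", got, want)
	}
}
```

Worth noting from the code above: when the nodepool already has tags, the helper writes the cluster tags into that same map rather than a copy, so the caller's map is mutated in place.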
10 changes: 9 additions & 1 deletion internal/resources/ekscluster/resource_ekscluster.go
@@ -495,7 +495,7 @@ func constructAddonsConfig(data []interface{}) *eksmodel.VmwareTanzuManageV1alph
addonsConfig := &eksmodel.VmwareTanzuManageV1alpha1EksclusterAddonsConfig{}

if len(data) == 0 || data[0] == nil {
return addonsConfig
return nil
}

addonsConfigData, _ := data[0].(map[string]interface{})
@@ -586,6 +586,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, m interf

clusterFn := constructFullname(d)
clusterSpec, nps := constructEksClusterSpec(d)
// Copy tags from cluster to nodepool
for _, npDefData := range nps {
npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
}

clusterReq := &eksmodel.VmwareTanzuManageV1alpha1EksclusterCreateUpdateEksClusterRequest{
EksCluster: &eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster{
@@ -676,6 +680,10 @@ func resourceClusterInPlaceUpdate(ctx context.Context, d *schema.ResourceData, m

clusterSpec, nodepools := constructEksClusterSpec(d)

// Copy tags from cluster to nodepool
for _, npDefData := range nodepools {
npDefData.Spec.Tags = copyClusterTagsToNodepools(npDefData.Spec.Tags, clusterSpec.Config.Tags)
}
// EKS cluster update API on TMC side ignores nodepools passed to it.
// The nodepools have to be updated via separate nodepool API, hence we
// deal with them separately.
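
Taken together, the two halves of this PR form a round trip: copyClusterTagsToNodepools merges cluster tags into each nodepool before the create/update call, and filterOutClusterTags strips them again when state is read back, so the TMC-applied cluster tags never show up as drift. A hypothetical test of that invariant (not part of the PR):

```go
package ekscluster

import (
	"reflect"
	"testing"
)

// Round trip: tags merged in before the API call are filtered back out
// when state is read, leaving only the user's own nodepool tags.
func TestClusterTagsRoundTrip(t *testing.T) {
	clusterTags := map[string]string{"testclustertag": "testclustertagvalue"}
	npTags := map[string]string{"testnptag": "testnptagvalue"}

	written := copyClusterTagsToNodepools(npTags, clusterTags) // sent to the TMC API
	readBack := filterOutClusterTags(written, clusterTags)     // stored in Terraform state

	// Compare against a fresh literal: npTags itself is mutated by the copy.
	want := map[string]string{"testnptag": "testnptagvalue"}
	if !reflect.DeepEqual(readBack, want) {
		t.Errorf("round trip changed nodepool tags: got %v, want %v", readBack, want)
	}
}
```

One edge case follows directly from the code: a user-set nodepool tag whose key and value exactly match a cluster tag is indistinguishable from a copied tag and is filtered out of state as well.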
13 changes: 11 additions & 2 deletions internal/resources/ekscluster/resource_ekscluster_test.go
@@ -422,6 +422,9 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
RoleArn: controlPlaneRoleARN,
Tags: map[string]string{
"tmc.cloud.vmware.com/tmc-managed": "true",
"testclustertag": "testclustertagvalue",
"testingtag": "testingtagvalue",
"testsametag": "testsametagval",
},
KubernetesNetworkConfig: &eksmodel.VmwareTanzuManageV1alpha1EksclusterKubernetesNetworkConfig{
ServiceCidr: "10.100.0.0/16",
@@ -467,7 +470,10 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
CapacityType: "ON_DEMAND",
RootDiskSize: 40,
Tags: map[string]string{
"testnptag": "testnptagvalue",
"testnptag": "testnptagvalue",
"testingtag": "testingnptagvalue",
"testsametag": "testsametagval",
"testclustertag": "testclustertagvalue",
},
NodeLabels: map[string]string{
"testnplabelkey": "testnplabelvalue",
@@ -508,7 +514,10 @@ func getMockEksClusterSpec(accountID string, templateID string) (eksmodel.Vmware
Spec: &eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolSpec{
RoleArn: workerRoleArn,
Tags: map[string]string{
"testnptag": "testnptagvalue",
"testnptag": "testnptagvalue",
"testingtag": "testingnptagvalue",
"testsametag": "testsametagval",
"testclustertag": "testclustertagvalue",
},
NodeLabels: map[string]string{
"testnplabelkey": "testnplabelvalue",
54 changes: 33 additions & 21 deletions internal/resources/testing/test_config.go
@@ -18,6 +18,7 @@ const testDefaultCreateEksClusterScript = `
config {
kubernetes_version = "{{.KubernetesVersion}}"
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/control-plane.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
tags = { "testclustertag" : "testclustertagvalue", "testingtag": "testingtagvalue", "testsametag":"testsametagval"}
kubernetes_network_config {
service_cidr = "10.100.0.0/16" // Forces new
}
@@ -35,13 +36,13 @@ const testDefaultCreateEksClusterScript = `
"0.0.0.0/0",
]
security_groups = [ // Forces new
"sg-0b77767aa25e20fec",
"sg-0a6768722e9716768",
]
subnet_ids = [ // Forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0a184f6302af32a86",
"subnet-0ed95d5c212ac62a1",
"subnet-0526ecaecde5b1bf7",
"subnet-06897e1063cc0cf4e",
]
}
}
@@ -54,22 +55,29 @@ const testDefaultCreateEksClusterScript = `
spec {
// Refer to nodepool's schema
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
ami_type = "AL2_x86_64" // Forces New
ami_type = "CUSTOM" // Forces New
capacity_type = "ON_DEMAND"
root_disk_size = 20 // Default: 20GiB, forces New
tags = { "testnptag" : "testnptagvalue" }
ami_info {
ami_id = "ami-2qu8409oisdfj0qw"
override_bootstrap_cmd = "#!/bin/bash\n/etc/eks/bootstrap.sh tf-test-ami"
}
remote_access {
ssh_key = "anshulc"
security_groups = ["sg-0a6768722e9716768"]
}
root_disk_size = 40 // Default: 20GiB, forces New
tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
node_labels = { "testnplabelkey" : "testnplabelvalue" }
subnet_ids = [ // Required, forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0a184f6302af32a86",
"subnet-0ed95d5c212ac62a1",
"subnet-0526ecaecde5b1bf7",
"subnet-06897e1063cc0cf4e",
]

scaling_config {
desired_size = 2
max_size = 2
min_size = 2
desired_size = 4
max_size = 8
min_size = 1
}
update_config {
max_unavailable_nodes = "2"
@@ -89,13 +97,17 @@ const testDefaultCreateEksClusterScript = `
spec {
// Refer to nodepool's schema
role_arn = "arn:aws:iam::{{.AWSAccountNumber}}:role/worker.{{.CloudFormationTemplateID}}.eks.tmc.cloud.vmware.com"
tags = { "testnptag" : "testnptagvalue" }
tags = { "testnptag" : "testnptagvalue", "testingtag": "testingnptagvalue"}
node_labels = { "testnplabelkey" : "testnplabelvalue" }
launch_template {
name = "PLACE_HOLDER"
version = "PLACE_HOLDER"
}
subnet_ids = [ // Required, forces new
"subnet-0c285da60b373a4cc",
"subnet-0be854d94fa197cb7",
"subnet-04975d535cf761785",
"subnet-0d50aa17c694457c9",
"subnet-0a184f6302af32a86",
"subnet-0ed95d5c212ac62a1",
"subnet-0526ecaecde5b1bf7",
"subnet-06897e1063cc0cf4e",
]
scaling_config {
desired_size = 4
2 changes: 1 addition & 1 deletion internal/resources/testing/test_helper.go
@@ -202,7 +202,7 @@ func TestGetDefaultEksAcceptanceConfig() *TestAcceptanceConfig {
AWSAccountNumber: "919197287370",
Region: "us-west-2",
ClusterGroupName: "default",
KubernetesVersion: "1.26",
KubernetesVersion: "1.23",
CredentialName: "PLACE_HOLDER",
CloudFormationTemplateID: "PLACE_HOLDER",
}