Commit d7b296c

Author: Igor Kanshyn (committed)
Adding secondary IP fields, wrote non-comprehensive validation.
Signed-off-by: Igor Kanshyn <ikanshyn@vmware.com>

1 parent bc2a10e, commit d7b296c
11 files changed: +313 -35 lines changed

README.md (+8)

@@ -211,6 +211,14 @@ The plugin will read the descriptions and schema of each resource and data sourc
 
 Please refer to `examples` folder to perform CRUD operations with Tanzu Mission Control provider for various resources
 
+# Troubleshooting
+
+## Execution of a different version of the provider
+Terraform will always look for the latest version of the provider and will use it even if you have just built a previous version.
+Terraform caches all known builds/versions in the cache folder located in ```~/.terraform.d```.
+
+Delete the ```~/.terraform.d/plugins/vmware``` folder to remove all cached versions of the plugin.
+
 # Support
 
 The Tanzu Mission Control Terraform provider is now VMware supported as well as community supported. For bugs and feature requests please open a Github Issue and label it appropriately or contact VMware support.

internal/models/akscluster/cluster_s_k_u_name.go (+3)

@@ -33,6 +33,9 @@ const (
 
 	// VmwareTanzuManageV1alpha1AksclusterClusterSKUNameBASIC captures enum value "BASIC".
 	VmwareTanzuManageV1alpha1AksclusterClusterSKUNameBASIC VmwareTanzuManageV1alpha1AksclusterClusterSKUName = "BASIC"
+
+	// VmwareTanzuManageV1alpha1AksclusterClusterNetworkPluginModeOverlay captures value "overlay".
+	VmwareTanzuManageV1alpha1AksclusterClusterNetworkPluginModeOverlay VmwareTanzuManageV1alpha1AksclusterClusterSKUName = "overlay"
 )
 
 // for schema.

internal/models/akscluster/network_config.go (+3)

@@ -31,6 +31,9 @@ type VmwareTanzuManageV1alpha1AksclusterNetworkConfig struct {
 	// Network plugin of the cluster. The valid value is azure, kubenet and none.
 	NetworkPlugin string `json:"networkPlugin,omitempty"`
 
+	// Network plugin mode of the cluster. The valid values are overlay and ''.
+	NetworkPluginMode string `json:"networkPluginMode,omitempty"`
+
 	// Network policy of the cluster. The valid value is azure and calico.
 	NetworkPolicy string `json:"networkPolicy,omitempty"`
 
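For context, a minimal standalone sketch (not part of the commit) of how the new field behaves on the wire: with `omitempty`, an unset plugin mode is dropped from the request payload, while "overlay" is serialized as `networkPluginMode`. The struct below is a trimmed stand-in for the real model.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for VmwareTanzuManageV1alpha1AksclusterNetworkConfig with only
// the fields relevant here; the real model lives in internal/models/akscluster.
type networkConfig struct {
	NetworkPlugin     string `json:"networkPlugin,omitempty"`
	NetworkPluginMode string `json:"networkPluginMode,omitempty"`
	NetworkPolicy     string `json:"networkPolicy,omitempty"`
}

func main() {
	withMode, _ := json.Marshal(networkConfig{NetworkPlugin: "azure", NetworkPluginMode: "overlay"})
	withoutMode, _ := json.Marshal(networkConfig{NetworkPlugin: "kubenet"})

	fmt.Println(string(withMode))    // {"networkPlugin":"azure","networkPluginMode":"overlay"}
	fmt.Println(string(withoutMode)) // {"networkPlugin":"kubenet"}
}
```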

internal/resources/akscluster/akscluster_mapper.go (+10, -5)

@@ -6,8 +6,6 @@ SPDX-License-Identifier: MPL-2.0
 package akscluster
 
 import (
-	"errors"
-
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 
 	"github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper"
@@ -260,6 +258,10 @@ func constructNetworkConfig(data []any) (*models.VmwareTanzuManageV1alpha1Aksclu
 		helper.SetPrimitiveValue(v, &networkConfig.NetworkPlugin, networkPluginKey)
 	}
 
+	if v, ok := networkConfigData[networkPluginModeKey]; ok {
+		helper.SetPrimitiveValue(v, &networkConfig.NetworkPluginMode, networkPluginModeKey)
+	}
+
 	if v, ok := networkConfigData[networkPolicyKey]; ok {
 		helper.SetPrimitiveValue(v, &networkConfig.NetworkPolicy, networkPolicyKey)
 	}
@@ -288,9 +290,11 @@ func constructNetworkConfig(data []any) (*models.VmwareTanzuManageV1alpha1Aksclu
 		networkConfig.PodCidrs = helper.SetPrimitiveList[string](v.([]any))
 	}
 
-	if networkConfig.NetworkPlugin == "kubenet" && (networkConfig.DNSServiceIP != "" || networkConfig.ServiceCidrs != nil) {
-		return networkConfig, errors.New("can not set network_config.dns_service_ip or network_config.service_cidr when network_config.network_plugin is set to kubenet")
-	}
+	// Validation: DNS service IP or service CIDR cannot be set if network plugin is 'kubenet'.
+	// TODO: this does not look like a valid requirement so far. Needs an extra check.
+	//if networkConfig.NetworkPlugin == "kubenet" && (networkConfig.DNSServiceIP != "" || networkConfig.ServiceCidrs != nil) {
+	//	return networkConfig, errors.New("can not set network_config.dns_service_ip or network_config.service_cidr when network_config.network_plugin is set to kubenet")
+	//}
 
 	return networkConfig, nil
 }
@@ -575,6 +579,7 @@ func toNetworkConfigMap(config *models.VmwareTanzuManageV1alpha1AksclusterNetwor
 	data := make(map[string]any)
 	data[loadBalancerSkuKey] = config.LoadBalancerSku
 	data[networkPluginKey] = config.NetworkPlugin
+	data[networkPluginModeKey] = config.NetworkPluginMode
 	data[networkPolicyKey] = config.NetworkPolicy
 	data[dnsPrefixKey] = config.DNSPrefix
 	data[dnsServiceIPKey] = config.DNSServiceIP

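For illustration only, the lookup added to `constructNetworkConfig` copies an optional `network_plugin_mode` entry from the flattened Terraform data onto the model. A simplified, self-contained version of that mapping (with a stand-in for `helper.SetPrimitiveValue`) might look like this:

```go
package main

import "fmt"

// Simplified stand-in for the network config model.
type networkConfig struct {
	NetworkPlugin     string
	NetworkPluginMode string
}

// setIfString mimics the role helper.SetPrimitiveValue plays for string fields:
// copy the value only when the key is present and holds a string.
func setIfString(data map[string]any, key string, target *string) {
	if v, ok := data[key]; ok {
		if s, ok := v.(string); ok {
			*target = s
		}
	}
}

func main() {
	// Flattened network_config block as Terraform hands it to the mapper.
	networkConfigData := map[string]any{
		"network_plugin":      "azure",
		"network_plugin_mode": "overlay",
	}

	cfg := &networkConfig{}
	setIfString(networkConfigData, "network_plugin", &cfg.NetworkPlugin)
	setIfString(networkConfigData, "network_plugin_mode", &cfg.NetworkPluginMode)

	fmt.Printf("%+v\n", cfg) // &{NetworkPlugin:azure NetworkPluginMode:overlay}
}
```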

internal/resources/akscluster/akscluster_mapper_test.go (+1)

@@ -28,6 +28,7 @@ func Test_ConstructAKSCluster(t *testing.T) {
 }
 
 func Test_ConstructAKSCluster_withInvalidNetworkConfig(t *testing.T) {
+	t.Skip("Skip until we are sure that we have valid requirements")
 	tests := []*schema.ResourceData{
 		schema.TestResourceDataRaw(t, akscluster.ClusterSchema, aTestClusterDataMap(withNetworkPlugin("kubenet"))),
 		schema.TestResourceDataRaw(t, akscluster.ClusterSchema, aTestClusterDataMap(withNetworkPlugin("kubenet"), withoutNetworkDNSServiceIP)),

internal/resources/akscluster/constants.go (+1)

@@ -49,6 +49,7 @@ const (
 	networkConfigKey     = "network_config"
 	loadBalancerSkuKey   = "load_balancer_sku"
 	networkPluginKey     = "network_plugin"
+	networkPluginModeKey = "network_plugin_mode"
 	networkPolicyKey     = "network_policy"
 	dnsServiceIPKey      = "dns_service_ip"
 	dockerBridgeCidrKey  = "docker_bridge_cidr"

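The commit also introduces the `networkPluginModeKey` schema key, but the corresponding schema definition is not part of this excerpt. Purely as an assumption, a minimal sketch of how such a string attribute could be declared with terraform-plugin-sdk/v2 (field choices are illustrative, not taken from the commit):

```go
package akscluster

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// Hypothetical sketch only: how a network_plugin_mode attribute might be wired
// into the network_config schema. The real schema change is not shown above.
var networkPluginModeSchema = &schema.Schema{
	Type:        schema.TypeString,
	Description: "Network plugin mode of the cluster. The valid values are overlay and ''.",
	Optional:    true,
	ForceNew:    true, // assumption: changing the plugin mode likely forces recreation
}
```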

internal/resources/akscluster/helpers_test.go (+64, -11)

@@ -51,6 +51,16 @@ func withStatusSuccess(c *models.VmwareTanzuManageV1alpha1AksCluster) {
 	}
 }
 
+func withTestPodCIDR(c *models.VmwareTanzuManageV1alpha1AksCluster) {
+	c.Spec = &models.VmwareTanzuManageV1alpha1AksclusterSpec{
+		Config: &models.VmwareTanzuManageV1alpha1AksclusterClusterConfig{
+			NetworkConfig: &models.VmwareTanzuManageV1alpha1AksclusterNetworkConfig{
+				PodCidrs: []string{"10.1.0.0/16"},
+			},
+		},
+	}
+}
+
 func withNodepoolStatusSuccess(c *models.VmwareTanzuManageV1alpha1AksclusterNodepoolNodepool) {
 	c.Status = &models.VmwareTanzuManageV1alpha1AksclusterNodepoolStatus{
 		Phase: models.VmwareTanzuManageV1alpha1AksclusterNodepoolPhaseREADY.Pointer(),
@@ -118,7 +128,7 @@ func aTestCluster(w ...clusterWither) *models.VmwareTanzuManageV1alpha1AksCluste
 				LoadBalancerSku: "load-balancer",
 				NetworkPlugin:   "azure",
 				NetworkPolicy:   "policy",
-				PodCidrs:        []string{"127.0.0.3"},
+				PodCidrs:        []string{""},
 				ServiceCidrs:    []string{"127.0.0.4"},
 			},
 			StorageConfig: &models.VmwareTanzuManageV1alpha1AksclusterStorageConfig{
@@ -202,6 +212,30 @@ func withNetworkPlugin(plugin string) mapWither {
 	}
 }
 
+func withPodCIDR(podCIDR []any) mapWither {
+	return func(m map[string]any) {
+		specs := m["spec"].([]any)
+		spec := specs[0].(map[string]any)
+		configs := spec["config"].([]any)
+		config := configs[0].(map[string]any)
+		networks := config["network_config"].([]any)
+		network := networks[0].(map[string]any)
+		network["pod_cidr"] = podCIDR
+	}
+}
+
+func withNetworkPluginMode(pluginMode string) mapWither {
+	return func(m map[string]any) {
+		specs := m["spec"].([]any)
+		spec := specs[0].(map[string]any)
+		configs := spec["config"].([]any)
+		config := configs[0].(map[string]any)
+		networks := config["network_config"].([]any)
+		network := networks[0].(map[string]any)
+		network["network_plugin_mode"] = pluginMode
+	}
+}
+
 func withoutNetworkDNSServiceIP(m map[string]any) {
 	specs := m["spec"].([]any)
 	spec := specs[0].(map[string]any)
@@ -260,6 +294,22 @@ func withNodepoolMode(mode string) mapWither {
 	}
 }
 
+func withNodeSubnetId(nodeSubnetId string) mapWither {
+	return func(m map[string]any) {
+		specs := m["spec"].([]any)
+		spec := specs[0].(map[string]any)
+		spec["vnet_subnet_id"] = nodeSubnetId
+	}
+}
+
+func withPodSubnetId(podSubnetId string) mapWither {
+	return func(m map[string]any) {
+		specs := m["spec"].([]any)
+		spec := specs[0].(map[string]any)
+		spec["pod_subnet_id"] = podSubnetId
+	}
+}
+
 func aTestClusterDataMap(w ...mapWither) map[string]any {
 	m := map[string]any{
 		"credential_name": "test-cred",
@@ -300,14 +350,15 @@ func aTestClusterDataMap(w ...mapWither) map[string]any {
 			"ssh_keys": []any{"key1", "key2"},
 		}},
 		"network_config": []any{map[string]any{
-			"load_balancer_sku":  "load-balancer",
-			"network_plugin":     "azure",
-			"network_policy":     "policy",
-			"dns_service_ip":     "127.0.0.1",
-			"docker_bridge_cidr": "127.0.0.2",
-			"pod_cidr":           []any{"127.0.0.3"},
-			"service_cidr":       []any{"127.0.0.4"},
-			"dns_prefix":         "net-prefix",
+			"load_balancer_sku":   "load-balancer",
+			"network_plugin":      "azure",
+			"network_plugin_mode": "",
+			"network_policy":      "policy",
+			"dns_service_ip":      "127.0.0.1",
+			"docker_bridge_cidr":  "127.0.0.2",
+			"pod_cidr":            []any{""},
+			"service_cidr":        []any{"127.0.0.4"},
+			"dns_prefix":          "net-prefix",
 		}},
 		"storage_config": []any{map[string]any{
 			"enable_disk_csi_driver": true,
@@ -417,7 +468,8 @@ func aTestNodePool(w ...nodepoolWither) *models.VmwareTanzuManageV1alpha1Aksclus
 			UpgradeConfig: &models.VmwareTanzuManageV1alpha1AksclusterNodepoolUpgradeConfig{
 				MaxSurge: "50%",
 			},
-			VnetSubnetID: "subnet-1",
+			VnetSubnetID: "vnet-1/subnets/subnet-1",
+			PodSubnetID:  "vnet-1/subnets/subnet-2",
 		},
 	}
 
@@ -453,7 +505,8 @@ func aTestNodepoolDataMap(w ...mapWither) map[string]any {
 				"value": "tval",
 			},
 		},
-		"vnet_subnet_id": "subnet-1",
+		"vnet_subnet_id": "vnet-1/subnets/subnet-1",
+		"pod_subnet_id":  "vnet-1/subnets/subnet-2",
 		"node_labels": map[string]any{"label": "val"},
 		"tags":        map[string]any{"tmc.node.tag": "val"},
 		"auto_scaling_config": []any{map[string]any{

internal/resources/akscluster/resource_akscluster.go (+47, -18)

@@ -42,12 +42,29 @@ func resourceClusterCreate(ctx context.Context, data *schema.ResourceData, confi
 		return diag.Errorf("error while retrieving Tanzu auth config")
 	}
 
+	cluster, cErr := ConstructCluster(data)
+	if cErr != nil {
+		return diag.FromErr(cErr)
+	}
+
 	nodepools := ConstructNodepools(data)
-	if err := validate(nodepools); err != nil {
+
+	if err := validateCluster(cluster); err != nil {
 		return diag.FromErr(err)
 	}
 
-	if err := createOrUpdateCluster(data, tc.TMCConnection.AKSClusterResourceService); err != nil {
+	// Validate all node pools together.
+	if err := validateAllNodePools(nodepools); err != nil {
+		return diag.FromErr(err)
+	}
+	// Validate every node pool individually.
+	for _, nodepool := range nodepools {
+		if err := validateNodePool(cluster, nodepool); err != nil {
+			return diag.FromErr(err)
+		}
+	}
+
+	if err := createOrUpdateCluster(cluster, data, tc.TMCConnection.AKSClusterResourceService); err != nil {
 		return diag.FromErr(err)
 	}
 
@@ -168,24 +185,9 @@ func resourceClusterImporter(_ context.Context, data *schema.ResourceData, confi
 	return []*schema.ResourceData{data}, nil
 }
 
-// validate returns an error if the configuration will result in a cluster that will fail to create.
-func validate(nodepools []*models.VmwareTanzuManageV1alpha1AksclusterNodepoolNodepool) error {
-	for _, n := range nodepools {
-		if *n.Spec.Mode == models.VmwareTanzuManageV1alpha1AksclusterNodepoolModeSYSTEM {
-			return nil
-		}
-	}
-
-	return errors.New("AKS cluster must contain at least 1 SYSTEM nodepool")
-}
-
 // createOrUpdateCluster creates an AKS cluster in TMC. It is possible the cluster already exists in which case the
 // existing cluster is updated with any node pools defined in the configuration.
-func createOrUpdateCluster(data *schema.ResourceData, client akscluster.ClientService) error {
-	cluster, cErr := ConstructCluster(data)
-	if cErr != nil {
-		return cErr
-	}
+func createOrUpdateCluster(cluster *models.VmwareTanzuManageV1alpha1AksCluster, data *schema.ResourceData, client akscluster.ClientService) error {
 
 	clusterReq := &models.VmwareTanzuManageV1alpha1AksclusterCreateAksClusterRequest{AksCluster: cluster}
 	createResp, err := client.AksClusterResourceServiceCreate(clusterReq)
@@ -237,6 +239,33 @@ func updateClusterConfig(ctx context.Context, data *schema.ResourceData, cluster
 	return pollUntilReady(ctxTimeout, data, tc.TMCConnection, getPollInterval(ctx))
 }
 
+// validateCluster returns an error if the configuration will result in a cluster that will fail to create.
+func validateCluster(cluster *models.VmwareTanzuManageV1alpha1AksCluster) error {
+	nc := cluster.Spec.Config.NetworkConfig
+
+	// network_plugin_mode 'overlay' is only supported with the 'azure' network plugin.
+	if nc.NetworkPlugin != "azure" && nc.NetworkPluginMode == "overlay" {
+		return errors.New("network_plugin_mode 'overlay' can only be used if network_plugin is set to 'azure'")
+	}
+	// podCIDR cannot be set if network-plugin is 'azure' without 'overlay'.
+	if nc.NetworkPlugin == "azure" && nc.NetworkPluginMode != "overlay" && !emptyStringArray(nc.PodCidrs) {
+		return errors.New("podCIDR cannot be set if network-plugin is 'azure' without 'overlay'")
+	}
+	return nil
+}
+
+func emptyStringArray(strArray []string) bool {
+	if len(strArray) == 0 {
+		return true
+	}
+	for _, value := range strArray {
+		if value != "" {
+			return false
+		}
+	}
+	return true
+}
+
 func pollUntilReady(ctx context.Context, data *schema.ResourceData, mc *client.TanzuMissionControl, interval time.Duration) error {
 	ticker := time.NewTicker(interval)
 	defer ticker.Stop()

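To make the two new rules concrete, here is a standalone sketch that restates the same checks over plain values (the real code walks the TMC cluster model and treats a slice of empty strings as unset via `emptyStringArray`):

```go
package main

import (
	"errors"
	"fmt"
)

// validateNetwork restates the two checks from validateCluster on plain values:
//  1. network_plugin_mode "overlay" is only allowed with network_plugin "azure".
//  2. Pod CIDRs may not be set when the plugin is "azure" without "overlay".
// Simplification: a non-empty slice counts as "set"; the real code also treats
// a slice containing only empty strings as unset.
func validateNetwork(plugin, mode string, podCIDRs []string) error {
	if plugin != "azure" && mode == "overlay" {
		return errors.New("network_plugin_mode 'overlay' can only be used if network_plugin is set to 'azure'")
	}
	if plugin == "azure" && mode != "overlay" && len(podCIDRs) > 0 {
		return errors.New("pod CIDR cannot be set if network_plugin is 'azure' without 'overlay'")
	}
	return nil
}

func main() {
	fmt.Println(validateNetwork("azure", "overlay", []string{"10.1.0.0/16"})) // <nil>
	fmt.Println(validateNetwork("kubenet", "overlay", nil))                   // error: overlay requires azure
	fmt.Println(validateNetwork("azure", "", []string{"10.1.0.0/16"}))        // error: pod CIDR with plain azure
}
```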
