From f84f4944480590434c7f8745e9d4fa9ccfbdbde6 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Wed, 13 Dec 2023 17:06:10 -0800 Subject: [PATCH 01/22] wait for mgmt cluster healthy and kubeconfig implemented --- internal/models/ekscluster/spec.go | 6 +++ internal/resources/ekscluster/constants.go | 2 + internal/resources/ekscluster/data_source.go | 42 +++++++++++++++++ .../resources/ekscluster/data_source_test.go | 46 +++++++++++++++++-- .../ekscluster/resource_ekscluster.go | 20 ++++++++ 5 files changed, 113 insertions(+), 3 deletions(-) diff --git a/internal/models/ekscluster/spec.go b/internal/models/ekscluster/spec.go index 85791040f..c8d6aa31c 100644 --- a/internal/models/ekscluster/spec.go +++ b/internal/models/ekscluster/spec.go @@ -28,6 +28,12 @@ type VmwareTanzuManageV1alpha1EksclusterSpec struct { // Optional proxy name is the name of the Proxy Config // to be used for the cluster. ProxyName string `json:"proxyName,omitempty"` + + // Agent name of the cluster. + AgentName string `json:"agentName,omitempty"` + + // Arn of the cluster. + Arn string `json:"arn,omitempty"` } // MarshalBinary interface implementation diff --git a/internal/resources/ekscluster/constants.go b/internal/resources/ekscluster/constants.go index 65fee7809..3a5d2b2b9 100644 --- a/internal/resources/ekscluster/constants.go +++ b/internal/resources/ekscluster/constants.go @@ -69,4 +69,6 @@ const ( releaseVersionKey = "release_version" readyCondition = "Ready" errorSeverity = "ERROR" + waitForKubeconfig = "wait_for_kubeconfig" + kubeconfigKey = "kubeconfig" ) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index ec6fe9da1..bca713050 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -10,6 +10,8 @@ import ( "log" "strconv" "time" + // "os" + // "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -20,6 +22,9 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common" + + clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster" + configModels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/kubeconfig" ) func DataSourceTMCEKSCluster() *schema.Resource { @@ -79,6 +84,35 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return true, nil } + // if val := os.Getenv("WAIT_FOR_HEALTHY"); strings.EqualFold(val, "TRUE") || val == "" { + if isWaitForKubeconfig(d) { + clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName { Name: resp.EksCluster.Spec.AgentName, OrgID: clusterFn.OrgID, ManagementClusterName: "eks", ProvisionerName: "eks" } + clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) + if err != nil { + log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s", clusterFn.Name) + } + + if !isManagemetClusterHealthy(clusterResp) { + log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) + return true, nil + } + + fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{ + ManagementClusterName: "eks", + ProvisionerName: "eks", + Name: resp.EksCluster.Spec.AgentName, + OrgID: "", + 
} + resp, err := config.TMCConnection.KubeConfigResourceService.KubeconfigServiceGet(fn) + + if kubeConfigReady(err, resp) { + if err = d.Set(kubeconfigKey, resp.Kubeconfig); err != nil { + log.Printf("Failed to set Kubeconfig") + return true, nil + } + } + } + return false, nil } @@ -124,6 +158,14 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return diags } +func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) bool { + return cluster.Cluster.Status.Health != nil && *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY +} + +func kubeConfigReady(err error, resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool { + return err == nil && *resp.Status == configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponseStatusREADY +} + func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster, remoteNodepools []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolNodepool) error { status := map[string]interface{}{ // TODO: add condition diff --git a/internal/resources/ekscluster/data_source_test.go b/internal/resources/ekscluster/data_source_test.go index 625a5b0bb..0df694e76 100644 --- a/internal/resources/ekscluster/data_source_test.go +++ b/internal/resources/ekscluster/data_source_test.go @@ -1,6 +1,3 @@ -//go:build ekscluster -// +build ekscluster - /* Copyright 2022 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 @@ -14,6 +11,7 @@ import ( "github.com/stretchr/testify/require" eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster" + clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster" ) func TestNodepoolPosMap(t *testing.T) { @@ -62,3 +60,45 @@ func TestNodepoolPosMap(t *testing.T) { }) } } + +func TestIsManagemetClusterHealthy(t *testing.T) { + tests := []struct { + name string + cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse + response bool + err error + }{ + { + name: "Not healthy", + cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{ + Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{ + Status: &clustermodel.VmwareTanzuManageV1alpha1ClusterStatus{ + Health: clustermodel.NewVmwareTanzuManageV1alpha1CommonClusterHealth(clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthUNHEALTHY), + }, + }, + }, + response: false, + err: nil, + }, + { + name: "Healthy", + cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{ + Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{ + Status: &clustermodel.VmwareTanzuManageV1alpha1ClusterStatus{ + Health: clustermodel.NewVmwareTanzuManageV1alpha1CommonClusterHealth(clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY), + }, + }, + }, + response: true, + err: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.response != isManagemetClusterHealthy(test.cluster) { + t.Errorf("expected function output to match") + } + }) + } +} \ No newline at end of file diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go index 2e21ad705..b4fc5e220 100644 --- a/internal/resources/ekscluster/resource_ekscluster.go +++ b/internal/resources/ekscluster/resource_ekscluster.go @@ -86,6 +86,17 @@ var clusterSchema = 
map[string]*schema.Schema{ return true }, }, + waitForKubeconfig: { + Type: schema.TypeBool, + Description: "Wait until pinniped extension is ready to provide kubeconfig", + Default: false, + Optional: true, + }, + kubeconfigKey: { + Type: schema.TypeString, + Description: "Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig.", + Computed: true, + }, } var clusterSpecSchema = &schema.Schema{ @@ -958,3 +969,12 @@ func flattenEniConfig(item *eksmodel.VmwareTanzuManageV1alpha1EksclusterEniConfi return data } + +func isWaitForKubeconfig(data *schema.ResourceData) bool { + v := data.Get(waitForKubeconfig) + if v != nil { + return v.(bool) + } + + return false +} \ No newline at end of file From ef2dbc52384faaf1f62dcc3cd8e43b2306e84336 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Wed, 13 Dec 2023 17:23:37 -0800 Subject: [PATCH 02/22] Go formatting and Lint --- internal/resources/ekscluster/data_source.go | 13 +++++-------- .../resources/ekscluster/data_source_test.go | 18 +++++++++--------- .../ekscluster/resource_ekscluster.go | 4 ++-- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index bca713050..079713c16 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -10,8 +10,6 @@ import ( "log" "strconv" "time" - // "os" - // "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -84,19 +82,18 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return true, nil } - // if val := os.Getenv("WAIT_FOR_HEALTHY"); strings.EqualFold(val, "TRUE") || val == "" { if isWaitForKubeconfig(d) { - clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName { Name: resp.EksCluster.Spec.AgentName, OrgID: clusterFn.OrgID, ManagementClusterName: "eks", ProvisionerName: "eks" } + clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{Name: resp.EksCluster.Spec.AgentName, OrgID: clusterFn.OrgID, ManagementClusterName: "eks", ProvisionerName: "eks"} clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) if err != nil { log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s", clusterFn.Name) } - + if !isManagemetClusterHealthy(clusterResp) { log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) return true, nil } - + fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{ ManagementClusterName: "eks", ProvisionerName: "eks", @@ -112,7 +109,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m } } } - + return false, nil } @@ -159,7 +156,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m } func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) bool { - return cluster.Cluster.Status.Health != nil && *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY + return cluster.Cluster.Status.Health != nil && *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY } func kubeConfigReady(err error, resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool { diff --git 
a/internal/resources/ekscluster/data_source_test.go b/internal/resources/ekscluster/data_source_test.go index 0df694e76..a44660313 100644 --- a/internal/resources/ekscluster/data_source_test.go +++ b/internal/resources/ekscluster/data_source_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/require" - eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster" clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster" + eksmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/ekscluster" ) func TestNodepoolPosMap(t *testing.T) { @@ -63,10 +63,10 @@ func TestNodepoolPosMap(t *testing.T) { func TestIsManagemetClusterHealthy(t *testing.T) { tests := []struct { - name string - cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse + name string + cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse response bool - err error + err error }{ { name: "Not healthy", @@ -78,7 +78,7 @@ func TestIsManagemetClusterHealthy(t *testing.T) { }, }, response: false, - err: nil, + err: nil, }, { name: "Healthy", @@ -88,9 +88,9 @@ func TestIsManagemetClusterHealthy(t *testing.T) { Health: clustermodel.NewVmwareTanzuManageV1alpha1CommonClusterHealth(clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY), }, }, - }, - response: true, - err: nil, + }, + response: true, + err: nil, }, } @@ -101,4 +101,4 @@ func TestIsManagemetClusterHealthy(t *testing.T) { } }) } -} \ No newline at end of file +} diff --git a/internal/resources/ekscluster/resource_ekscluster.go b/internal/resources/ekscluster/resource_ekscluster.go index b4fc5e220..41ad6c974 100644 --- a/internal/resources/ekscluster/resource_ekscluster.go +++ b/internal/resources/ekscluster/resource_ekscluster.go @@ -96,7 +96,7 @@ var clusterSchema = map[string]*schema.Schema{ Type: schema.TypeString, Description: "Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig.", Computed: true, - }, + }, } var clusterSpecSchema = &schema.Schema{ @@ -977,4 +977,4 @@ func isWaitForKubeconfig(data *schema.ResourceData) bool { } return false -} \ No newline at end of file +} From b4ab0b94da6f462f9c28d942472d2626d5228cd6 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Wed, 13 Dec 2023 18:16:16 -0800 Subject: [PATCH 03/22] Updated docs --- docs/data-sources/ekscluster.md | 2 ++ docs/resources/ekscluster.md | 21 ++++++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/data-sources/ekscluster.md b/docs/data-sources/ekscluster.md index baed52eeb..1215dec35 100644 --- a/docs/data-sources/ekscluster.md +++ b/docs/data-sources/ekscluster.md @@ -48,10 +48,12 @@ data "tanzu-mission-control_ekscluster" "tf_eks_cluster" { - `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta)) - `ready_wait_timeout` (String) Wait timeout duration until cluster resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero - `spec` (Block List, Max: 1) Spec for the cluster (see [below for nested schema](#nestedblock--spec)) +- `wait_for_kubeconfig` (Boolean) Wait until pinniped extension is ready to provide kubeconfig ### Read-Only - `id` (String) The ID of this resource. +- `kubeconfig` (String) Kubeconfig for connecting to newly created cluster base64 encoded. 
This will only be returned if you have elected to wait for kubeconfig. - `status` (Map of String) Status of the cluster diff --git a/docs/resources/ekscluster.md b/docs/resources/ekscluster.md index 520a1d126..47dda329c 100644 --- a/docs/resources/ekscluster.md +++ b/docs/resources/ekscluster.md @@ -80,22 +80,19 @@ resource "tanzu-mission-control_ekscluster" "tf_eks_cluster" { ] } - addons_config { + addons_config { // this whole section is optional vpc_cni_config { eni_config { - id = "subnet-0a680171b6330619f" // Required, should belong to the same VPC as the cluster - security_groups = [ + id = "subnet-0a680171b6330619f" // Required, need not belong to the same VPC as the cluster, subnets provided in vpc_cni_config are expected to be in different AZs + security_groups = [ //optional, if not provided, the cluster security group will be used "sg-00c96ad9d02a22522", ] } eni_config { - id = "subnet-06feb0bb0451cda78" // Required, should belong to the same VPC as the cluster - security_groups = [ - "sg-00c96ad9d02a22522", - ] + id = "subnet-06feb0bb0451cda79" // Required, need not belong to the same VPC as the cluster, subnets provided in vpc_cni_config are expected to be in different AZs } } - } + } } nodepool { @@ -213,10 +210,12 @@ resource "tanzu-mission-control_ekscluster" "tf_eks_cluster" { - `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta)) - `ready_wait_timeout` (String) Wait timeout duration until cluster resource reaches READY state. Accepted timeout duration values like 5s, 45m, or 3h, higher than zero - `spec` (Block List, Max: 1) Spec for the cluster (see [below for nested schema](#nestedblock--spec)) +- `wait_for_kubeconfig` (Boolean) Wait until pinniped extension is ready to provide kubeconfig ### Read-Only - `id` (String) The ID of this resource. +- `kubeconfig` (String) Kubeconfig for connecting to newly created cluster base64 encoded. This will only be returned if you have elected to wait for kubeconfig. - `status` (Map of String) Status of the cluster @@ -283,14 +282,14 @@ Optional: Optional: -- `vpc_cni_config` (Block List, Max: 1) VPC CNI addon config contains the configuration for the VPC CNI addon of the cluster. (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config)) +- `vpc_cni_config` (Block List, Max: 1) VPC CNI addon config contains the configuration for the VPC CNI addon of the cluster (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config)) ### Nested Schema for `spec.config.addons_config.vpc_cni_config` Optional: -- `eni_config` (Block List) ENI config is the VPC CNI Elastic Network Interface config for providing the configuration of subnet and security groups for pods in each AZ. Subnets need not be in the same VPC as the cluster. The subnets provided across eniConfigs should be in different availability zones. Nodepool subnets need to be in the same AZ as the AZs used in ENIConfig. (see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config--eni_config)) +- `eni_config` (Block List) ENI config is the VPC CNI Elastic Network Interface config for providing the configuration of subnet and security groups for pods in each AZ. Subnets need not be in the same VPC as the cluster. The subnets provided across eniConfigs should be in different availability zones. Nodepool subnets need to be in the same AZ as the AZs used in ENIConfig. 
(see [below for nested schema](#nestedblock--spec--config--addons_config--vpc_cni_config--eni_config)) ### Nested Schema for `spec.config.addons_config.vpc_cni_config.eni_config` @@ -301,7 +300,7 @@ Required: Optional: -- `security_groups` (Set of String) List of security group is optional and if not provided default security group created by EKS will be used. +- `security_groups` (Set of String) List of security group is optional and if not provided default security group created by EKS will be used. From 9bb12defa8db75d8925254a5fa6b7f212754b522 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Wed, 13 Dec 2023 18:46:48 -0800 Subject: [PATCH 04/22] Fixed the linting error --- internal/resources/ekscluster/data_source.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index 079713c16..3e1e9be90 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -87,6 +87,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) if err != nil { log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s", clusterFn.Name) + return true, nil } if !isManagemetClusterHealthy(clusterResp) { From c2d8d6117014ec24b3f322b176696290f4bca992 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Thu, 14 Dec 2023 14:34:47 -0800 Subject: [PATCH 05/22] Fixed linting issues --- internal/resources/ekscluster/data_source.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index 3e1e9be90..6195d585d 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -85,6 +85,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m if isWaitForKubeconfig(d) { clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{Name: resp.EksCluster.Spec.AgentName, OrgID: clusterFn.OrgID, ManagementClusterName: "eks", ProvisionerName: "eks"} clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) + // nolint: wsl if err != nil { log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s", clusterFn.Name) return true, nil From edcab5df0f5f9554d72e1ff0b6c1b761b2a58b03 Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Fri, 15 Dec 2023 17:05:23 -0800 Subject: [PATCH 06/22] Addressed initial review comments --- internal/resources/ekscluster/data_source.go | 43 +++++++++++++++---- .../resources/ekscluster/data_source_test.go | 18 +++++++- 2 files changed, 51 insertions(+), 10 deletions(-) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index 6195d585d..73eb748af 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -83,32 +83,49 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m } if isWaitForKubeconfig(d) { - clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{Name: resp.EksCluster.Spec.AgentName, OrgID: clusterFn.OrgID, ManagementClusterName: "eks", ProvisionerName: "eks"} + clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{ + Name: resp.EksCluster.Spec.AgentName, + 
OrgID: clusterFn.OrgID, + ManagementClusterName: "eks", + ProvisionerName: "eks", + } clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) // nolint: wsl if err != nil { - log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s", clusterFn.Name) - return true, nil + log.Printf("Unable to get Tanzu Mission Control cluster entry, name : %s, error : %s", clusterFn.Name, err.Error()) + return true, err } - if !isManagemetClusterHealthy(clusterResp) { + mgmtClusterHealthy, err := isManagemetClusterHealthy(clusterResp) + if err != nil { log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) return true, nil + } else { + if !mgmtClusterHealthy { + log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) + return true, nil + } } fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{ ManagementClusterName: "eks", ProvisionerName: "eks", Name: resp.EksCluster.Spec.AgentName, - OrgID: "", } resp, err := config.TMCConnection.KubeConfigResourceService.KubeconfigServiceGet(fn) + if err != nil { + log.Printf("Unable to get Tanzu Mission Control Kubeconfig entry, name : %s, error : %s", fn.Name, err.Error()) + return true, err + } if kubeConfigReady(err, resp) { if err = d.Set(kubeconfigKey, resp.Kubeconfig); err != nil { - log.Printf("Failed to set Kubeconfig") - return true, nil + log.Printf("Failed to set Kubeconfig for cluster %s, error : %s", clusterFn.Name, err.Error()) + return false, err } + } else { + log.Printf("[DEBUG] waiting for cluster(%s)'s Kubeconfig to be in Ready status", clusterFn.Name) + return true, nil } } @@ -157,8 +174,16 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return diags } -func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) bool { - return cluster.Cluster.Status.Health != nil && *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY +func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) (bool, error) { + if cluster == nil || cluster.Cluster == nil || cluster.Cluster.Status == nil || cluster.Cluster.Status.Health == nil { + return false, errors.New("cluster data is invalid or nil") + } + + if *cluster.Cluster.Status.Health == clustermodel.VmwareTanzuManageV1alpha1CommonClusterHealthHEALTHY { + return true, nil + } + + return false, nil } func kubeConfigReady(err error, resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool { diff --git a/internal/resources/ekscluster/data_source_test.go b/internal/resources/ekscluster/data_source_test.go index a44660313..c4de14fe7 100644 --- a/internal/resources/ekscluster/data_source_test.go +++ b/internal/resources/ekscluster/data_source_test.go @@ -8,6 +8,7 @@ package ekscluster import ( "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/require" clustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/cluster" @@ -92,11 +93,26 @@ func TestIsManagemetClusterHealthy(t *testing.T) { response: true, err: nil, }, + { + name: "Error", + cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse{ + Cluster: &clustermodel.VmwareTanzuManageV1alpha1ClusterCluster{ + Status: nil, + }, + }, + response: false, + err: errors.New("cluster data is invalid or nil"), + }, } for _, test := range tests { t.Run(test.name, func(t 
*testing.T) { - if test.response != isManagemetClusterHealthy(test.cluster) { + result, err := isManagemetClusterHealthy(test.cluster) + if err != nil { + if err.Error() != test.err.Error() { + t.Errorf("expected error to match") + } + } else if test.response != result { t.Errorf("expected function output to match") } }) From 1e8ee49beb3f4a9639fd7d760a1b9b60ee2970fd Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Fri, 15 Dec 2023 21:27:50 -0800 Subject: [PATCH 07/22] Addressed lint issues and gofmt --- internal/resources/ekscluster/data_source.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index 73eb748af..095960332 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -84,10 +84,10 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m if isWaitForKubeconfig(d) { clusFullName := &clustermodel.VmwareTanzuManageV1alpha1ClusterFullName{ - Name: resp.EksCluster.Spec.AgentName, - OrgID: clusterFn.OrgID, - ManagementClusterName: "eks", - ProvisionerName: "eks", + Name: resp.EksCluster.Spec.AgentName, + OrgID: clusterFn.OrgID, + ManagementClusterName: "eks", + ProvisionerName: "eks", } clusterResp, err := config.TMCConnection.ClusterResourceService.ManageV1alpha1ClusterResourceServiceGet(clusFullName) // nolint: wsl @@ -100,11 +100,9 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m if err != nil { log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) return true, nil - } else { - if !mgmtClusterHealthy { - log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) - return true, nil - } + } else if !mgmtClusterHealthy { + log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) + return true, nil } fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{ @@ -113,6 +111,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m Name: resp.EksCluster.Spec.AgentName, } resp, err := config.TMCConnection.KubeConfigResourceService.KubeconfigServiceGet(fn) + // nolint: wsl if err != nil { log.Printf("Unable to get Tanzu Mission Control Kubeconfig entry, name : %s, error : %s", fn.Name, err.Error()) return true, err From c94758022a101d7d09f8cfca69ad7add7bb6c35c Mon Sep 17 00:00:00 2001 From: Sreenivas Manyam Rajaram Date: Tue, 19 Dec 2023 17:27:42 -0800 Subject: [PATCH 08/22] Fixed additional review comments --- internal/resources/ekscluster/data_source.go | 17 +++++++---------- .../resources/ekscluster/data_source_test.go | 2 +- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/internal/resources/ekscluster/data_source.go b/internal/resources/ekscluster/data_source.go index 095960332..dd4bd5756 100644 --- a/internal/resources/ekscluster/data_source.go +++ b/internal/resources/ekscluster/data_source.go @@ -96,13 +96,10 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return true, err } - mgmtClusterHealthy, err := isManagemetClusterHealthy(clusterResp) - if err != nil { - log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", clusterFn.Name) - return true, nil - } else if !mgmtClusterHealthy { + clusterHealthy, err := isClusterHealthy(clusterResp) + if err != nil || !clusterHealthy { log.Printf("[DEBUG] waiting for cluster(%s) to be in Healthy status", 
clusterFn.Name) - return true, nil + return true, err } fn := &configModels.VmwareTanzuManageV1alpha1ClusterFullName{ @@ -117,7 +114,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return true, err } - if kubeConfigReady(err, resp) { + if kubeConfigReady(resp) { if err = d.Set(kubeconfigKey, resp.Kubeconfig); err != nil { log.Printf("Failed to set Kubeconfig for cluster %s, error : %s", clusterFn.Name, err.Error()) return false, err @@ -173,7 +170,7 @@ func dataSourceTMCEKSClusterRead(ctx context.Context, d *schema.ResourceData, m return diags } -func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) (bool, error) { +func isClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1ClusterGetClusterResponse) (bool, error) { if cluster == nil || cluster.Cluster == nil || cluster.Cluster.Status == nil || cluster.Cluster.Status.Health == nil { return false, errors.New("cluster data is invalid or nil") } @@ -185,8 +182,8 @@ func isManagemetClusterHealthy(cluster *clustermodel.VmwareTanzuManageV1alpha1Cl return false, nil } -func kubeConfigReady(err error, resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool { - return err == nil && *resp.Status == configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponseStatusREADY +func kubeConfigReady(resp *configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponse) bool { + return *resp.Status == configModels.VmwareTanzuManageV1alpha1ClusterKubeconfigGetKubeconfigResponseStatusREADY } func setResourceData(d *schema.ResourceData, eksCluster *eksmodel.VmwareTanzuManageV1alpha1EksclusterEksCluster, remoteNodepools []*eksmodel.VmwareTanzuManageV1alpha1EksclusterNodepoolNodepool) error { diff --git a/internal/resources/ekscluster/data_source_test.go b/internal/resources/ekscluster/data_source_test.go index c4de14fe7..7dbfca8a1 100644 --- a/internal/resources/ekscluster/data_source_test.go +++ b/internal/resources/ekscluster/data_source_test.go @@ -107,7 +107,7 @@ func TestIsManagemetClusterHealthy(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - result, err := isManagemetClusterHealthy(test.cluster) + result, err := isClusterHealthy(test.cluster) if err != nil { if err.Error() != test.err.Error() { t.Errorf("expected error to match") From 5bb3f977b17e698167c9b3377a9b726079c4b376 Mon Sep 17 00:00:00 2001 From: Gavin Shaw Date: Tue, 19 Dec 2023 18:28:31 -0500 Subject: [PATCH 09/22] Add support for managed identity to be specified during terraform apply for aks clusters Signed-off-by: Gavin Shaw --- internal/models/akscluster/cluster_config.go | 3 + .../akscluster/managed_identity_config.go | 38 +++++++++++ .../akscluster/managed_identity_type.go | 50 ++++++++++++++ .../user_assigned_identity_config.go | 37 ++++++++++ .../resources/akscluster/akscluster_mapper.go | 67 +++++++++++++++++++ internal/resources/akscluster/constants.go | 2 + internal/resources/akscluster/helpers_test.go | 1 + internal/resources/akscluster/schema.go | 39 +++++++++++ 8 files changed, 237 insertions(+) create mode 100644 internal/models/akscluster/managed_identity_config.go create mode 100644 internal/models/akscluster/managed_identity_type.go create mode 100644 internal/models/akscluster/user_assigned_identity_config.go diff --git a/internal/models/akscluster/cluster_config.go b/internal/models/akscluster/cluster_config.go index 9574eca70..579f0235e 100644 --- 
a/internal/models/akscluster/cluster_config.go +++ b/internal/models/akscluster/cluster_config.go @@ -50,6 +50,9 @@ type VmwareTanzuManageV1alpha1AksclusterClusterConfig struct { // The metadata to apply to the cluster to assist with categorization and organization. Tags map[string]string `json:"tags,omitempty"` + // The managed identity to apply to the cluster. + IdentityConfig *VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig `json:"identityConfig,omitempty"` + // Kubernetes version of the cluster. Version string `json:"version,omitempty"` } diff --git a/internal/models/akscluster/managed_identity_config.go b/internal/models/akscluster/managed_identity_config.go new file mode 100644 index 000000000..c49600c47 --- /dev/null +++ b/internal/models/akscluster/managed_identity_config.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package models + +import "github.com/go-openapi/swag" + +// VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig The managed identity config. +// +// swagger:model vmware.tanzu.manage.v1alpha1.akscluster.ManagedIdentityConfig +type VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig struct { + Type *VmwareTanzuManageV1alpha1AksclusterManagedIdentityType `json:"type,omitempty"` + + UserAssignedIdentityType *VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig `json:"userAssigned,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/akscluster/managed_identity_type.go b/internal/models/akscluster/managed_identity_type.go new file mode 100644 index 000000000..6dbecea8d --- /dev/null +++ b/internal/models/akscluster/managed_identity_type.go @@ -0,0 +1,50 @@ +/* +Copyright 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package models + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1AksclusterManagedIdentityType Managed identity type options of identity config. +// +// - IDENTITY_TYPE_SYSTEM_ASSIGNED: Indicates that a system assigned managed identity should be used by the cluster. +// - IDENTITY_TYPE_USER_ASSIGNED: Indicates that a user assigned managed identity should be used by the cluster. +// +// swagger:model vmware.tanzu.manage.v1alpha1.akscluster.ManagedIdentityType +type VmwareTanzuManageV1alpha1AksclusterManagedIdentityType string + +func NewVmwareTanzuManageV1alpha1AksclusterManagedIdentityType(value VmwareTanzuManageV1alpha1AksclusterManagedIdentityType) *VmwareTanzuManageV1alpha1AksclusterManagedIdentityType { + return &value +} + +// Pointer returns a pointer to a freshly-allocated VmwareTanzuManageV1alpha1AksclusterManagedIdentityType. +func (m VmwareTanzuManageV1alpha1AksclusterManagedIdentityType) Pointer() *VmwareTanzuManageV1alpha1AksclusterManagedIdentityType { + return &m +} + +const ( + + // VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeSYSTEMASSIGNED captures enum value "IDENTITY_TYPE_SYSTEM_ASSIGNED". 
+ VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeSYSTEMASSIGNED VmwareTanzuManageV1alpha1AksclusterManagedIdentityType = "IDENTITY_TYPE_SYSTEM_ASSIGNED" + + // VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeUSERASSIGNED captures enum value "IDENTITY_TYPE_USER_ASSIGNED". + VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeUSERASSIGNED VmwareTanzuManageV1alpha1AksclusterManagedIdentityType = "IDENTITY_TYPE_USER_ASSIGNED" +) + +// for schema. +var vmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeEnum []interface{} + +func init() { + var res []VmwareTanzuManageV1alpha1AksclusterManagedIdentityType + if err := json.Unmarshal([]byte(`["IDENTITY_TYPE_SYSTEM_ASSIGNED","IDENTITY_TYPE_USER_ASSIGNED"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeEnum = append(vmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeEnum, v) + } +} diff --git a/internal/models/akscluster/user_assigned_identity_config.go b/internal/models/akscluster/user_assigned_identity_config.go new file mode 100644 index 000000000..1263a9bfa --- /dev/null +++ b/internal/models/akscluster/user_assigned_identity_config.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package models + +import "github.com/go-openapi/swag" + +// VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig The managed identity config. +// +// swagger:model vmware.tanzu.manage.v1alpha1.akscluster.UserAssignedIdentityTypeConfig + +type VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig struct { + ManagedResourceID string `json:"resourceId,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. 
+func (m *VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/resources/akscluster/akscluster_mapper.go b/internal/resources/akscluster/akscluster_mapper.go index ac06123f4..089e61c34 100644 --- a/internal/resources/akscluster/akscluster_mapper.go +++ b/internal/resources/akscluster/akscluster_mapper.go @@ -140,6 +140,11 @@ func constructConfig(data []any) *models.VmwareTanzuManageV1alpha1AksclusterClus helper.SetPrimitiveValue(v, &config.NodeResourceGroupName, nodeResourceGroupNameKey) } + if v, ok := configData[identityConfigKey]; ok { + data, _ := v.([]any) + config.IdentityConfig = constructManagedIdentityConfig(data) + } + return config } @@ -433,6 +438,44 @@ func constructAutoUpgradeConfig(data []any) *models.VmwareTanzuManageV1alpha1Aks return autoUpgradeConfig } +func constructManagedIdentityConfig(data []any) *models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig { + if len(data) < 1 { + return nil + } + + // ManagedIdentityConfig schema defines max 1 + managedIdentityConfigData, _ := data[0].(map[string]any) + managedIdentityConfig := &models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig{} + + if v, ok := managedIdentityConfigData[typeKey]; ok { + identityType := models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityType(v.(string)) + managedIdentityConfig.Type = &identityType + } + + if v, ok := managedIdentityConfigData[userAssignedKey]; ok { + data, _ := v.([]any) + managedIdentityConfig.UserAssignedIdentityType = constructUserAssignedIdentityConfig(data) + } + + return managedIdentityConfig +} + +func constructUserAssignedIdentityConfig(data []any) *models.VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig { + if len(data) < 1 { + return nil + } + + // UserAssignedIdentityConfig schema defines max 1 + userAssignedIdentityConfigData, _ := data[0].(map[string]any) + userAssignedIdentityConfig := &models.VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig{} + + if v, ok := userAssignedIdentityConfigData[resourceIDKey]; ok { + helper.SetPrimitiveValue(v, &userAssignedIdentityConfig.ManagedResourceID, resourceIDKey) + } + + return userAssignedIdentityConfig +} + func ToAKSClusterMap(cluster *models.VmwareTanzuManageV1alpha1AksCluster, nodepools []*models.VmwareTanzuManageV1alpha1AksclusterNodepoolNodepool) any { if cluster == nil { return []any{} @@ -483,6 +526,7 @@ func toConfigMap(config *models.VmwareTanzuManageV1alpha1AksclusterClusterConfig data[storageConfigKey] = toStorageConfigMap(config.StorageConfig) data[addonsConfigKey] = toAddonConfigMap(config.AddonsConfig) data[autoUpgradeConfigKey] = toAutoUpgradeConfigMap(config.AutoUpgradeConfig) + data[identityConfigKey] = toManagedIdentityConfigMap(config.IdentityConfig) return []any{data} } @@ -642,6 +686,29 @@ func toAutoUpgradeConfigMap(config *models.VmwareTanzuManageV1alpha1AksclusterAu return []any{data} } +func toManagedIdentityConfigMap(config *models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig) []any { + if config == nil { + return []any{} + } + + data := make(map[string]any) + data[typeKey] = helper.PtrString(config.Type) + data[userAssignedKey] = toUserAssignedIdentityTypeConfigMap(config.UserAssignedIdentityType) + + return []any{data} +} + +func toUserAssignedIdentityTypeConfigMap(config 
*models.VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig) []any { + if config == nil { + return []any{} + } + + data := make(map[string]any) + data[resourceIDKey] = config.ManagedResourceID + + return []any{data} +} + func toNodePoolList(nodepools []*models.VmwareTanzuManageV1alpha1AksclusterNodepoolNodepool) []any { n := make([]any, 0, len(nodepools)) for _, v := range nodepools { diff --git a/internal/resources/akscluster/constants.go b/internal/resources/akscluster/constants.go index 6a8e262cd..5c8aab4b1 100644 --- a/internal/resources/akscluster/constants.go +++ b/internal/resources/akscluster/constants.go @@ -106,4 +106,6 @@ const ( upgradeConfigKey = "upgrade_config" maxSurgeKey = "max_surge" kubeconfigKey = "kubeconfig" + identityConfigKey = "identity_config" + userAssignedKey = "user_assigned" ) diff --git a/internal/resources/akscluster/helpers_test.go b/internal/resources/akscluster/helpers_test.go index bb517db2a..15422d635 100644 --- a/internal/resources/akscluster/helpers_test.go +++ b/internal/resources/akscluster/helpers_test.go @@ -372,6 +372,7 @@ func aTestClusterDataMap(w ...mapWither) map[string]any { "auto_upgrade_config": []any{map[string]any{ "upgrade_channel": "STABLE", }}, + "identity_config": []any{}, }}, "nodepool": []any{ aTestNodepoolDataMap(), diff --git a/internal/resources/akscluster/schema.go b/internal/resources/akscluster/schema.go index 75d92574d..0b4711a01 100644 --- a/internal/resources/akscluster/schema.go +++ b/internal/resources/akscluster/schema.go @@ -205,6 +205,13 @@ var ClusterConfig = &schema.Resource{ MaxItems: 1, Elem: AutoUpgradeConfig, }, + identityConfigKey: { + Type: schema.TypeList, + Description: "Managed Identity Config", + Optional: true, + MaxItems: 1, + Elem: ManagedIdentityConfig, + }, }, } @@ -514,6 +521,38 @@ var AutoUpgradeConfig = &schema.Resource{ }, } +var ManagedIdentityConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + typeKey: { + Type: schema.TypeString, + Description: "Type of managed identity used by the cluster (default IDENTITY_TYPE_SYSTEM_ASSIGNED). Allowed values include: IDENTITY_TYPE_SYSTEM_ASSIGNED or IDENTITY_TYPE_USER_ASSIGNED", + Optional: true, + Default: "IDENTITY_TYPE_SYSTEM_ASSIGNED", + ValidateDiagFunc: validation.ToDiagFunc(validation.StringInSlice([]string{ + string(aksmodel.VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeSYSTEMASSIGNED), + string(aksmodel.VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeUSERASSIGNED), + }, false)), + }, + userAssignedKey: { + Type: schema.TypeList, + Description: "User Assigned Managed Identity Config", + Optional: true, + MaxItems: 1, + Elem: UserAssignedManagedIdentityConfig, + }, + }, +} + +var UserAssignedManagedIdentityConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + resourceIDKey: { + Type: schema.TypeString, + Description: "The ARM resource ID of user assigned identity in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'", + Required: true, + }, + }, +} + // NodepoolConfig defines the info and nodepool spec for AKS clusters. 
// // Note: ForceNew is not used in any of the elements because this is a part of From 79e4449a10e5ea73f63bddebfbc63edf2da63db4 Mon Sep 17 00:00:00 2001 From: Gavin Shaw Date: Tue, 19 Dec 2023 18:41:40 -0500 Subject: [PATCH 10/22] Update aks cluster docs Signed-off-by: Gavin Shaw --- docs/data-sources/akscluster.md | 18 ++++++++++++++++++ docs/resources/akscluster.md | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/docs/data-sources/akscluster.md b/docs/data-sources/akscluster.md index 45d33bfaa..f61e9861e 100644 --- a/docs/data-sources/akscluster.md +++ b/docs/data-sources/akscluster.md @@ -109,6 +109,7 @@ Optional: - `api_server_access_config` (Block List, Max: 1) API Server Access Config (see [below for nested schema](#nestedblock--spec--config--api_server_access_config)) - `auto_upgrade_config` (Block List, Max: 1) Auto Upgrade Config (see [below for nested schema](#nestedblock--spec--config--auto_upgrade_config)) - `disk_encryption_set` (String) Resource ID of the disk encryption set to use for enabling +- `identity_config` (Block List, Max: 1) Managed Identity Config (see [below for nested schema](#nestedblock--spec--config--identity_config)) - `linux_config` (Block List, Max: 1) Linux Config (see [below for nested schema](#nestedblock--spec--config--linux_config)) - `node_resource_group_name` (String) Name of the resource group containing nodepools. - `sku` (Block List, Max: 1) Azure Kubernetes Service SKU (see [below for nested schema](#nestedblock--spec--config--sku)) @@ -212,6 +213,23 @@ Optional: - `upgrade_channel` (String) Upgrade Channel. Allowed values include: NONE, PATCH, STABLE, RAPID or NODE_IMAGE + +### Nested Schema for `spec.config.identity_config` + +Optional: + +- `type` (String) Type of managed identity used by the cluster (default IDENTITY_TYPE_SYSTEM_ASSIGNED). Allowed values include: IDENTITY_TYPE_SYSTEM_ASSIGNED or IDENTITY_TYPE_USER_ASSIGNED +- `user_assigned` (Block List, Max: 1) User Assigned Managed Identity Config (see [below for nested schema](#nestedblock--spec--config--identity_config--user_assigned)) + + +### Nested Schema for `spec.config.identity_config.user_assigned` + +Required: + +- `resource_id` (String) The ARM resource ID of user assigned identity in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}' + + + ### Nested Schema for `spec.config.linux_config` diff --git a/docs/resources/akscluster.md b/docs/resources/akscluster.md index 0a62d89c9..02da8c205 100644 --- a/docs/resources/akscluster.md +++ b/docs/resources/akscluster.md @@ -130,6 +130,7 @@ Optional: - `api_server_access_config` (Block List, Max: 1) API Server Access Config (see [below for nested schema](#nestedblock--spec--config--api_server_access_config)) - `auto_upgrade_config` (Block List, Max: 1) Auto Upgrade Config (see [below for nested schema](#nestedblock--spec--config--auto_upgrade_config)) - `disk_encryption_set` (String) Resource ID of the disk encryption set to use for enabling +- `identity_config` (Block List, Max: 1) Managed Identity Config (see [below for nested schema](#nestedblock--spec--config--identity_config)) - `linux_config` (Block List, Max: 1) Linux Config (see [below for nested schema](#nestedblock--spec--config--linux_config)) - `node_resource_group_name` (String) Name of the resource group containing nodepools. 
- `sku` (Block List, Max: 1) Azure Kubernetes Service SKU (see [below for nested schema](#nestedblock--spec--config--sku)) @@ -233,6 +234,23 @@ Optional: - `upgrade_channel` (String) Upgrade Channel. Allowed values include: NONE, PATCH, STABLE, RAPID or NODE_IMAGE + +### Nested Schema for `spec.config.identity_config` + +Optional: + +- `type` (String) Type of managed identity used by the cluster (default IDENTITY_TYPE_SYSTEM_ASSIGNED). Allowed values include: IDENTITY_TYPE_SYSTEM_ASSIGNED or IDENTITY_TYPE_USER_ASSIGNED +- `user_assigned` (Block List, Max: 1) User Assigned Managed Identity Config (see [below for nested schema](#nestedblock--spec--config--identity_config--user_assigned)) + + +### Nested Schema for `spec.config.identity_config.user_assigned` + +Required: + +- `resource_id` (String) The ARM resource ID of user assigned identity in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}' + + + ### Nested Schema for `spec.config.linux_config` From 92c4692eec1b053cf73878a1f2984d3977dd8379 Mon Sep 17 00:00:00 2001 From: Gavin Shaw Date: Thu, 4 Jan 2024 14:37:21 -0500 Subject: [PATCH 11/22] Improve test coverage of managed identities Signed-off-by: Gavin Shaw --- internal/resources/akscluster/helpers_test.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/internal/resources/akscluster/helpers_test.go b/internal/resources/akscluster/helpers_test.go index 15422d635..fe59ca9de 100644 --- a/internal/resources/akscluster/helpers_test.go +++ b/internal/resources/akscluster/helpers_test.go @@ -159,6 +159,12 @@ func aTestCluster(w ...clusterWither) *models.VmwareTanzuManageV1alpha1AksCluste AutoUpgradeConfig: &models.VmwareTanzuManageV1alpha1AksclusterAutoUpgradeConfig{ Channel: models.VmwareTanzuManageV1alpha1AksclusterChannelSTABLE.Pointer(), }, + IdentityConfig: &models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityConfig{ + Type: models.VmwareTanzuManageV1alpha1AksclusterManagedIdentityTypeUSERASSIGNED.Pointer(), + UserAssignedIdentityType: &models.VmwareTanzuManageV1alpha1AksclusterUserAssignedIdentityTypeConfig{ + ManagedResourceID: "resource-id-for-a-user-assigned-managed-identity", + }, + }, }, ProxyName: "my-proxy", AgentName: "my-agent-name", @@ -372,7 +378,12 @@ func aTestClusterDataMap(w ...mapWither) map[string]any { "auto_upgrade_config": []any{map[string]any{ "upgrade_channel": "STABLE", }}, - "identity_config": []any{}, + "identity_config": []any{map[string]any{ + "type": "IDENTITY_TYPE_USER_ASSIGNED", + "user_assigned": []any{map[string]any{ + "resource_id": "resource-id-for-a-user-assigned-managed-identity", + }}, + }}, }}, "nodepool": []any{ aTestNodepoolDataMap(), From 6e024b128644ff00074149fe81a44e33b1cbe709 Mon Sep 17 00:00:00 2001 From: GilTS Date: Fri, 3 Nov 2023 15:32:35 +0200 Subject: [PATCH 12/22] [Feature-Inspection] Client/Models/Schema, implementation, tests & Docs Signed-off-by: GilTS --- .github/workflows/release.yml | 2 +- .github/workflows/test.yml | 2 +- docs/data-sources/inspection_results.md | 46 ++++++ docs/data-sources/inspections.md | 59 ++++++++ .../datasource_inspection_results.tf | 10 ++ .../inspections/datasource_inspections.tf | 9 ++ internal/client/http_client.go | 3 + .../inspections/inspections_resource.go | 93 ++++++++++++ internal/helper/converter/construct_model.go | 2 +- .../helper/converter/construct_tf_schema.go | 2 +- internal/helper/converter/map_types.go | 45 ++++-- 
internal/models/inspections/cis_spec.go | 41 ++++++ .../models/inspections/cis_spec_targets.go | 67 +++++++++ .../models/inspections/conformance_spec.go | 14 ++ internal/models/inspections/e2e_spec.go | 14 ++ internal/models/inspections/fullname.go | 53 +++++++ internal/models/inspections/inspection.go | 59 ++++++++ internal/models/inspections/lite_spec.go | 14 ++ internal/models/inspections/request.go | 75 ++++++++++ internal/models/inspections/spec.go | 50 +++++++ internal/models/inspections/status.go | 59 ++++++++ internal/models/inspections/status_phase.go | 83 +++++++++++ internal/models/inspections/status_report.go | 56 ++++++++ .../models/inspections/status_report_info.go | 68 +++++++++ .../status_report_progress_info.go | 44 ++++++ .../inspections/status_report_result.go | 59 ++++++++ internal/provider/provider.go | 3 + .../resources/inspections/common_schema.go | 96 +++++++++++++ .../inspections/converter_mapping.go | 66 +++++++++ .../inspections/datasource_inspection_list.go | 68 +++++++++ .../datasource_inspection_results.go | 62 ++++++++ .../inspections/inspection_list_schema.go | 33 +++++ .../inspections/inspection_results_schema.go | 22 +++ .../inspections/tests/datasource_tf_config.go | 56 ++++++++ .../inspections/tests/helper_test.go | 36 +++++ .../inspections/tests/inspections_env_vars.go | 50 +++++++ .../inspections/tests/inspections_test.go | 134 ++++++++++++++++++ .../data-sources/inspection_results.md.tmpl | 22 +++ templates/data-sources/inspections.md.tmpl | 21 +++ 39 files changed, 1685 insertions(+), 13 deletions(-) create mode 100644 docs/data-sources/inspection_results.md create mode 100644 docs/data-sources/inspections.md create mode 100644 examples/data-sources/inspections/datasource_inspection_results.tf create mode 100644 examples/data-sources/inspections/datasource_inspections.tf create mode 100644 internal/client/inspections/inspections_resource.go create mode 100644 internal/models/inspections/cis_spec.go create mode 100644 internal/models/inspections/cis_spec_targets.go create mode 100644 internal/models/inspections/conformance_spec.go create mode 100644 internal/models/inspections/e2e_spec.go create mode 100644 internal/models/inspections/fullname.go create mode 100644 internal/models/inspections/inspection.go create mode 100644 internal/models/inspections/lite_spec.go create mode 100644 internal/models/inspections/request.go create mode 100644 internal/models/inspections/spec.go create mode 100644 internal/models/inspections/status.go create mode 100644 internal/models/inspections/status_phase.go create mode 100644 internal/models/inspections/status_report.go create mode 100644 internal/models/inspections/status_report_info.go create mode 100644 internal/models/inspections/status_report_progress_info.go create mode 100644 internal/models/inspections/status_report_result.go create mode 100644 internal/resources/inspections/common_schema.go create mode 100644 internal/resources/inspections/converter_mapping.go create mode 100644 internal/resources/inspections/datasource_inspection_list.go create mode 100644 internal/resources/inspections/datasource_inspection_results.go create mode 100644 internal/resources/inspections/inspection_list_schema.go create mode 100644 internal/resources/inspections/inspection_results_schema.go create mode 100644 internal/resources/inspections/tests/datasource_tf_config.go create mode 100644 internal/resources/inspections/tests/helper_test.go create mode 100644 internal/resources/inspections/tests/inspections_env_vars.go create 
mode 100644 internal/resources/inspections/tests/inspections_test.go create mode 100644 templates/data-sources/inspection_results.md.tmpl create mode 100644 templates/data-sources/inspections.md.tmpl diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 149d7d438..467af74e3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,7 +6,7 @@ on: - 'v*' env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner' + BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' jobs: goreleaser: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1259c30f5..feed66f07 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -3,7 +3,7 @@ name: Test and coverage on: [pull_request, push] env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner' + BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' jobs: build: name: Test and coverage diff --git a/docs/data-sources/inspection_results.md b/docs/data-sources/inspection_results.md new file mode 100644 index 000000000..35b4bc237 --- /dev/null +++ b/docs/data-sources/inspection_results.md @@ -0,0 +1,46 @@ +--- +Title: "Inspection Results Data Source" +Description: |- + Get inspection results +--- + +# Inspection Results Data Source + +This data source enables users to get a specific cluster inspection results. + +## Example Usage + +```terraform +data "tanzu-mission-control_inspection_results" "demo" { + management_cluster_name = "MGMT_CLS_NAME" + provisioner_name = "PROVISIONER_NAME" + cluster_name = "CLS_NAME" + name = "INSPECTION_NAME" +} + +output "inspection_report" { + value = jsondecode(data.tanzu-mission-control_inspection_results.demo.status.report) +} +``` + + +## Schema + +### Required + +- `cluster_name` (String) Cluster name. +- `management_cluster_name` (String) Management cluster name. +- `name` (String) Inspection name. +- `provisioner_name` (String) Cluster provisioner name. + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `status` (Map of String) Status of inspection resource + +## Status Field ## + +The status field is a map of type string-to-string and contains the following keys: +* phase - The phase that the inspection is in. +* phase_info - Information about the phase. +* report - JSON-encoded string of the report data in the inspection. \ No newline at end of file diff --git a/docs/data-sources/inspections.md b/docs/data-sources/inspections.md new file mode 100644 index 000000000..1eb1545d7 --- /dev/null +++ b/docs/data-sources/inspections.md @@ -0,0 +1,59 @@ +--- +Title: "Inspections Data Source" +Description: |- + List cluster inspections +--- + +# Inspections Data Source + +This data source enables users to list cluster inspections. + +## Example Usage + +```terraform +data "tanzu-mission-control_inspections" "demo" { + management_cluster_name = "MGMT_CLS_NAME" + provisioner_name = "PROVISIONER_NAME" + cluster_name = "CLS_NAME" + } + + output "inspections" { + value = data.tanzu-mission-control_inspections.demo.inspections + } +``` + + +## Schema + +### Required + +- `cluster_name` (String) Cluster name. +- `management_cluster_name` (String) Management cluster name. +- `provisioner_name` (String) Cluster provisioner name. + +### Optional + +- `name` (String) Inspection name. + +### Read-Only + +- `id` (String) The ID of this resource. +- `inspections` (List of Object) Inspection objects. (see [below for nested schema](#nestedatt--inspections)) +- `total_count` (String) Total count of inspections returned. + + +### Nested Schema for `inspections` + +Read-Only: + +- `cluster_name` (String) +- `management_cluster_name` (String) +- `name` (String) +- `provisioner_name` (String) +- `status` (Map of String) + +## Status Field ## + +The status field is a map of type string-to-string and contains the following keys: +* phase - The phase that the inspection is in. +* phase_info - Information about the phase.
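For example, a minimal sketch that builds on the `tanzu-mission-control_inspections` data source shown above (the output name and the `demo` reference are illustrative) and surfaces just the phase of each inspection from the status map:

```terraform
# Illustrative output: maps each inspection name to its current phase,
# using the "inspections" list and the per-item "status" map documented above.
output "inspection_phases" {
  value = {
    for i in data.tanzu-mission-control_inspections.demo.inspections :
    i.name => i.status["phase"]
  }
}
```

Because `status` is a plain string-to-string map, any of the keys listed above can be read the same way.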
\ No newline at end of file diff --git a/examples/data-sources/inspections/datasource_inspection_results.tf b/examples/data-sources/inspections/datasource_inspection_results.tf new file mode 100644 index 000000000..a10cd3886 --- /dev/null +++ b/examples/data-sources/inspections/datasource_inspection_results.tf @@ -0,0 +1,10 @@ +data "tanzu-mission-control_inspection_results" "demo" { + management_cluster_name = "MGMT_CLS_NAME" + provisioner_name = "PROVISIONER_NAME" + cluster_name = "CLS_NAME" + name = "INSPECTION_NAME" +} + +output "inspection_report" { + value = jsondecode(data.tanzu-mission-control_inspection_results.demo.status.report) +} diff --git a/examples/data-sources/inspections/datasource_inspections.tf b/examples/data-sources/inspections/datasource_inspections.tf new file mode 100644 index 000000000..68d25bb26 --- /dev/null +++ b/examples/data-sources/inspections/datasource_inspections.tf @@ -0,0 +1,9 @@ + data "tanzu-mission-control_inspections" "demo" { + management_cluster_name = "MGMT_CLS_NAME" + provisioner_name = "PROVISIONER_NAME" + cluster_name = "CLS_NAME" + } + + output "inspections" { + value = data.tanzu-mission-control_inspections.demo.inspections + } diff --git a/internal/client/http_client.go b/internal/client/http_client.go index c40e34455..45d7d836c 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -40,6 +40,7 @@ import ( credentialclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/credential" eksclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster" eksnodepoolclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster/nodepool" + inspectionsclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/inspections" integrationclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/integration" kubeconfigclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/kubeconfig" secretclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/kubernetessecret" @@ -137,6 +138,7 @@ func newHTTPClient(httpClient *transport.Client) *TanzuMissionControl { ClusterGroupSecretResourceService: secretclustergroupclient.New(httpClient), ClusterGroupSecretExportResourceService: secretexportclustergroupclient.New(httpClient), KubeConfigResourceService: kubeconfigclient.New(httpClient), + InspectionsResourceService: inspectionsclient.New(httpClient), BackupScheduleService: backupscheduleclient.New(httpClient), DataProtectionService: dataprotectionclient.New(httpClient), TargetLocationService: targetlocationclient.New(httpClient), @@ -202,4 +204,5 @@ type TanzuMissionControl struct { ClusterClassResourceService clusterclassclient.ClientService TanzuKubernetesClusterResourceService tanzukubernetesclusterclient.ClientService ProvisionerResourceService provisionerclient.ClientService + InspectionsResourceService inspectionsclient.ClientService } diff --git a/internal/client/inspections/inspections_resource.go b/internal/client/inspections/inspections_resource.go new file mode 100644 index 000000000..63b40003c --- /dev/null +++ b/internal/client/inspections/inspections_resource.go @@ -0,0 +1,93 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsclient + +import ( + "net/url" + + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + inspectionsmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/inspections" +) + +const ( + // URL Paths. + clustersAPIVersionAndGroupPath = "v1alpha1/clusters" + inspectionsPath = "inspection/scans" + + // Query Params. + managementClusterNameListInspectionsParam = "searchScope.managementClusterName" + provisionerNameListInspectionsParam = "searchScope.provisionerName" + managementClusterNameGetInspectionParam = "fullName.managementClusterName" + provisionerNameGetInspectionParam = "fullName.provisionerName" +) + +// New creates a new inspections resource service API client. +func New(transport *transport.Client) ClientService { + return &Client{Client: transport} +} + +/* +Client for inspections resource service API. +*/ +type Client struct { + *transport.Client +} + +// ClientService is the interface for Client methods. +type ClientService interface { + InspectionsResourceServiceList(fn *inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) (*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanListData, error) + + InspectionsResourceServiceGet(fn *inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) (*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanData, error) +} + +/* +InspectionsResourceServiceList lists inspections. +*/ +func (c *Client) InspectionsResourceServiceList(fn *inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) (*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanListData, error) { + resp := &inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanListData{} + + if fn.ManagementClusterName == "" || fn.ProvisionerName == "" || fn.ClusterName == "" { + return nil, errors.New("Management Cluster Name, Provisioner Name and Cluster Name must be provided.") + } + + requestURL := helper.ConstructRequestURL(clustersAPIVersionAndGroupPath, fn.ClusterName, inspectionsPath) + queryParams := url.Values{} + + queryParams.Add(managementClusterNameListInspectionsParam, fn.ManagementClusterName) + queryParams.Add(provisionerNameListInspectionsParam, fn.ProvisionerName) + + requestURL = requestURL.AppendQueryParams(queryParams) + + err := c.Get(requestURL.String(), resp) + + return resp, err +} + +/* +InspectionsResourceServiceGet returns an inspection. 
+*/ +func (c *Client) InspectionsResourceServiceGet(fn *inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) (*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanData, error) { + resp := &inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanData{} + + if fn.ManagementClusterName == "" || fn.ProvisionerName == "" || fn.ClusterName == "" || fn.Name == "" { + return nil, errors.New("Management Cluster Name, Provisioner Name, Cluster Name and Inspection Name must be provided.") + } + + requestURL := helper.ConstructRequestURL(clustersAPIVersionAndGroupPath, fn.ClusterName, inspectionsPath, fn.Name) + queryParams := url.Values{} + + queryParams.Add(managementClusterNameGetInspectionParam, fn.ManagementClusterName) + queryParams.Add(provisionerNameGetInspectionParam, fn.ProvisionerName) + + requestURL = requestURL.AppendQueryParams(queryParams) + + err := c.Get(requestURL.String(), resp) + + return resp, err +} diff --git a/internal/helper/converter/construct_model.go b/internal/helper/converter/construct_model.go index fbddd382e..bb36cb306 100644 --- a/internal/helper/converter/construct_model.go +++ b/internal/helper/converter/construct_model.go @@ -65,7 +65,7 @@ func (converter *TFSchemaModelConverter[T]) modelHandleBlockMap(modelJSON *Block } } - for key, value := range definedKeysMapValue { + for key, value := range *definedKeysMapValue { converter.buildModelField(modelJSON, rootSchemaDict[key], value, arrIndexer) } } diff --git a/internal/helper/converter/construct_tf_schema.go b/internal/helper/converter/construct_tf_schema.go index b95849197..647b926a7 100644 --- a/internal/helper/converter/construct_tf_schema.go +++ b/internal/helper/converter/construct_tf_schema.go @@ -73,7 +73,7 @@ func (converter *TFSchemaModelConverter[T]) tfHandleBlockMap(modelJSONData *map[ } } - newBlock := BlockToStruct(mapValue.(*Map).Copy([]string{AllMapKeysFieldMarker})) + newBlock := BlockToStruct(*(mapValue.(*Map).Copy([]string{AllMapKeysFieldMarker}))) mapValue = &newBlock } diff --git a/internal/helper/converter/map_types.go b/internal/helper/converter/map_types.go index c09b599cc..7e2a7dde9 100644 --- a/internal/helper/converter/map_types.go +++ b/internal/helper/converter/map_types.go @@ -52,7 +52,7 @@ const ( // Copy Creates a copy of a Map object. // excludeKeys argument can be used to exclude certain keys to be copied. -func (curMap *Map) Copy(excludedKeys []string) Map { +func (curMap *Map) Copy(excludedKeys []string) *Map { nMap := make(Map) for k, v := range *curMap { @@ -74,15 +74,42 @@ func (curMap *Map) Copy(excludedKeys []string) Map { } } - return nMap + return &nMap +} + +// Copy Creates a copy of a BlockToStruct object. +// excludeKeys argument can be used to exclude certain keys to be copied. +func (currBlock *BlockToStruct) Copy(excludedKeys []string) *BlockToStruct { + nBlock := make(BlockToStruct) + + for k, v := range *currBlock { + if len(excludedKeys) > 0 { + isExcluded := false + + for _, excludedKey := range excludedKeys { + if excludedKey == k { + isExcluded = true + break + } + } + + if !isExcluded { + nBlock[k] = v + } + } else { + nBlock[k] = v + } + } + + return &nBlock } // UnpackSchema Unpacks a schema to a higher level schema, useful for data sources which list an individual Swagger API Model. 
-func (b *BlockToStruct) UnpackSchema(modelPathSeparator string, mapValue interface{}, prefix string) interface{} { +func (currBlock *BlockToStruct) UnpackSchema(modelPathSeparator string, mapValue interface{}, prefix string) interface{} { var elem interface{} if mapValue == nil { - mapValue = b + mapValue = currBlock } switch mapValue := mapValue.(type) { @@ -91,32 +118,32 @@ func (b *BlockToStruct) UnpackSchema(modelPathSeparator string, mapValue interfa elem = &BlockToStruct{} for key, value := range *mapValue.(*BlockToStruct) { - (*elem.(*BlockToStruct))[key] = b.UnpackSchema(modelPathSeparator, value, prefix) + (*elem.(*BlockToStruct))[key] = currBlock.UnpackSchema(modelPathSeparator, value, prefix) } } else { elem = &Map{} for key, value := range *mapValue.(*Map) { - (*elem.(*Map))[key] = b.UnpackSchema(modelPathSeparator, value, prefix) + (*elem.(*Map))[key] = currBlock.UnpackSchema(modelPathSeparator, value, prefix) } } case *BlockToStructSlice: elem = &BlockToStructSlice{} for _, elemMap := range *mapValue { - elemValue := b.UnpackSchema(modelPathSeparator, elemMap, prefix) + elemValue := currBlock.UnpackSchema(modelPathSeparator, elemMap, prefix) *elem.(*BlockToStructSlice) = append(*elem.(*BlockToStructSlice), elemValue.(*BlockToStruct)) } case *BlockSliceToStructSlice: elem = &BlockSliceToStructSlice{} for _, elemMap := range *mapValue { - elemValue := b.UnpackSchema(modelPathSeparator, elemMap, prefix) + elemValue := currBlock.UnpackSchema(modelPathSeparator, elemMap, prefix) *elem.(*BlockSliceToStructSlice) = append(*elem.(*BlockSliceToStructSlice), elemValue.(*BlockToStruct)) } case *ListToStruct: elem = &ListToStruct{} - elemValue := b.UnpackSchema(modelPathSeparator, (*mapValue)[0], prefix) + elemValue := currBlock.UnpackSchema(modelPathSeparator, (*mapValue)[0], prefix) *elem.(*ListToStruct) = append(*elem.(*ListToStruct), elemValue.(string)) case *EvaluatedField: elem = &EvaluatedField{ diff --git a/internal/models/inspections/cis_spec.go b/internal/models/inspections/cis_spec.go new file mode 100644 index 000000000..e450e232d --- /dev/null +++ b/internal/models/inspections/cis_spec.go @@ -0,0 +1,41 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec CIS security inspection scan specification. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.CISSpec +type VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec struct { + + // List of Targets that the CIS plugin will run against. + CisTargets []*VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets `json:"cisTargets"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/cis_spec_targets.go b/internal/models/inspections/cis_spec_targets.go new file mode 100644 index 000000000..5513dda34 --- /dev/null +++ b/internal/models/inspections/cis_spec_targets.go @@ -0,0 +1,67 @@ +/* +Copyright © 2023 VMware, Inc. 
All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets Targets is an enumeration of targets that CIS can run against. +// +// - TARGETS_UNSPECIFIED: Unspecified Target refers to an unspecified target. +// - LEADER_NODE: Target is the leader Node. +// - NODE: Target is all nodes apart from the control plane nodes. +// - ETCD: Target is the ETCD. +// - CONTROL_PLANE: Target is the control plane components. +// - POLICIES: Target is the policies on the cluster. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.CISSpec.Targets +type VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets string + +func NewVmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets(value VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets) *VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets { + return &value +} + +// Pointer returns a pointer to a freshly-allocated VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets. +func (m VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets) Pointer() *VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets { + return &m +} + +const ( + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsTARGETSUNSPECIFIED captures enum value "TARGETS_UNSPECIFIED". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsTARGETSUNSPECIFIED VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "TARGETS_UNSPECIFIED" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsLEADERNODE captures enum value "LEADER_NODE". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsLEADERNODE VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "LEADER_NODE" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsNODE captures enum value "NODE". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsNODE VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "NODE" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsETCD captures enum value "ETCD". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsETCD VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "ETCD" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsCONTROLPLANE captures enum value "CONTROL_PLANE". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsCONTROLPLANE VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "CONTROL_PLANE" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsPOLICIES captures enum value "POLICIES". + VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsPOLICIES VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets = "POLICIES" +) + +// for schema. 
+var vmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsEnum []interface{} + +func init() { + var res []VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargets + + if err := json.Unmarshal([]byte(`["TARGETS_UNSPECIFIED","LEADER_NODE","NODE","ETCD","CONTROL_PLANE","POLICIES"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsEnum = append(vmwareTanzuManageV1alpha1ClusterInspectionScanCISSpecTargetsEnum, v) + } +} diff --git a/internal/models/inspections/conformance_spec.go b/internal/models/inspections/conformance_spec.go new file mode 100644 index 000000000..a564447d6 --- /dev/null +++ b/internal/models/inspections/conformance_spec.go @@ -0,0 +1,14 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// VmwareTanzuManageV1alpha1ClusterInspectionScanConformanceSpec Conformance inspection scan specification. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.ConformanceSpec +type VmwareTanzuManageV1alpha1ClusterInspectionScanConformanceSpec interface{} diff --git a/internal/models/inspections/e2e_spec.go b/internal/models/inspections/e2e_spec.go new file mode 100644 index 000000000..1c9237389 --- /dev/null +++ b/internal/models/inspections/e2e_spec.go @@ -0,0 +1,14 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// VmwareTanzuManageV1alpha1ClusterInspectionScanE2ESpec E2E inspection scan specification. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.E2ESpec +type VmwareTanzuManageV1alpha1ClusterInspectionScanE2ESpec interface{} diff --git a/internal/models/inspections/fullname.go b/internal/models/inspections/fullname.go new file mode 100644 index 000000000..bb1d1149a --- /dev/null +++ b/internal/models/inspections/fullname.go @@ -0,0 +1,53 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanFullName FullName of the inspection scan. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.FullName +type VmwareTanzuManageV1alpha1ClusterInspectionScanFullName struct { + + // Name of Cluster. + ClusterName string `json:"clusterName,omitempty"` + + // Name of management cluster. + ManagementClusterName string `json:"managementClusterName,omitempty"` + + // Name of the inspection scan. + Name string `json:"name,omitempty"` + + // Org ID of the inspection scan. + OrgID string `json:"orgId,omitempty"` + + // Name of Provisioner. + ProvisionerName string `json:"provisionerName,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. 
+func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanFullName) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanFullName + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/inspection.go b/internal/models/inspections/inspection.go new file mode 100644 index 000000000..9a85c9408 --- /dev/null +++ b/internal/models/inspections/inspection.go @@ -0,0 +1,59 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScan Run an on demand inspection scan on a cluster +// +// Running an inspection scan verifies whether the cluster is certified conformant or security compliant. +// It is a diagnostic tool that helps you understand the state of a cluster by running a set of tests and +// provides clear informative reports about the cluster. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.Scan +type VmwareTanzuManageV1alpha1ClusterInspectionScan struct { + + // Full name for the inspection scan. + FullName *VmwareTanzuManageV1alpha1ClusterInspectionScanFullName `json:"fullName,omitempty"` + + // Metadata for the inspection object. + Meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta `json:"meta,omitempty"` + + // Inspection Scan spec. + Spec *VmwareTanzuManageV1alpha1ClusterInspectionScanSpec `json:"spec,omitempty"` + + // Status of the Inspection Scan object. + Status *VmwareTanzuManageV1alpha1ClusterInspectionScanStatus `json:"status,omitempty"` + + // Metadata describing the type of the resource. + Type *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScan) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScan) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScan + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/lite_spec.go b/internal/models/inspections/lite_spec.go new file mode 100644 index 000000000..46cc876ba --- /dev/null +++ b/internal/models/inspections/lite_spec.go @@ -0,0 +1,14 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// VmwareTanzuManageV1alpha1ClusterInspectionScanLiteSpec Lite inspection scan specification. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.LiteSpec +type VmwareTanzuManageV1alpha1ClusterInspectionScanLiteSpec interface{} diff --git a/internal/models/inspections/request.go b/internal/models/inspections/request.go new file mode 100644 index 000000000..d56e4aa02 --- /dev/null +++ b/internal/models/inspections/request.go @@ -0,0 +1,75 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanData Request to create a Scan. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.CreateScanRequest +type VmwareTanzuManageV1alpha1ClusterInspectionScanData struct { + + // Scan to create. + Scan *VmwareTanzuManageV1alpha1ClusterInspectionScan `json:"scan,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} + +// VmwareTanzuManageV1alpha1ClusterInspectionScanListData Response from listing Scans. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.ListScansResponse +type VmwareTanzuManageV1alpha1ClusterInspectionScanListData struct { + + // List of scans. + Scans []*VmwareTanzuManageV1alpha1ClusterInspectionScan `json:"scans"` + + // Total count. + TotalCount string `json:"totalCount,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanListData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanListData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanListData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/spec.go b/internal/models/inspections/spec.go new file mode 100644 index 000000000..ed05bae6b --- /dev/null +++ b/internal/models/inspections/spec.go @@ -0,0 +1,50 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanSpec Spec of the inspection scan. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.Spec +type VmwareTanzuManageV1alpha1ClusterInspectionScanSpec struct { + + // CIS security inspection scan specification. + CisSpec *VmwareTanzuManageV1alpha1ClusterInspectionScanCISSpec `json:"cisSpec,omitempty"` + + // Conformance inspection scan specification. + ConformanceSpec VmwareTanzuManageV1alpha1ClusterInspectionScanConformanceSpec `json:"conformanceSpec,omitempty"` + + // E2E inspection scan specification. + E2eSpec VmwareTanzuManageV1alpha1ClusterInspectionScanE2ESpec `json:"e2eSpec,omitempty"` + + // Lite inspection scan specification. + LiteSpec VmwareTanzuManageV1alpha1ClusterInspectionScanLiteSpec `json:"liteSpec,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. 
+func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/status.go b/internal/models/inspections/status.go new file mode 100644 index 000000000..e75e359e6 --- /dev/null +++ b/internal/models/inspections/status.go @@ -0,0 +1,59 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" + + statusmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/status" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanStatus Status of the inspection scan. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.Status +type VmwareTanzuManageV1alpha1ClusterInspectionScanStatus struct { + + // Available phases of the inspection scan resource. + AvailablePhases []*VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase `json:"availablePhases"` + + // Condition 'Scheduled' with Status 'Unknown' indicates that the inspection is pending + // Condition 'Scheduled' with 'True' and Condition 'Ready' with 'Unknown' indicates that the inspection is running + // Condition 'Ready' with 'True' indicates that the inspection is complete + // Condition 'Ready' with 'False' indicates that the inspection is in error state. + Conditions map[string]statusmodel.VmwareTanzuCoreV1alpha1StatusCondition `json:"conditions,omitempty"` + + // Phase of the inspection scan based on conditions. If state is 'PHASE_UNSPECIFIED', use conditions to + // interpret the state of the inspection. + Phase *VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase `json:"phase,omitempty"` + + // Additional information e.g., reason for ERROR state. + PhaseInfo string `json:"phaseInfo,omitempty"` + + // Report details. + Report *VmwareTanzuManageV1alpha1ClusterInspectionScanReport `json:"report,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanStatus) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanStatus + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/status_phase.go b/internal/models/inspections/status_phase.go new file mode 100644 index 000000000..98cc03201 --- /dev/null +++ b/internal/models/inspections/status_phase.go @@ -0,0 +1,83 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase Phase describes the phase of the inspection scan. +// +// - PHASE_UNSPECIFIED: Unknown - to be used if the status of the current inspection scan is unknown. +// - RUNNING: Running - to indicate the inspection scan is currently running. +// - PENDING: Pending - to indicate that the inspection scan is waiting to be started. +// - COMPLETE: Complete - to indicate that the Sonobuoy open-source tool has completed the inspection scan.
+// - UPLOAD: Upload - to indicate that the inspection scan results are being uploaded to S3. +// - FINISH: Finish - to indicate that the inspection has completed inspection + uploaded results to S3 successfully. +// - STOP: Stop - to stop the sonobuoy inspection. +// - ERROR: Error - to indicate that an error had occurred during the inspection. +// - QUEUED: Queued - to indicate that the inspection is queued and waiting to be applied. +// - CANCEL: CANCEL - to indicate that the inspection is canceled. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.Status.Phase +type VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase string + +func NewVmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase(value VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase) *VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase { + return &value +} + +// Pointer returns a pointer to a freshly-allocated VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase. +func (m VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase) Pointer() *VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase { + return &m +} + +const ( + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhasePHASEUNSPECIFIED captures enum value "PHASE_UNSPECIFIED". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhasePHASEUNSPECIFIED VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "PHASE_UNSPECIFIED" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseRUNNING captures enum value "RUNNING". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseRUNNING VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "RUNNING" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhasePENDING captures enum value "PENDING". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhasePENDING VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "PENDING" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseCOMPLETE captures enum value "COMPLETE". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseCOMPLETE VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "COMPLETE" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseUPLOAD captures enum value "UPLOAD". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseUPLOAD VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "UPLOAD" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseFINISH captures enum value "FINISH". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseFINISH VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "FINISH" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseSTOP captures enum value "STOP". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseSTOP VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "STOP" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseERROR captures enum value "ERROR". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseERROR VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "ERROR" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseQUEUED captures enum value "QUEUED". + VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseQUEUED VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "QUEUED" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseCANCEL captures enum value "CANCEL". 
+ VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseCANCEL VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase = "CANCEL" +) + +// for schema. +var vmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseEnum []interface{} + +func init() { + var res []VmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhase + + if err := json.Unmarshal([]byte(`["PHASE_UNSPECIFIED","RUNNING","PENDING","COMPLETE","UPLOAD","FINISH","STOP","ERROR","QUEUED","CANCEL"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseEnum = append(vmwareTanzuManageV1alpha1ClusterInspectionScanStatusPhaseEnum, v) + } +} diff --git a/internal/models/inspections/status_report.go b/internal/models/inspections/status_report.go new file mode 100644 index 000000000..ed4be144f --- /dev/null +++ b/internal/models/inspections/status_report.go @@ -0,0 +1,56 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanReport Encapsulates the data for a Inspection scan run. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.Report +type VmwareTanzuManageV1alpha1ClusterInspectionScanReport struct { + + // Map of all the diagnostic files in the report tarball. + Diagnostic map[string]string `json:"diagnostic,omitempty"` + + // Map of files containing host information (config, healthz). + Hosts map[string]string `json:"hosts,omitempty"` + + // Meta-info of this report. + Info *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo `json:"info,omitempty"` + + // Map of files with metadata information about the scan (Config, query time, run). + Meta map[string]string `json:"meta,omitempty"` + + // Map of all the files ending in .xml. + Results map[string]string `json:"results,omitempty"` + + // Download URL for the .tar.gz file with this full report. + TarballDownloadURL string `json:"tarballDownloadUrl,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanReport) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanReport) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanReport + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/status_report_info.go b/internal/models/inspections/status_report_info.go new file mode 100644 index 000000000..795a97a1c --- /dev/null +++ b/internal/models/inspections/status_report_info.go @@ -0,0 +1,68 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo Contains the metadata for a single report +// (e.g. report id, etc). +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.ReportInfo +type VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo struct { + + // Kubernetes Server Version. + KubeServerVersion string `json:"kubeServerVersion,omitempty"` + + // Number of inspections failed. 
+ NumFailed string `json:"numFailed,omitempty"` + + // Total number of inspections as part of the scan. + NumInspections string `json:"numInspections,omitempty"` + + // Number of inspection tests in warning state. + NumWarning string `json:"numWarning,omitempty"` + + // Progress information about the inspection scan. + ProgressInfo *VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo `json:"progressInfo,omitempty"` + + // Internal ID of the run. + ReportID string `json:"reportId,omitempty"` + + // Result is a success / failure condition based on the result of the scan. + Result *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult `json:"result,omitempty"` + + // Date and time of the run. + // Format: date-time + RunDatetime strfmt.DateTime `json:"runDatetime,omitempty"` + + // The scan type. + ScanType string `json:"scanType,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfo + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/status_report_progress_info.go b/internal/models/inspections/status_report_progress_info.go new file mode 100644 index 000000000..f4cf09935 --- /dev/null +++ b/internal/models/inspections/status_report_progress_info.go @@ -0,0 +1,44 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo Progress information about the inspection scan. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.ProgressInfo +type VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo struct { + + // Number of tests completed. + NumTestsCompleted string `json:"numTestsCompleted,omitempty"` + + // Number of tests run as part of the inspection scan. + TotalNumTests string `json:"totalNumTests,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1ClusterInspectionScanProgressInfo + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/inspections/status_report_result.go b/internal/models/inspections/status_report_result.go new file mode 100644 index 000000000..6685daf47 --- /dev/null +++ b/internal/models/inspections/status_report_result.go @@ -0,0 +1,59 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionsmodels + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult Result describes the status of the inspection. +// +// - RESULT_UNSPECIFIED: Unspecified result indicates the scan result is unknown. 
+// - SUCCESS: Success - to be used if all the tests are part of the scan are successful. +// - FAILURE: Failure - to indicate that one or more test as part of the scan failed. +// - WARNING: Warning - to indicate that one or more test as part of the scan has a warning error. +// +// swagger:model vmware.tanzu.manage.v1alpha1.cluster.inspection.scan.ReportInfo.Result +type VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult string + +func NewVmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult(value VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult) *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult { + return &value +} + +// Pointer returns a pointer to a freshly-allocated VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult. +func (m VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult) Pointer() *VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult { + return &m +} + +const ( + + // VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultRESULTUNSPECIFIED captures enum value "RESULT_UNSPECIFIED". + VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultRESULTUNSPECIFIED VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult = "RESULT_UNSPECIFIED" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultSUCCESS captures enum value "SUCCESS". + VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultSUCCESS VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult = "SUCCESS" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultFAILURE captures enum value "FAILURE". + VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultFAILURE VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult = "FAILURE" + + // VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultWARNING captures enum value "WARNING". + VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultWARNING VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult = "WARNING" +) + +// for schema. 
+var vmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultEnum []interface{} + +func init() { + var res []VmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResult + + if err := json.Unmarshal([]byte(`["RESULT_UNSPECIFIED","SUCCESS","FAILURE","WARNING"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultEnum = append(vmwareTanzuManageV1alpha1ClusterInspectionScanReportInfoResultEnum, v) + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 7f3a012e9..bbfc82462 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -25,6 +25,7 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmrelease" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmrepository" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/iampolicy" + inspections "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/kubernetessecret" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/kustomization" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/managementcluster" @@ -113,6 +114,8 @@ func Provider() *schema.Provider { managementcluster.ResourceName: managementcluster.DataSourceManagementClusterRegistration(), clusterclass.ResourceName: clusterclass.DataSourceClusterClass(), provisioner.ResourceName: provisioner.DataSourceProvisioner(), + inspections.ResourceNameInspections: inspections.DataSourceInspections(), + inspections.ResourceNameInspectionResults: inspections.DataSourceInspectionResults(), }, ConfigureContextFunc: authctx.ProviderConfigureContext, } diff --git a/internal/resources/inspections/common_schema.go b/internal/resources/inspections/common_schema.go new file mode 100644 index 000000000..ffe72e1a3 --- /dev/null +++ b/internal/resources/inspections/common_schema.go @@ -0,0 +1,96 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + // Inspection Object Keys. + ClusterNameKey = "cluster_name" + ManagementClusterNameKey = "management_cluster_name" + ProvisionerNameKey = "provisioner_name" + NameKey = "name" + StatusKey = "status" + + // Inspection Object Status Keys. 
+ PhaseKey = "phase" + PhaseInfoKey = "phase_info" + ReportKey = "report" + TarballDownloadURL = "tarball_download_url" +) + +var clusterNameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "Cluster name.", + Required: true, + ForceNew: true, +} + +var managementClusterNameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "Management cluster name.", + Required: true, + ForceNew: true, +} + +var provisionerNameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "Cluster provisioner name.", + Required: true, + ForceNew: true, +} + +func getNameSchema(required bool) (nameSchema *schema.Schema) { + nameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "Inspection name.", + Optional: !required, + Required: required, + ForceNew: true, + } + + return nameSchema +} + +var computedInspectionSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Inspection objects.", + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + ClusterNameKey: { + Type: schema.TypeString, + Description: "Cluster name.", + Computed: true, + }, + ManagementClusterNameKey: { + Type: schema.TypeString, + Description: "Management cluster name.", + Computed: true, + }, + ProvisionerNameKey: { + Type: schema.TypeString, + Description: "Provisioner name.", + Computed: true, + }, + NameKey: { + Type: schema.TypeString, + Description: "Inspection name.", + Computed: true, + }, + StatusKey: { + Type: schema.TypeMap, + Description: "Status of inspection resource", + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, +} diff --git a/internal/resources/inspections/converter_mapping.go b/internal/resources/inspections/converter_mapping.go new file mode 100644 index 000000000..e3238e190 --- /dev/null +++ b/internal/resources/inspections/converter_mapping.go @@ -0,0 +1,66 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "encoding/json" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + tfModelConverterHelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper/converter" + inspectionsmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/inspections" +) + +var tfInspectionModelMap = &tfModelConverterHelper.BlockToStruct{ + NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"), + ClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "clusterName"), + ManagementClusterNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "managementClusterName"), + ProvisionerNameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "provisionerName"), + StatusKey: &tfModelConverterHelper.Map{ + PhaseKey: tfModelConverterHelper.BuildDefaultModelPath("status", "phase"), + PhaseInfoKey: tfModelConverterHelper.BuildDefaultModelPath("status", "phaseInfo"), + ReportKey: &tfModelConverterHelper.EvaluatedField{ + Field: tfModelConverterHelper.BuildDefaultModelPath("status", "report"), + EvalFunc: evaluateReport, + }, + TarballDownloadURL: tfModelConverterHelper.BuildDefaultModelPath("status", "tarballDownloadUrl"), + }, +} + +func evaluateReport(mode tfModelConverterHelper.EvaluationMode, value interface{}) (reportData interface{}) { + if mode == tfModelConverterHelper.ConstructTFSchema { + reportJSONBytes, _ := json.Marshal(value) + reportData = helper.ConvertToString(reportJSONBytes, "") + } + + return reportData +} + +var tfInspectionModelConverter = tfModelConverterHelper.TFSchemaModelConverter[*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScan]{ + TFModelMap: tfInspectionModelMap, +} + +var tfInspectionListModelMap = &tfModelConverterHelper.BlockToStruct{ + InspectionListKey: &tfModelConverterHelper.BlockSliceToStructSlice{ + // UNPACK tfModelResourceMap HERE. + }, + TotalCountKey: "totalCount", +} + +var tfListModelConverter = tfModelConverterHelper.TFSchemaModelConverter[*inspectionsmodel.VmwareTanzuManageV1alpha1ClusterInspectionScanListData]{ + TFModelMap: tfInspectionListModelMap, +} + +func constructTFListModelDataMap() { + tfListModelSchema := tfInspectionModelConverter.UnpackSchema(tfModelConverterHelper.BuildArrayField("scans")) + + statusKey := (*tfListModelSchema)[StatusKey] + (*tfListModelSchema)[StatusKey] = statusKey.(*tfModelConverterHelper.Map).Copy([]string{TarballDownloadURL}) + + *(*tfInspectionListModelMap)[InspectionListKey].(*tfModelConverterHelper.BlockSliceToStructSlice) = append( + *(*tfInspectionListModelMap)[InspectionListKey].(*tfModelConverterHelper.BlockSliceToStructSlice), + tfListModelSchema, + ) +} diff --git a/internal/resources/inspections/datasource_inspection_list.go b/internal/resources/inspections/datasource_inspection_list.go new file mode 100644 index 000000000..8fe0cf435 --- /dev/null +++ b/internal/resources/inspections/datasource_inspection_list.go @@ -0,0 +1,68 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" +) + +func DataSourceInspections() *schema.Resource { + // Unpack resource map to datasource map. 
+ constructTFListModelDataMap() + + return &schema.Resource{ + ReadContext: dataSourceInspectionsRead, + Schema: inspectionListDataSourceSchema, + } +} + +func dataSourceInspectionsRead(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfInspectionModelConverter.ConvertTFSchemaToAPIModel(data, []string{ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey}) + inspectionFullName := model.FullName + + if err != nil { + return diag.FromErr(errors.Wrap(err, "Converting schema failed.")) + } + + resp, err := config.TMCConnection.InspectionsResourceService.InspectionsResourceServiceList(inspectionFullName) + + switch { + case err != nil: + return diag.FromErr(errors.Wrapf(err, "Couldn't list inspections.")) + case resp.Scans == nil: + data.SetId("NO_DATA") + default: + err = tfListModelConverter.FillTFSchema(resp, data) + + if err != nil { + return diag.FromErr(err) + } + + inspectionFullName := resp.Scans[0].FullName + + var idKeys = []string{ + inspectionFullName.ManagementClusterName, + inspectionFullName.ProvisionerName, + inspectionFullName.ClusterName, + } + + if inspectionFullName.Name != "" { + idKeys = append(idKeys, inspectionFullName.Name) + } + + data.SetId(strings.Join(idKeys, "/")) + } + + return diags +} diff --git a/internal/resources/inspections/datasource_inspection_results.go b/internal/resources/inspections/datasource_inspection_results.go new file mode 100644 index 000000000..2aa6c9ccf --- /dev/null +++ b/internal/resources/inspections/datasource_inspection_results.go @@ -0,0 +1,62 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" +) + +func DataSourceInspectionResults() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceInspectionResultsRead, + Schema: inspectionResultsDataSourceSchema, + } +} + +func dataSourceInspectionResultsRead(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfInspectionModelConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey, ClusterNameKey, ManagementClusterNameKey, ProvisionerNameKey}) + inspectionFullName := model.FullName + + if err != nil { + return diag.FromErr(errors.Wrap(err, "Converting schema failed.")) + } + + resp, err := config.TMCConnection.InspectionsResourceService.InspectionsResourceServiceGet(inspectionFullName) + + switch { + case err != nil: + return diag.FromErr(errors.Wrapf(err, "Couldn't read inspection results.")) + case resp.Scan == nil: + data.SetId("NO_DATA") + default: + err = tfInspectionModelConverter.FillTFSchema(resp.Scan, data) + + if err != nil { + return diag.FromErr(err) + } + + inspectionFullName := resp.Scan.FullName + + var idKeys = []string{ + inspectionFullName.ManagementClusterName, + inspectionFullName.ProvisionerName, + inspectionFullName.ClusterName, + inspectionFullName.Name, + } + + data.SetId(strings.Join(idKeys, "/")) + } + + return diags +} diff --git a/internal/resources/inspections/inspection_list_schema.go b/internal/resources/inspections/inspection_list_schema.go new file mode 100644 index 000000000..2af5957cf --- /dev/null +++ 
b/internal/resources/inspections/inspection_list_schema.go @@ -0,0 +1,33 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + ResourceNameInspections = "tanzu-mission-control_inspections" + + // Root Keys. + InspectionListKey = "inspections" + TotalCountKey = "total_count" +) + +var inspectionListDataSourceSchema = map[string]*schema.Schema{ + ClusterNameKey: clusterNameSchema, + ManagementClusterNameKey: managementClusterNameSchema, + ProvisionerNameKey: provisionerNameSchema, + NameKey: getNameSchema(false), + InspectionListKey: computedInspectionSchema, + TotalCountKey: totalCountSchema, +} + +var totalCountSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "Total count of inspections returned.", + Computed: true, +} diff --git a/internal/resources/inspections/inspection_results_schema.go b/internal/resources/inspections/inspection_results_schema.go new file mode 100644 index 000000000..61c57ec55 --- /dev/null +++ b/internal/resources/inspections/inspection_results_schema.go @@ -0,0 +1,22 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspections + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const ( + ResourceNameInspectionResults = "tanzu-mission-control_inspection_results" +) + +var inspectionResultsDataSourceSchema = map[string]*schema.Schema{ + ClusterNameKey: clusterNameSchema, + ManagementClusterNameKey: managementClusterNameSchema, + ProvisionerNameKey: provisionerNameSchema, + NameKey: getNameSchema(true), + StatusKey: computedInspectionSchema.Elem.(*schema.Resource).Schema[StatusKey], +} diff --git a/internal/resources/inspections/tests/datasource_tf_config.go b/internal/resources/inspections/tests/datasource_tf_config.go new file mode 100644 index 000000000..7b7603627 --- /dev/null +++ b/internal/resources/inspections/tests/datasource_tf_config.go @@ -0,0 +1,56 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionstests + +import ( + "fmt" + + inspectionsres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" +) + +const ( + InspectionListDataSourceName = "inspection_list_Test" + InspectionResultsDataSourceName = "inspection_result_Test" +) + +var ( + InspectionListDataSourceFullName = fmt.Sprintf("data.%s.%s", inspectionsres.ResourceNameInspections, InspectionListDataSourceName) + InspectionResultsDataSourceFullName = fmt.Sprintf("data.%s.%s", inspectionsres.ResourceNameInspectionResults, InspectionResultsDataSourceName) +) + +func GetInspectionListConfig(inspectionsEnvVars map[ClusterClassEnvVar]string) string { + return fmt.Sprintf(` + data "%s" "%s" { + management_cluster_name = "%s" + provisioner_name = "%s" + cluster_name = "%s" + } + `, + inspectionsres.ResourceNameInspections, + InspectionListDataSourceName, + inspectionsEnvVars[ManagementClusterNameEnv], + inspectionsEnvVars[ProvisionerNameEnv], + inspectionsEnvVars[ClusterNameEnv], + ) +} + +func GetInspectionResultsConfig(inspectionsEnvVars map[ClusterClassEnvVar]string) string { + return fmt.Sprintf(` + data "%s" "%s" { + management_cluster_name = "%s" + provisioner_name = "%s" + cluster_name = "%s" + name = "%s" + } + `, + inspectionsres.ResourceNameInspectionResults, + InspectionResultsDataSourceName, + inspectionsEnvVars[ManagementClusterNameEnv], + inspectionsEnvVars[ProvisionerNameEnv], + inspectionsEnvVars[ClusterNameEnv], + inspectionsEnvVars[InspectionNameEnv], + ) +} diff --git a/internal/resources/inspections/tests/helper_test.go b/internal/resources/inspections/tests/helper_test.go new file mode 100644 index 000000000..e02a2f859 --- /dev/null +++ b/internal/resources/inspections/tests/helper_test.go @@ -0,0 +1,36 @@ +//go:build inspections +// +build inspections + +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionstests + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/require" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + inspectionsres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" +) + +func initTestProvider(t *testing.T) *schema.Provider { + testAccProvider := &schema.Provider{ + Schema: authctx.ProviderAuthSchema(), + DataSourcesMap: map[string]*schema.Resource{ + inspectionsres.ResourceNameInspections: inspectionsres.DataSourceInspections(), + inspectionsres.ResourceNameInspectionResults: inspectionsres.DataSourceInspectionResults(), + }, + ConfigureContextFunc: authctx.ProviderConfigureContext, + } + + if err := testAccProvider.InternalValidate(); err != nil { + require.NoError(t, err) + } + + return testAccProvider +} diff --git a/internal/resources/inspections/tests/inspections_env_vars.go b/internal/resources/inspections/tests/inspections_env_vars.go new file mode 100644 index 000000000..d5669bba7 --- /dev/null +++ b/internal/resources/inspections/tests/inspections_env_vars.go @@ -0,0 +1,50 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionstests + +import ( + "os" + + "github.com/pkg/errors" +) + +type ClusterClassEnvVar string + +const ( + // TKGM Env Vars. 
+ ManagementClusterNameEnv ClusterClassEnvVar = "MANAGEMENT_CLUSTER_NAME" + ProvisionerNameEnv ClusterClassEnvVar = "PROVISIONER_NAME" + ClusterNameEnv ClusterClassEnvVar = "CLUSTER_NAME" + InspectionNameEnv ClusterClassEnvVar = "INSPECTION_NAME" +) + +var ClusterEnvironmentVariables = map[ClusterClassEnvVar]bool{ + ManagementClusterNameEnv: true, + ProvisionerNameEnv: true, + ClusterNameEnv: true, + InspectionNameEnv: true, +} + +func ReadClusterEnvironmentVariables() (envVars map[ClusterClassEnvVar]string, errs []error) { + envVars = make(map[ClusterClassEnvVar]string) + errs = make([]error, 0) + + for k := range ClusterEnvironmentVariables { + envVarVal, exists := os.LookupEnv(string(k)) + + if exists { + envVars[k] = envVarVal + } else { + errs = append(errs, errors.Errorf("Environment variable '%s' is required!", k)) + } + } + + if len(errs) > 0 { + envVars = nil + } + + return envVars, errs +} diff --git a/internal/resources/inspections/tests/inspections_test.go b/internal/resources/inspections/tests/inspections_test.go new file mode 100644 index 000000000..ba5201dbe --- /dev/null +++ b/internal/resources/inspections/tests/inspections_test.go @@ -0,0 +1,134 @@ +//go:build inspections +// +build inspections + +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package inspectionstests + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/proxy" + inspectionsres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" + testhelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/testing" +) + +var ( + context = authctx.TanzuContext{ + ServerEndpoint: os.Getenv(authctx.ServerEndpointEnvVar), + Token: os.Getenv(authctx.VMWCloudAPITokenEnvVar), + VMWCloudEndPoint: os.Getenv(authctx.VMWCloudEndpointEnvVar), + TLSConfig: &proxy.TLSConfig{}, + } +) + +func TestAcceptanceInspectionsDataSources(t *testing.T) { + err := context.Setup() + + if err != nil { + t.Error(errors.Wrap(err, "unable to set the context")) + t.FailNow() + } + + // See cluster_env_vars.go for required environment variables. 
+	environmentVars, errs := ReadClusterEnvironmentVariables()
+
+	if len(errs) > 0 {
+		errMsg := ""
+
+		for _, e := range errs {
+			errMsg = fmt.Sprintf("%s\n%s", errMsg, e.Error())
+		}
+
+		t.Error(errors.Errorf("Required environment variables are missing: %s", errMsg))
+		t.FailNow()
+	}
+
+	provider := initTestProvider(t)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:          testhelper.TestPreCheck(t),
+		ProviderFactories: testhelper.GetTestProviderFactories(provider),
+		CheckDestroy:      nil,
+		Steps: []resource.TestStep{
+			{
+				Config: GetInspectionListConfig(environmentVars),
+				Check: resource.ComposeTestCheckFunc(
+					verifyInspectionListDataSource(provider, InspectionListDataSourceFullName),
+				),
+			},
+			{
+				Config: GetInspectionResultsConfig(environmentVars),
+				Check: resource.ComposeTestCheckFunc(
+					verifyInspectionResultsDataSource(provider, InspectionResultsDataSourceFullName),
+				),
+			},
+		},
+	},
+	)
+
+	t.Log("Inspections data sources acceptance test complete!")
+}
+
+func verifyInspectionListDataSource(provider *schema.Provider, dataSourceName string) resource.TestCheckFunc {
+	verifyFunc := func(rs *terraform.ResourceState) (err error) {
+		inspectionsCount, countExist := rs.Primary.Attributes[fmt.Sprintf("%s.#", inspectionsres.InspectionListKey)]
+
+		if !countExist || (countExist && (inspectionsCount == "" || inspectionsCount == "0")) {
+			err = errors.New("Inspection list is empty")
+		}
+
+		return err
+	}
+
+	return verifyInspectionsDataSource(provider, dataSourceName, verifyFunc)
+}
+
+func verifyInspectionResultsDataSource(provider *schema.Provider, dataSourceName string) resource.TestCheckFunc {
+	verifyFunc := func(rs *terraform.ResourceState) (err error) {
+		reportData, reportExists := rs.Primary.Attributes[fmt.Sprintf("%s.%s", inspectionsres.StatusKey, inspectionsres.ReportKey)]
+
+		if !reportExists || (reportExists && reportData == "") {
+			err = errors.New("Inspection results are empty")
+		}
+
+		return err
+	}
+
+	return verifyInspectionsDataSource(provider, dataSourceName, verifyFunc)
+}
+
+func verifyInspectionsDataSource(
+	provider *schema.Provider,
+	dataSourceName string,
+	verificationFunc func(*terraform.ResourceState) error,
+) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if provider == nil {
+			return fmt.Errorf("provider not initialised")
+		}
+
+		rs, ok := s.RootModule().Resources[dataSourceName]
+
+		if !ok {
+			return fmt.Errorf("could not find data source %s", dataSourceName)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("ID not set for data source %s", dataSourceName)
+		}
+
+		return verificationFunc(rs)
+	}
+}
diff --git a/templates/data-sources/inspection_results.md.tmpl b/templates/data-sources/inspection_results.md.tmpl
new file mode 100644
index 000000000..cc92d1ac6
--- /dev/null
+++ b/templates/data-sources/inspection_results.md.tmpl
@@ -0,0 +1,22 @@
+---
+Title: "Inspection Results Data Source"
+Description: |-
+  Get inspection results
+---
+
+# Inspection Results Data Source
+
+This data source enables users to get the results of a specific cluster inspection.
+
+## Example Usage
+
+{{ tffile "examples/data-sources/inspections/datasource_inspection_results.tf" }}
+
+{{ .SchemaMarkdown | trimspace }}
+
+## Status Field ##
+
+The status field is a string-to-string map containing the following keys:
+* phase - The phase the inspection is in.
+* phase_info - Information about the phase.
+* report - JSON-encoded string of the report data in the inspection.
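For illustration, a minimal sketch of how the new data source and the status keys described above might be consumed in a configuration. The cluster, provisioner, and inspection names are placeholders, and the `jsondecode` step assumes the `report` value is valid JSON, as the template states.

```terraform
data "tanzu-mission-control_inspection_results" "example" {
  management_cluster_name = "attached"            # placeholder
  provisioner_name        = "attached"            # placeholder
  cluster_name            = "example-cluster"     # placeholder
  name                    = "example-inspection"  # placeholder
}

output "inspection_phase" {
  # Phase of the inspection, e.g. whether it has completed.
  value = data.tanzu-mission-control_inspection_results.example.status["phase"]
}

output "inspection_report" {
  # The report key holds a JSON-encoded string; decode it for structured access.
  value = jsondecode(data.tanzu-mission-control_inspection_results.example.status["report"])
}
```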
\ No newline at end of file diff --git a/templates/data-sources/inspections.md.tmpl b/templates/data-sources/inspections.md.tmpl new file mode 100644 index 000000000..5275e473a --- /dev/null +++ b/templates/data-sources/inspections.md.tmpl @@ -0,0 +1,21 @@ +--- +Title: "Inspections Data Source" +Description: |- + List cluster inspections +--- + +# Inspections Data Source + +This data source enables users to list cluster inspections. + +## Example Usage + +{{ tffile "examples/data-sources/inspections/datasource_inspections.tf" }} + +{{ .SchemaMarkdown | trimspace }} + +## Status Field ## + +Status field is a key-value pair of type string-string and it contains the following keys: +* phase - The phase which the inspection is in. +* phase_info - Information about the phase. \ No newline at end of file From c6bc92effb92881eba7ed3580565eb9b550e74c9 Mon Sep 17 00:00:00 2001 From: Ramya Bangera Date: Tue, 9 Jan 2024 15:27:16 +0530 Subject: [PATCH 13/22] Add the inspections build tag to Makefile, accidentally removed while rebasing and updated the copyright to 2024 Signed-off-by: Ramya Bangera --- Makefile | 2 +- .../inspections/inspections_resource.go | 2 +- internal/models/inspections/cis_spec.go | 2 +- .../models/inspections/cis_spec_targets.go | 2 +- .../models/inspections/conformance_spec.go | 2 +- internal/models/inspections/e2e_spec.go | 2 +- internal/models/inspections/fullname.go | 2 +- internal/models/inspections/inspection.go | 2 +- internal/models/inspections/lite_spec.go | 2 +- internal/models/inspections/request.go | 2 +- internal/models/inspections/spec.go | 2 +- internal/models/inspections/status.go | 2 +- internal/models/inspections/status_phase.go | 2 +- internal/models/inspections/status_report.go | 2 +- .../models/inspections/status_report_info.go | 2 +- .../status_report_progress_info.go | 2 +- .../inspections/status_report_result.go | 2 +- internal/provider/provider.go | 50 +++++++++---------- .../resources/inspections/common_schema.go | 2 +- .../inspections/converter_mapping.go | 2 +- .../inspections/datasource_inspection_list.go | 2 +- .../datasource_inspection_results.go | 2 +- .../inspections/inspection_list_schema.go | 2 +- .../inspections/inspection_results_schema.go | 2 +- .../inspections/tests/datasource_tf_config.go | 2 +- .../inspections/tests/helper_test.go | 2 +- .../inspections/tests/inspections_env_vars.go | 2 +- .../inspections/tests/inspections_test.go | 2 +- 28 files changed, 52 insertions(+), 52 deletions(-) diff --git a/Makefile b/Makefile index 86008f2dc..2a73abed8 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ ifeq ($(TEST_FLAGS),) endif ifeq ($(BUILD_TAGS),) - BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner' + BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' 
endif .PHONY: build clean-up test gofmt vet lint acc-test website-lint website-lint-fix diff --git a/internal/client/inspections/inspections_resource.go b/internal/client/inspections/inspections_resource.go index 63b40003c..354342514 100644 --- a/internal/client/inspections/inspections_resource.go +++ b/internal/client/inspections/inspections_resource.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/cis_spec.go b/internal/models/inspections/cis_spec.go index e450e232d..f776e61d7 100644 --- a/internal/models/inspections/cis_spec.go +++ b/internal/models/inspections/cis_spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/cis_spec_targets.go b/internal/models/inspections/cis_spec_targets.go index 5513dda34..e77c714d1 100644 --- a/internal/models/inspections/cis_spec_targets.go +++ b/internal/models/inspections/cis_spec_targets.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/conformance_spec.go b/internal/models/inspections/conformance_spec.go index a564447d6..3d69af82f 100644 --- a/internal/models/inspections/conformance_spec.go +++ b/internal/models/inspections/conformance_spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/e2e_spec.go b/internal/models/inspections/e2e_spec.go index 1c9237389..fe23767a3 100644 --- a/internal/models/inspections/e2e_spec.go +++ b/internal/models/inspections/e2e_spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/fullname.go b/internal/models/inspections/fullname.go index bb1d1149a..2fc03ad11 100644 --- a/internal/models/inspections/fullname.go +++ b/internal/models/inspections/fullname.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/inspection.go b/internal/models/inspections/inspection.go index 9a85c9408..c34f6724c 100644 --- a/internal/models/inspections/inspection.go +++ b/internal/models/inspections/inspection.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/lite_spec.go b/internal/models/inspections/lite_spec.go index 46cc876ba..3c5718e11 100644 --- a/internal/models/inspections/lite_spec.go +++ b/internal/models/inspections/lite_spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/request.go b/internal/models/inspections/request.go index d56e4aa02..aed4f4020 100644 --- a/internal/models/inspections/request.go +++ b/internal/models/inspections/request.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. 
+Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/spec.go b/internal/models/inspections/spec.go index ed05bae6b..be2f3dc85 100644 --- a/internal/models/inspections/spec.go +++ b/internal/models/inspections/spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status.go b/internal/models/inspections/status.go index e75e359e6..9c8428ae1 100644 --- a/internal/models/inspections/status.go +++ b/internal/models/inspections/status.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status_phase.go b/internal/models/inspections/status_phase.go index 98cc03201..7eb89e6f8 100644 --- a/internal/models/inspections/status_phase.go +++ b/internal/models/inspections/status_phase.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status_report.go b/internal/models/inspections/status_report.go index ed4be144f..6362ed97a 100644 --- a/internal/models/inspections/status_report.go +++ b/internal/models/inspections/status_report.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status_report_info.go b/internal/models/inspections/status_report_info.go index 795a97a1c..189599eb4 100644 --- a/internal/models/inspections/status_report_info.go +++ b/internal/models/inspections/status_report_info.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status_report_progress_info.go b/internal/models/inspections/status_report_progress_info.go index f4cf09935..e76b4049d 100644 --- a/internal/models/inspections/status_report_progress_info.go +++ b/internal/models/inspections/status_report_progress_info.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/inspections/status_report_result.go b/internal/models/inspections/status_report_result.go index 6685daf47..6e5e6c2ec 100644 --- a/internal/models/inspections/status_report_result.go +++ b/internal/models/inspections/status_report_result.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/provider/provider.go b/internal/provider/provider.go index bbfc82462..8ddc0663e 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -25,7 +25,7 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmrelease" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmrepository" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/iampolicy" - inspections "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/inspections" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/kubernetessecret" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/kustomization" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/managementcluster" @@ -90,30 +90,30 @@ func Provider() *schema.Provider { provisioner.ResourceName: provisioner.ResourceProvisioner(), }, DataSourcesMap: map[string]*schema.Resource{ - cluster.ResourceName: cluster.DataSourceTMCCluster(), - ekscluster.ResourceName: ekscluster.DataSourceTMCEKSCluster(), - akscluster.ResourceName: akscluster.DataSourceTMCAKSCluster(), - workspace.ResourceName: workspace.DataSourceWorkspace(), - namespace.ResourceName: namespace.DataSourceNamespace(), - clustergroup.ResourceName: clustergroup.DataSourceClusterGroup(), - nodepools.ResourceName: nodepools.DataSourceClusterNodePool(), - credential.ResourceName: credential.DataSourceCredential(), - integration.ResourceName: integration.DataSourceIntegration(), - gitrepository.ResourceName: gitrepository.DataSourceGitRepository(), - sourcesecret.ResourceName: sourcesecret.DataSourceSourcesecret(), - packagerepository.ResourceName: packagerepository.DataSourcePackageRepository(), - tanzupackage.ResourceName: tanzupackage.DataSourceTanzuPackage(), - tanzupackages.ResourceName: tanzupackages.DataSourceTanzuPackages(), - tanzupackageinstall.ResourceName: tanzupackageinstall.DataSourcePackageInstall(), - kubernetessecret.ResourceName: kubernetessecret.DataSourceSecret(), - helmfeature.ResourceName: helmfeature.DataSourceHelm(), - helmcharts.ResourceName: helmcharts.DataSourceHelmCharts(), - helmrepository.ResourceName: helmrepository.DataSourceHelmRepository(), - backupschedule.ResourceName: backupschedule.DataSourceBackupSchedule(), - targetlocation.ResourceName: targetlocation.DataSourceTargetLocations(), - managementcluster.ResourceName: managementcluster.DataSourceManagementClusterRegistration(), - clusterclass.ResourceName: clusterclass.DataSourceClusterClass(), - provisioner.ResourceName: provisioner.DataSourceProvisioner(), + cluster.ResourceName: cluster.DataSourceTMCCluster(), + ekscluster.ResourceName: ekscluster.DataSourceTMCEKSCluster(), + akscluster.ResourceName: akscluster.DataSourceTMCAKSCluster(), + workspace.ResourceName: workspace.DataSourceWorkspace(), + namespace.ResourceName: namespace.DataSourceNamespace(), + clustergroup.ResourceName: clustergroup.DataSourceClusterGroup(), + nodepools.ResourceName: nodepools.DataSourceClusterNodePool(), + credential.ResourceName: credential.DataSourceCredential(), + integration.ResourceName: integration.DataSourceIntegration(), + gitrepository.ResourceName: gitrepository.DataSourceGitRepository(), + sourcesecret.ResourceName: sourcesecret.DataSourceSourcesecret(), + 
packagerepository.ResourceName: packagerepository.DataSourcePackageRepository(), + tanzupackage.ResourceName: tanzupackage.DataSourceTanzuPackage(), + tanzupackages.ResourceName: tanzupackages.DataSourceTanzuPackages(), + tanzupackageinstall.ResourceName: tanzupackageinstall.DataSourcePackageInstall(), + kubernetessecret.ResourceName: kubernetessecret.DataSourceSecret(), + helmfeature.ResourceName: helmfeature.DataSourceHelm(), + helmcharts.ResourceName: helmcharts.DataSourceHelmCharts(), + helmrepository.ResourceName: helmrepository.DataSourceHelmRepository(), + backupschedule.ResourceName: backupschedule.DataSourceBackupSchedule(), + targetlocation.ResourceName: targetlocation.DataSourceTargetLocations(), + managementcluster.ResourceName: managementcluster.DataSourceManagementClusterRegistration(), + clusterclass.ResourceName: clusterclass.DataSourceClusterClass(), + provisioner.ResourceName: provisioner.DataSourceProvisioner(), inspections.ResourceNameInspections: inspections.DataSourceInspections(), inspections.ResourceNameInspectionResults: inspections.DataSourceInspectionResults(), }, diff --git a/internal/resources/inspections/common_schema.go b/internal/resources/inspections/common_schema.go index ffe72e1a3..f4703d4f7 100644 --- a/internal/resources/inspections/common_schema.go +++ b/internal/resources/inspections/common_schema.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/converter_mapping.go b/internal/resources/inspections/converter_mapping.go index e3238e190..97e0545a0 100644 --- a/internal/resources/inspections/converter_mapping.go +++ b/internal/resources/inspections/converter_mapping.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/datasource_inspection_list.go b/internal/resources/inspections/datasource_inspection_list.go index 8fe0cf435..3af013305 100644 --- a/internal/resources/inspections/datasource_inspection_list.go +++ b/internal/resources/inspections/datasource_inspection_list.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/datasource_inspection_results.go b/internal/resources/inspections/datasource_inspection_results.go index 2aa6c9ccf..9640722c7 100644 --- a/internal/resources/inspections/datasource_inspection_results.go +++ b/internal/resources/inspections/datasource_inspection_results.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/inspection_list_schema.go b/internal/resources/inspections/inspection_list_schema.go index 2af5957cf..de82c8572 100644 --- a/internal/resources/inspections/inspection_list_schema.go +++ b/internal/resources/inspections/inspection_list_schema.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/inspection_results_schema.go b/internal/resources/inspections/inspection_results_schema.go index 61c57ec55..b69f2423c 100644 --- a/internal/resources/inspections/inspection_results_schema.go +++ b/internal/resources/inspections/inspection_results_schema.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/tests/datasource_tf_config.go b/internal/resources/inspections/tests/datasource_tf_config.go index 7b7603627..50b06040e 100644 --- a/internal/resources/inspections/tests/datasource_tf_config.go +++ b/internal/resources/inspections/tests/datasource_tf_config.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/tests/helper_test.go b/internal/resources/inspections/tests/helper_test.go index e02a2f859..fcdf9312f 100644 --- a/internal/resources/inspections/tests/helper_test.go +++ b/internal/resources/inspections/tests/helper_test.go @@ -2,7 +2,7 @@ // +build inspections /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/tests/inspections_env_vars.go b/internal/resources/inspections/tests/inspections_env_vars.go index d5669bba7..78ada5674 100644 --- a/internal/resources/inspections/tests/inspections_env_vars.go +++ b/internal/resources/inspections/tests/inspections_env_vars.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/inspections/tests/inspections_test.go b/internal/resources/inspections/tests/inspections_test.go index ba5201dbe..83ea04b6b 100644 --- a/internal/resources/inspections/tests/inspections_test.go +++ b/internal/resources/inspections/tests/inspections_test.go @@ -2,7 +2,7 @@ // +build inspections /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ From dab40e0ef9076cf47feab3ef69dafb6e7efe75e8 Mon Sep 17 00:00:00 2001 From: GilTS Date: Wed, 29 Nov 2023 11:21:31 +0200 Subject: [PATCH 14/22] Added custom policy template resource Signed-off-by: GilTS --- .github/workflows/release.yml | 2 +- .github/workflows/test.yml | 2 +- Makefile | 2 +- docs/resources/custom_policy_template.md | 152 ++++++++++++++++ .../custom_policy_template/provider.tf | 16 ++ .../resource_custom_policy_template.tf | 72 ++++++++ .../custompolicytemplate_resource.go | 81 +++++++++ internal/client/http_client.go | 3 + .../custompolicytemplate.go | 53 ++++++ .../custompolicytemplate/data_inventory.go | 50 ++++++ .../models/custompolicytemplate/fullname.go | 45 +++++ .../policy_update_strategy.go | 41 +++++ .../models/custompolicytemplate/request.go | 41 +++++ internal/models/custompolicytemplate/spec.go | 66 +++++++ .../custompolicytemplate/strategy_type.go | 58 +++++++ internal/provider/provider.go | 62 +++---- .../custompolicytemplate/converter_mapping.go | 38 ++++ .../resource_custom_policy_template.go | 164 ++++++++++++++++++ .../resources/custompolicytemplate/schema.go | 105 +++++++++++ .../tests/custom_policy_template_test.go | 111 ++++++++++++ .../custompolicytemplate/tests/helper_test.go | 35 ++++ .../tests/resource_tf_configs.go | 145 ++++++++++++++++ .../resources/custom_policy_template.md.tmpl | 19 ++ 23 files changed, 1330 insertions(+), 33 deletions(-) create mode 100644 docs/resources/custom_policy_template.md create mode 100644 examples/resources/custom_policy_template/provider.tf create mode 100644 examples/resources/custom_policy_template/resource_custom_policy_template.tf create mode 100644 internal/client/custompolicytemplate/custompolicytemplate_resource.go create mode 100644 internal/models/custompolicytemplate/custompolicytemplate.go create mode 100644 internal/models/custompolicytemplate/data_inventory.go create mode 100644 internal/models/custompolicytemplate/fullname.go create mode 100644 internal/models/custompolicytemplate/policy_update_strategy.go create mode 100644 internal/models/custompolicytemplate/request.go create mode 100644 internal/models/custompolicytemplate/spec.go create mode 100644 internal/models/custompolicytemplate/strategy_type.go create mode 100644 internal/resources/custompolicytemplate/converter_mapping.go create mode 100644 internal/resources/custompolicytemplate/resource_custom_policy_template.go create mode 100644 internal/resources/custompolicytemplate/schema.go create mode 100644 internal/resources/custompolicytemplate/tests/custom_policy_template_test.go create mode 100644 internal/resources/custompolicytemplate/tests/helper_test.go create mode 100644 internal/resources/custompolicytemplate/tests/resource_tf_configs.go create mode 100644 templates/resources/custom_policy_template.md.tmpl diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 467af74e3..5e38c7948 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,7 +6,7 @@ on: - 'v*' env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' + BUILD_TAGS: 'akscluster cluster clustergroup credential 
ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' jobs: goreleaser: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index feed66f07..fc3838aea 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -3,7 +3,7 @@ name: Test and coverage on: [pull_request, push] env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' + BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' jobs: build: name: Test and coverage diff --git a/Makefile b/Makefile index 2a73abed8..3764fb98b 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ ifeq ($(TEST_FLAGS),) endif ifeq ($(BUILD_TAGS),) - BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections' + BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' endif .PHONY: build clean-up test gofmt vet lint acc-test website-lint website-lint-fix diff --git a/docs/resources/custom_policy_template.md b/docs/resources/custom_policy_template.md new file mode 100644 index 000000000..e59a88d0e --- /dev/null +++ b/docs/resources/custom_policy_template.md @@ -0,0 +1,152 @@ +--- +Title: "Custom Policy Template Resource" +Description: |- + Creating a custom policy template. +--- + +# Custom Policy Template Resource + +This resource enables users to create custom policy template in TMC. + +For more information regarding custom policy template, see [Custom Policy Template][custom-policy-template]. 
+ +[custom-policy-template]: https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-F147492B-04FD-4CFD-8D1F-66E36D40D49C.html + +## Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy_template" "sample" { + name = "tf-custom-template-test" + + spec { + object_type = "ConstraintTemplate" + template_type = "OPAGatekeeper" + + data_inventory { + kind = "ConfigMap" + group = "admissionregistration.k8s.io" + version = "v1" + } + + data_inventory { + kind = "Deployment" + group = "extensions" + version = "v1" + } + + template_manifest = < in your <%v> <%v> has no <%v>", [container.name, review.kind.kind, review.object.metadata.name, probe]) + } +YAML + } +} +``` + + +## Schema + +### Required + +- `name` (String) The name of the custom policy template +- `spec` (Block List, Min: 1, Max: 1) Spec block of the custom policy template (see [below for nested schema](#nestedblock--spec)) + +### Optional + +- `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta)) + +### Read-Only + +- `id` (String) The ID of this resource. + + +### Nested Schema for `spec` + +Required: + +- `template_manifest` (String) YAML formatted Kubernetes resource. +The Kubernetes object has to be of the type defined in ObjectType ('ConstraintTemplate'). +The object name must match the name of the wrapping policy template. +This will be applied on the cluster after a policy is created using this version of the template. +This contains the latest version of the object. For previous versions, check Versions API. + +Optional: + +- `data_inventory` (Block List) List of Kubernetes api-resource kinds that need to be synced/replicated in Gatekeeper in order to enforce policy rules on those resources. +Note: This is used for OPAGatekeeper based templates, and should be used if the policy enforcement logic in Rego code uses cached data using "data.inventory" fields. (see [below for nested schema](#nestedblock--spec--data_inventory)) +- `is_deprecated` (Boolean) Flag representing whether the custom policy template is deprecated. +- `object_type` (String) The type of Kubernetes resource encoded in Object. +Currently, we only support OPAGatekeeper based 'ConstraintTemplate' object. +- `template_type` (String) The type of policy template. +Currently, we only support 'OPAGatekeeper' based policy templates. 
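To make the naming constraint above concrete, a trimmed sketch with hypothetical names; the manifest body is abbreviated to the field relevant to the constraint, so this is not a complete ConstraintTemplate. The point is that the resource's `name` and the manifest's `metadata.name` must be identical.

```terraform
resource "tanzu-mission-control_custom_policy_template" "naming_example" {
  name = "tf-naming-example" # must match metadata.name in the manifest below

  spec {
    object_type   = "ConstraintTemplate"
    template_type = "OPAGatekeeper"

    # Abbreviated manifest: only metadata.name is shown to highlight the naming rule.
    template_manifest = <<YAML
apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: tf-naming-example
YAML
  }
}
```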
+ + +### Nested Schema for `spec.data_inventory` + +Required: + +- `group` (String) API resource group +- `kind` (String) API resource kind +- `version` (String) API resource version + + + + +### Nested Schema for `meta` + +Optional: + +- `annotations` (Map of String) Annotations for the resource +- `description` (String) Description of the resource +- `labels` (Map of String) Labels for the resource + +Read-Only: + +- `resource_version` (String) Resource version of the resource +- `uid` (String) UID of the resource diff --git a/examples/resources/custom_policy_template/provider.tf b/examples/resources/custom_policy_template/provider.tf new file mode 100644 index 000000000..3e8ae86af --- /dev/null +++ b/examples/resources/custom_policy_template/provider.tf @@ -0,0 +1,16 @@ +terraform { + required_providers { + tanzu-mission-control = { + source = "vmware/dev/tanzu-mission-control" + } + } +} + +terraform { + backend "local" { + path = "./terraform.tfstate" + } +} + +provider "tanzu-mission-control" { +} diff --git a/examples/resources/custom_policy_template/resource_custom_policy_template.tf b/examples/resources/custom_policy_template/resource_custom_policy_template.tf new file mode 100644 index 000000000..ad4c49532 --- /dev/null +++ b/examples/resources/custom_policy_template/resource_custom_policy_template.tf @@ -0,0 +1,72 @@ +resource "tanzu-mission-control_custom_policy_template" "sample" { + name = "tf-custom-template-test" + + spec { + object_type = "ConstraintTemplate" + template_type = "OPAGatekeeper" + + data_inventory { + kind = "ConfigMap" + group = "admissionregistration.k8s.io" + version = "v1" + } + + data_inventory { + kind = "Deployment" + group = "extensions" + version = "v1" + } + + template_manifest = < in your <%v> <%v> has no <%v>", [container.name, review.kind.kind, review.object.metadata.name, probe]) + } +YAML + } +} diff --git a/internal/client/custompolicytemplate/custompolicytemplate_resource.go b/internal/client/custompolicytemplate/custompolicytemplate_resource.go new file mode 100644 index 000000000..0742a39e6 --- /dev/null +++ b/internal/client/custompolicytemplate/custompolicytemplate_resource.go @@ -0,0 +1,81 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplateclient + +import ( + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + custompolicytemplatemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/custompolicytemplate" +) + +const ( + customPolicyTemplateAPIVersionAndGroup = "v1alpha1/policy/templates" +) + +// New creates a new custom policy template resource service API client. +func New(transport *transport.Client) ClientService { + return &Client{Client: transport} +} + +/* +Client for custom policy template resource service API. +*/ +type Client struct { + *transport.Client +} + +// ClientService is the interface for Client methods. 
+type ClientService interface {
+	CustomPolicyTemplateResourceServiceCreate(request *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error)
+
+	CustomPolicyTemplateResourceServiceUpdate(request *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error)
+
+	CustomPolicyTemplateResourceServiceDelete(fn *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName) error
+
+	CustomPolicyTemplateResourceServiceGet(fn *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error)
+}
+
+/*
+CustomPolicyTemplateResourceServiceGet gets a custom policy template.
+*/
+func (c *Client) CustomPolicyTemplateResourceServiceGet(fullName *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error) {
+	requestURL := helper.ConstructRequestURL(customPolicyTemplateAPIVersionAndGroup, fullName.Name).String()
+	resp := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData{}
+	err := c.Get(requestURL, resp)
+
+	return resp, err
+}
+
+/*
+CustomPolicyTemplateResourceServiceCreate creates a custom policy template.
+*/
+func (c *Client) CustomPolicyTemplateResourceServiceCreate(request *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error) {
+	response := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData{}
+	requestURL := helper.ConstructRequestURL(customPolicyTemplateAPIVersionAndGroup).String()
+	err := c.Create(requestURL, request, response)
+
+	return response, err
+}
+
+/*
+CustomPolicyTemplateResourceServiceUpdate updates a custom policy template.
+*/
+func (c *Client) CustomPolicyTemplateResourceServiceUpdate(request *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData) (*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData, error) {
+	response := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData{}
+	requestURL := helper.ConstructRequestURL(customPolicyTemplateAPIVersionAndGroup, request.Template.FullName.Name).String()
+	err := c.Update(requestURL, request, response)
+
+	return response, err
+}
+
+/*
+CustomPolicyTemplateResourceServiceDelete deletes a custom policy template.
+*/ +func (c *Client) CustomPolicyTemplateResourceServiceDelete(fullName *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName) error { + requestURL := helper.ConstructRequestURL(customPolicyTemplateAPIVersionAndGroup, fullName.Name).String() + + return c.Delete(requestURL) +} diff --git a/internal/client/http_client.go b/internal/client/http_client.go index 45d7d836c..82c750e0c 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -38,6 +38,7 @@ import ( policyclustergroupclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/clustergroup/policy" sourcesecretclustergroupclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/clustergroup/sourcesecret" credentialclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/credential" + custompolicytemplateclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/custompolicytemplate" eksclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster" eksnodepoolclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster/nodepool" inspectionsclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/inspections" @@ -146,6 +147,7 @@ func newHTTPClient(httpClient *transport.Client) *TanzuMissionControl { ClusterClassResourceService: clusterclassclient.New(httpClient), TanzuKubernetesClusterResourceService: tanzukubernetesclusterclient.New(httpClient), ProvisionerResourceService: provisionerclient.New(httpClient), + CustomPolicyTemplateResourceService: custompolicytemplateclient.New(httpClient), } } @@ -205,4 +207,5 @@ type TanzuMissionControl struct { TanzuKubernetesClusterResourceService tanzukubernetesclusterclient.ClientService ProvisionerResourceService provisionerclient.ClientService InspectionsResourceService inspectionsclient.ClientService + CustomPolicyTemplateResourceService custompolicytemplateclient.ClientService } diff --git a/internal/models/custompolicytemplate/custompolicytemplate.go b/internal/models/custompolicytemplate/custompolicytemplate.go new file mode 100644 index 000000000..9c71468f1 --- /dev/null +++ b/internal/models/custompolicytemplate/custompolicytemplate.go @@ -0,0 +1,53 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1PolicyTemplate A Policy Template wraps a Kubernetes resource that is a pre-requisite/dependency for creating policies. +// An example of a policy template is OPAGatekeeper based ConstraintTemplate. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.Template +type VmwareTanzuManageV1alpha1PolicyTemplate struct { + + // Full name for the policy template. + FullName *VmwareTanzuManageV1alpha1PolicyTemplateFullName `json:"fullName,omitempty"` + + // Metadata for the policy template object. + Meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta `json:"meta,omitempty"` + + // Spec for the policy template. + Spec *VmwareTanzuManageV1alpha1PolicyTemplateSpec `json:"spec,omitempty"` + + // Metadata describing the type of the resource. + Type *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. 
+func (m *VmwareTanzuManageV1alpha1PolicyTemplate) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplate) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTemplate + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/data_inventory.go b/internal/models/custompolicytemplate/data_inventory.go new file mode 100644 index 000000000..b6d19c38c --- /dev/null +++ b/internal/models/custompolicytemplate/data_inventory.go @@ -0,0 +1,50 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" +) + +// K8sIoApimachineryPkgApisMetaV1GroupVersionKind GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +// swagger:model k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind +type K8sIoApimachineryPkgApisMetaV1GroupVersionKind struct { + + // group + Group string `json:"group,omitempty"` + + // kind + Kind string `json:"kind,omitempty"` + + // version + Version string `json:"version,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1GroupVersionKind) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1GroupVersionKind) UnmarshalBinary(b []byte) error { + var res K8sIoApimachineryPkgApisMetaV1GroupVersionKind + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/fullname.go b/internal/models/custompolicytemplate/fullname.go new file mode 100644 index 000000000..520e3df71 --- /dev/null +++ b/internal/models/custompolicytemplate/fullname.go @@ -0,0 +1,45 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTemplateFullName Full name of the policy template. This includes the object name along +// with any parents or further identifiers. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.FullName +type VmwareTanzuManageV1alpha1PolicyTemplateFullName struct { + + // Name of policy template. + Name string `json:"name,omitempty"` + + // ID of Organization. + OrgID string `json:"orgId,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplateFullName) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. 
+func (m *VmwareTanzuManageV1alpha1PolicyTemplateFullName) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTemplateFullName + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/policy_update_strategy.go b/internal/models/custompolicytemplate/policy_update_strategy.go new file mode 100644 index 000000000..27f472706 --- /dev/null +++ b/internal/models/custompolicytemplate/policy_update_strategy.go @@ -0,0 +1,41 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy PolicyUpdateStrategy on how to handle policies after a policy template update. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.PolicyUpdateStrategy +type VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy struct { + + // The strategy to use for policy updates. + Type *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/request.go b/internal/models/custompolicytemplate/request.go new file mode 100644 index 000000000..724b2baa6 --- /dev/null +++ b/internal/models/custompolicytemplate/request.go @@ -0,0 +1,41 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTemplateCreateTemplateRequest Request to create a Template. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.CreateTemplateRequest +type VmwareTanzuManageV1alpha1PolicyTemplateData struct { + + // Template to create. + Template *VmwareTanzuManageV1alpha1PolicyTemplate `json:"template,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplateData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplateData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTemplateData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/spec.go b/internal/models/custompolicytemplate/spec.go new file mode 100644 index 000000000..6c10be1e0 --- /dev/null +++ b/internal/models/custompolicytemplate/spec.go @@ -0,0 +1,66 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTemplateSpec Spec of policy template. 
+// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.Spec +type VmwareTanzuManageV1alpha1PolicyTemplateSpec struct { + + // DataInventory is a list of Kubernetes api-resource kinds that need to be synced/replicated + // in Gatekeeper in order to enforce policy rules on those resources. + // Note: This is used for OPAGatekeeper based templates, and should be used if the policy + // enforcement logic in Rego code uses cached data using "data.inventory" fields. + DataInventory []*K8sIoApimachineryPkgApisMetaV1GroupVersionKind `json:"dataInventory"` + + // Deprecated specifies whether this version (latest version) of the policy template is deprecated. + // Updating a policy template deprecates the previous versions. To view all versions, use Versions API. + Deprecated bool `json:"deprecated"` + + // Object is a yaml-formatted Kubernetes resource. + // The Kubernetes object has to be of the type defined in ObjectType ('ConstraintTemplate'). + // The object name must match the name of the wrapping policy template. + // This will be applied on the cluster after a policy is created using this version of the template. + // This contains the latest version of the object. For previous versions, check Versions API. + Object string `json:"object,omitempty"` + + // ObjectType is the type of Kubernetes resource encoded in Object. + // Currently, we only support OPAGatekeeper based 'ConstraintTemplate' object. + ObjectType string `json:"objectType,omitempty"` + + // PolicyUpdateStrategy on how to handle policies after a policy template update. + PolicyUpdateStrategy *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy `json:"policyUpdateStrategy,omitempty"` + + // TemplateType is the type of policy template. + // Currently, we only support 'OPAGatekeeper' based policy templates. + TemplateType string `json:"templateType,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplateSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTemplateSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTemplateSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/custompolicytemplate/strategy_type.go b/internal/models/custompolicytemplate/strategy_type.go new file mode 100644 index 000000000..d82783418 --- /dev/null +++ b/internal/models/custompolicytemplate/strategy_type.go @@ -0,0 +1,58 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplatemodels + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType PolicyUpdateStrategyType defines strategies for updating policies after a policy template update. +// +// - POLICY_UPDATE_STRATEGY_TYPE_UNSPECIFIED: UNSPECIFIED policy update strategy (default). +// +// Updates will not be allowed when this strategy is selected. +// - INPLACE_UPDATE: In-place policy update strategy. +// +// Existing Template will be forcibly updated without creating a new version. +// There will be no changes to the policies using the template. +// Warning: When using this strategy, make sure that the updated template does not +// adversely affect the existing policies. 
+// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.template.PolicyUpdateStrategyType +type VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType string + +const ( + + // VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypePOLICYUPDATESTRATEGYTYPEUNSPECIFIED captures enum value "POLICY_UPDATE_STRATEGY_TYPE_UNSPECIFIED". + VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypePOLICYUPDATESTRATEGYTYPEUNSPECIFIED VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType = "POLICY_UPDATE_STRATEGY_TYPE_UNSPECIFIED" + + // VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeINPLACEUPDATE captures enum value "INPLACE_UPDATE". + VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeINPLACEUPDATE VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType = "INPLACE_UPDATE" +) + +// for schema. +var vmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeEnum []interface{} + +func NewVmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType(value VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType) *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType { + return &value +} + +// Pointer returns a pointer to a freshly-allocated VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType. +func (m VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType) Pointer() *VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType { + return &m +} + +func init() { + var res []VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyType + + if err := json.Unmarshal([]byte(`["POLICY_UPDATE_STRATEGY_TYPE_UNSPECIFIED","INPLACE_UPDATE"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeEnum = append(vmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeEnum, v) + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 8ddc0663e..cc48fc1c8 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -18,6 +18,7 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clusterclass" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clustergroup" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/credential" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/ekscluster" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/gitrepository" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmcharts" @@ -58,36 +59,37 @@ func Provider() *schema.Provider { return &schema.Provider{ Schema: authctx.ProviderAuthSchema(), ResourcesMap: map[string]*schema.Resource{ - cluster.ResourceName: cluster.ResourceTMCCluster(), - ekscluster.ResourceName: ekscluster.ResourceTMCEKSCluster(), - akscluster.ResourceName: akscluster.ResourceTMCAKSCluster(), - workspace.ResourceName: workspace.ResourceWorkspace(), - namespace.ResourceName: namespace.ResourceNamespace(), - clustergroup.ResourceName: clustergroup.ResourceClusterGroup(), - nodepools.ResourceName: nodepools.ResourceNodePool(), - iampolicy.ResourceName: iampolicy.ResourceIAMPolicy(), - custompolicy.ResourceName: custompolicyresource.ResourceCustomPolicy(), - securitypolicy.ResourceName: securitypolicyresource.ResourceSecurityPolicy(), - 
imagepolicy.ResourceName: imagepolicyresource.ResourceImagePolicy(), - quotapolicy.ResourceName: quotapolicyresource.ResourceQuotaPolicy(), - networkpolicy.ResourceName: networkpolicyresource.ResourceNetworkPolicy(), - credential.ResourceName: credential.ResourceCredential(), - integration.ResourceName: integration.ResourceIntegration(), - gitrepository.ResourceName: gitrepository.ResourceGitRepository(), - kustomization.ResourceName: kustomization.ResourceKustomization(), - sourcesecret.ResourceName: sourcesecret.ResourceSourceSecret(), - packagerepository.ResourceName: packagerepository.ResourcePackageRepository(), - tanzupackageinstall.ResourceName: tanzupackageinstall.ResourcePackageInstall(), - kubernetessecret.ResourceName: kubernetessecret.ResourceSecret(), - mutationpolicy.ResourceName: mutationpolicyresource.ResourceMutationPolicy(), - helmrelease.ResourceName: helmrelease.ResourceHelmRelease(), - helmfeature.ResourceName: helmfeature.ResourceHelm(), - backupschedule.ResourceName: backupschedule.ResourceBackupSchedule(), - dataprotection.ResourceName: dataprotection.ResourceEnableDataProtection(), - targetlocation.ResourceName: targetlocation.ResourceTargetLocation(), - managementcluster.ResourceName: managementcluster.ResourceManagementClusterRegistration(), - utkgresource.ResourceName: utkgresource.ResourceTanzuKubernetesCluster(), - provisioner.ResourceName: provisioner.ResourceProvisioner(), + cluster.ResourceName: cluster.ResourceTMCCluster(), + ekscluster.ResourceName: ekscluster.ResourceTMCEKSCluster(), + akscluster.ResourceName: akscluster.ResourceTMCAKSCluster(), + workspace.ResourceName: workspace.ResourceWorkspace(), + namespace.ResourceName: namespace.ResourceNamespace(), + clustergroup.ResourceName: clustergroup.ResourceClusterGroup(), + nodepools.ResourceName: nodepools.ResourceNodePool(), + iampolicy.ResourceName: iampolicy.ResourceIAMPolicy(), + custompolicy.ResourceName: custompolicyresource.ResourceCustomPolicy(), + securitypolicy.ResourceName: securitypolicyresource.ResourceSecurityPolicy(), + imagepolicy.ResourceName: imagepolicyresource.ResourceImagePolicy(), + quotapolicy.ResourceName: quotapolicyresource.ResourceQuotaPolicy(), + networkpolicy.ResourceName: networkpolicyresource.ResourceNetworkPolicy(), + credential.ResourceName: credential.ResourceCredential(), + integration.ResourceName: integration.ResourceIntegration(), + gitrepository.ResourceName: gitrepository.ResourceGitRepository(), + kustomization.ResourceName: kustomization.ResourceKustomization(), + sourcesecret.ResourceName: sourcesecret.ResourceSourceSecret(), + packagerepository.ResourceName: packagerepository.ResourcePackageRepository(), + tanzupackageinstall.ResourceName: tanzupackageinstall.ResourcePackageInstall(), + kubernetessecret.ResourceName: kubernetessecret.ResourceSecret(), + mutationpolicy.ResourceName: mutationpolicyresource.ResourceMutationPolicy(), + helmrelease.ResourceName: helmrelease.ResourceHelmRelease(), + helmfeature.ResourceName: helmfeature.ResourceHelm(), + backupschedule.ResourceName: backupschedule.ResourceBackupSchedule(), + dataprotection.ResourceName: dataprotection.ResourceEnableDataProtection(), + targetlocation.ResourceName: targetlocation.ResourceTargetLocation(), + managementcluster.ResourceName: managementcluster.ResourceManagementClusterRegistration(), + utkgresource.ResourceName: utkgresource.ResourceTanzuKubernetesCluster(), + provisioner.ResourceName: provisioner.ResourceProvisioner(), + custompolicytemplate.ResourceName: 
custompolicytemplate.ResourceCustomPolicyTemplate(), }, DataSourcesMap: map[string]*schema.Resource{ cluster.ResourceName: cluster.DataSourceTMCCluster(), diff --git a/internal/resources/custompolicytemplate/converter_mapping.go b/internal/resources/custompolicytemplate/converter_mapping.go new file mode 100644 index 000000000..6faca793d --- /dev/null +++ b/internal/resources/custompolicytemplate/converter_mapping.go @@ -0,0 +1,38 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplate + +import ( + tfModelConverterHelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper/converter" + custompolicytemplatemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/custompolicytemplate" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common" +) + +var ( + dataInventoryArrayField = tfModelConverterHelper.BuildArrayField("dataInventory") +) + +var tfModelResourceMap = &tfModelConverterHelper.BlockToStruct{ + NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"), + common.MetaKey: common.GetMetaConverterMap(tfModelConverterHelper.DefaultModelPathSeparator), + SpecKey: &tfModelConverterHelper.BlockToStruct{ + IsDeprecatedKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "deprecated"), + TemplateManifestKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "object"), + ObjectTypeKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "objectType"), + TemplateTypeKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "templateType"), + DataInventoryKey: &tfModelConverterHelper.BlockSliceToStructSlice{ + { + GroupKey: tfModelConverterHelper.BuildDefaultModelPath("spec", dataInventoryArrayField, "group"), + VersionKey: tfModelConverterHelper.BuildDefaultModelPath("spec", dataInventoryArrayField, "kind"), + KindKey: tfModelConverterHelper.BuildDefaultModelPath("spec", dataInventoryArrayField, "version"), + }, + }, + }, +} + +var tfModelConverter = tfModelConverterHelper.TFSchemaModelConverter[*custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplate]{ + TFModelMap: tfModelResourceMap, +} diff --git a/internal/resources/custompolicytemplate/resource_custom_policy_template.go b/internal/resources/custompolicytemplate/resource_custom_policy_template.go new file mode 100644 index 000000000..bcfafdee1 --- /dev/null +++ b/internal/resources/custompolicytemplate/resource_custom_policy_template.go @@ -0,0 +1,164 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0
+*/
+
+package custompolicytemplate
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/pkg/errors"
+
+	"github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx"
+	clienterrors "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/errors"
+	"github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper"
+	custompolicytemplatemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/custompolicytemplate"
+)
+
+func ResourceCustomPolicyTemplate() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceCustomPolicyTemplateCreate,
+		UpdateContext: resourceCustomPolicyTemplateUpdate,
+		ReadContext:   resourceCustomPolicyTemplateRead,
+		DeleteContext: resourceCustomPolicyTemplateDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: resourceCustomPolicyTemplateImporter,
+		},
+		Schema: customPolicyTemplateResourceSchema,
+	}
+}
+
+func resourceCustomPolicyTemplateCreate(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) {
+	config := m.(authctx.TanzuContext)
+	model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{})
+
+	if err != nil {
+		return diag.FromErr(errors.Wrapf(err, "Couldn't create custom policy template."))
+	}
+
+	request := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData{
+		Template: model,
+	}
+
+	_, err = config.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceCreate(request)
+
+	if err != nil {
+		return diag.FromErr(errors.Wrapf(err, "Couldn't create custom policy template.\nName: %s", model.FullName.Name))
+	}
+
+	return resourceCustomPolicyTemplateRead(helper.GetContextWithCaller(ctx, helper.CreateState), data, m)
+}
+
+func resourceCustomPolicyTemplateUpdate(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) {
+	config := m.(authctx.TanzuContext)
+	model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{})
+
+	if err != nil {
+		return diag.FromErr(errors.Wrapf(err, "Couldn't update custom policy template."))
+	}
+
+	model.Spec.PolicyUpdateStrategy = &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategy{
+		Type: custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplatePolicyUpdateStrategyTypeINPLACEUPDATE.Pointer(),
+	}
+
+	request := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData{
+		Template: model,
+	}
+
+	_, err = config.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceUpdate(request)
+
+	if err != nil {
+		return diag.FromErr(errors.Wrapf(err, "Couldn't update custom policy template.\nName: %s", model.FullName.Name))
+	}
+
+	return resourceCustomPolicyTemplateRead(helper.GetContextWithCaller(ctx, helper.UpdateState), data, m)
+}
+
+func resourceCustomPolicyTemplateRead(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) {
+	var resp *custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateData
+
+	config := m.(authctx.TanzuContext)
+	model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey})
+
+	if err != nil {
+		return diag.FromErr(errors.Wrapf(err, "Couldn't read custom policy template."))
+	}
+
+	customPolicyFn := model.FullName
+	resp, err =
config.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceGet(customPolicyFn) + + if err != nil { + if clienterrors.IsNotFoundError(err) { + if !helper.IsContextCallerSet(ctx) { + *data = schema.ResourceData{} + + return diags + } else if helper.IsDeleteState(ctx) { + // d.SetId("") is automatically called assuming delete returns no errors, but + // it is added here for explicitness. + _ = schema.RemoveFromState(data, m) + + return diags + } + } + + return diag.FromErr(errors.Wrapf(err, "Couldn't read custom policy template.\nName: %s", customPolicyFn.Name)) + } else if resp != nil { + err = tfModelConverter.FillTFSchema(resp.Template, data) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't read custom policy template.\nName: %s", customPolicyFn.Name)) + } + + data.SetId(customPolicyFn.Name) + } + + return diags +} + +func resourceCustomPolicyTemplateDelete(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey}) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't delete custom policy template.")) + } + + customPolicyFn := model.FullName + err = config.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceDelete(customPolicyFn) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't delete custom policy template.\nName: %s", customPolicyFn.Name)) + } + + return resourceCustomPolicyTemplateRead(helper.GetContextWithCaller(ctx, helper.DeleteState), data, m) +} + +func resourceCustomPolicyTemplateImporter(_ context.Context, data *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + config := m.(authctx.TanzuContext) + customPolicyTemplateName := data.Id() + + if customPolicyTemplateName == "" { + return nil, errors.New("Cluster ID must be set to the custom policy template name.") + } + + customPolicyFn := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName{ + Name: customPolicyTemplateName, + } + + resp, err := config.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceGet(customPolicyFn) + + if err != nil || resp.Template == nil { + return nil, errors.Wrapf(err, "Couldn't read custom policy template.\nName: %s", customPolicyFn.Name) + } + + err = tfModelConverter.FillTFSchema(resp.Template, data) + + if err != nil { + return nil, errors.Wrapf(err, "Couldn't read custom policy template.\nName: %s", customPolicyFn.Name) + } + + return []*schema.ResourceData{data}, err +} diff --git a/internal/resources/custompolicytemplate/schema.go b/internal/resources/custompolicytemplate/schema.go new file mode 100644 index 000000000..1fc8851f7 --- /dev/null +++ b/internal/resources/custompolicytemplate/schema.go @@ -0,0 +1,105 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplate + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common" +) + +const ( + ResourceName = "tanzu-mission-control_custom_policy_template" + + // Root Keys. + NameKey = "name" + SpecKey = "spec" + + // Spec Directive Keys. 
+ IsDeprecatedKey = "is_deprecated" + DataInventoryKey = "data_inventory" + TemplateManifestKey = "template_manifest" + ObjectTypeKey = "object_type" + TemplateTypeKey = "template_type" + + // Data Inventory Directive Keys. + GroupKey = "group" + VersionKey = "version" + KindKey = "kind" +) + +var customPolicyTemplateResourceSchema = map[string]*schema.Schema{ + NameKey: nameSchema, + SpecKey: specSchema, + common.MetaKey: common.Meta, +} + +var nameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "The name of the custom policy template", + Required: true, + ForceNew: true, +} + +var specSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Spec block of the custom policy template", + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + IsDeprecatedKey: { + Type: schema.TypeBool, + Description: "Flag representing whether the custom policy template is deprecated.", + Default: false, + Optional: true, + }, + TemplateManifestKey: { + Type: schema.TypeString, + Description: "YAML formatted Kubernetes resource.\nThe Kubernetes object has to be of the type defined in ObjectType ('ConstraintTemplate').\nThe object name must match the name of the wrapping policy template.\nThis will be applied on the cluster after a policy is created using this version of the template.\nThis contains the latest version of the object. For previous versions, check Versions API.", + Required: true, + }, + ObjectTypeKey: { + Type: schema.TypeString, + Description: "The type of Kubernetes resource encoded in Object.\nCurrently, we only support OPAGatekeeper based 'ConstraintTemplate' object.", + Optional: true, + Default: "ConstraintTemplate", + }, + TemplateTypeKey: { + Type: schema.TypeString, + Description: "The type of policy template.\nCurrently, we only support 'OPAGatekeeper' based policy templates.", + Optional: true, + Default: "OPAGatekeeper", + }, + DataInventoryKey: DataInventorySchema, + }, + }, +} + +var DataInventorySchema = &schema.Schema{ + Type: schema.TypeList, + Description: "List of Kubernetes api-resource kinds that need to be synced/replicated in Gatekeeper in order to enforce policy rules on those resources.\nNote: This is used for OPAGatekeeper based templates, and should be used if the policy enforcement logic in Rego code uses cached data using \"data.inventory\" fields.", + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + GroupKey: { + Type: schema.TypeString, + Description: "API resource group", + Required: true, + }, + KindKey: { + Type: schema.TypeString, + Description: "API resource kind", + Required: true, + }, + VersionKey: { + Type: schema.TypeString, + Description: "API resource version", + Required: true, + }, + }, + }, +} diff --git a/internal/resources/custompolicytemplate/tests/custom_policy_template_test.go b/internal/resources/custompolicytemplate/tests/custom_policy_template_test.go new file mode 100644 index 000000000..0411cbf2a --- /dev/null +++ b/internal/resources/custompolicytemplate/tests/custom_policy_template_test.go @@ -0,0 +1,111 @@ +//go:build custompolicytemplate +// +build custompolicytemplate + +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0
+*/
+
+package custompolicytemplate
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+	"github.com/pkg/errors"
+
+	"github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx"
+	"github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/proxy"
+	custompolicytemplatemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/custompolicytemplate"
+	testhelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/testing"
+)
+
+var (
+	context = authctx.TanzuContext{
+		ServerEndpoint:   os.Getenv(authctx.ServerEndpointEnvVar),
+		Token:            os.Getenv(authctx.VMWCloudAPITokenEnvVar),
+		VMWCloudEndPoint: os.Getenv(authctx.VMWCloudEndpointEnvVar),
+		TLSConfig:        &proxy.TLSConfig{},
+	}
+)
+
+func TestAcceptanceCustomPolicyTemplateResource(t *testing.T) {
+	err := context.Setup()
+
+	if err != nil {
+		t.Error(errors.Wrap(err, "unable to set the context"))
+		t.FailNow()
+	}
+
+	var (
+		provider                = initTestProvider(t)
+		tfResourceConfigBuilder = InitResourceTFConfigBuilder()
+	)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:          testhelper.TestPreCheck(t),
+		ProviderFactories: testhelper.GetTestProviderFactories(provider),
+		CheckDestroy:      nil,
+		Steps: []resource.TestStep{
+			{
+				Config: tfResourceConfigBuilder.GetSlimCustomPolicyTemplateConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(CustomPolicyTemplateResourceFullName, "name", CustomPolicyTemplateName),
+					verifyCustomPolicyTemplateResource(provider, CustomPolicyTemplateResourceFullName, CustomPolicyTemplateName),
+				),
+			},
+			{
+				Config: tfResourceConfigBuilder.GetFullCustomPolicyTemplateConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(CustomPolicyTemplateResourceFullName, "name", CustomPolicyTemplateName),
+					verifyCustomPolicyTemplateResource(provider, CustomPolicyTemplateResourceFullName, CustomPolicyTemplateName),
+				),
+			},
+		},
+	},
+	)
+
+	t.Log("Custom policy template resource acceptance test complete!")
+}
+
+func verifyCustomPolicyTemplateResource(
+	provider *schema.Provider,
+	resourceName string,
+	customPolicyTemplateName string,
+) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if provider == nil {
+			return fmt.Errorf("provider not initialised")
+		}
+
+		rs, ok := s.RootModule().Resources[resourceName]
+
+		if !ok {
+			return fmt.Errorf("could not find resource %s", resourceName)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("ID not set, resource %s", resourceName)
+		}
+
+		fn := &custompolicytemplatemodels.VmwareTanzuManageV1alpha1PolicyTemplateFullName{
+			Name: customPolicyTemplateName,
+		}
+
+		resp, err := context.TMCConnection.CustomPolicyTemplateResourceService.CustomPolicyTemplateResourceServiceGet(fn)
+
+		if err != nil {
+			return errors.Errorf("Custom policy template resource not found, resource: %s | err: %s", resourceName, err)
+		}
+
+		if resp == nil {
+			return errors.Errorf("Custom policy template resource is empty, resource: %s", resourceName)
+		}
+
+		return nil
+	}
+}
diff --git a/internal/resources/custompolicytemplate/tests/helper_test.go b/internal/resources/custompolicytemplate/tests/helper_test.go
new file mode 100644
index 000000000..b362891a7
--- /dev/null
+++ b/internal/resources/custompolicytemplate/tests/helper_test.go
@@ -0,0 +1,35 @@
+//go:build
custompolicytemplate +// +build custompolicytemplate + +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplate + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/require" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + custompolicytemplateres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" +) + +func initTestProvider(t *testing.T) *schema.Provider { + testAccProvider := &schema.Provider{ + Schema: authctx.ProviderAuthSchema(), + ResourcesMap: map[string]*schema.Resource{ + custompolicytemplateres.ResourceName: custompolicytemplateres.ResourceCustomPolicyTemplate(), + }, + ConfigureContextFunc: authctx.ProviderConfigureContext, + } + + if err := testAccProvider.InternalValidate(); err != nil { + require.NoError(t, err) + } + + return testAccProvider +} diff --git a/internal/resources/custompolicytemplate/tests/resource_tf_configs.go b/internal/resources/custompolicytemplate/tests/resource_tf_configs.go new file mode 100644 index 000000000..ec95d302c --- /dev/null +++ b/internal/resources/custompolicytemplate/tests/resource_tf_configs.go @@ -0,0 +1,145 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package custompolicytemplate + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + + custompolicytemplateres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" +) + +const ( + CustomPolicyTemplateResourceName = "test_custom_policy_template" +) + +var ( + CustomPolicyTemplateResourceFullName = fmt.Sprintf("%s.%s", custompolicytemplateres.ResourceName, CustomPolicyTemplateResourceName) + CustomPolicyTemplateName = acctest.RandomWithPrefix("acc-test-custom-policy-template") +) + +type ResourceTFConfigBuilder struct { + TemplateManifest string +} + +func InitResourceTFConfigBuilder() *ResourceTFConfigBuilder { + firstManifestPart := fmt.Sprintf( + `< in your <%v> <%v> has no <%v>", [container.name, review.kind.kind, review.object.metadata.name, probe]) + } +YAML +` + + tfConfigBuilder := &ResourceTFConfigBuilder{ + TemplateManifest: fmt.Sprintf("%s\n%s", firstManifestPart, secondManifestPart), + } + + return tfConfigBuilder +} + +func (builder *ResourceTFConfigBuilder) GetFullCustomPolicyTemplateConfig() string { + return fmt.Sprintf(` + resource "%s" "%s" { + name = "%s" + + spec { + object_type = "ConstraintTemplate" + template_type = "OPAGatekeeper" + + data_inventory { + kind = "ConfigMap" + group = "admissionregistration.k8s.io" + version = "v1" + } + + data_inventory { + kind = "Deployment" + group = "extensions" + version = "v1" + } + + template_manifest = %s + } + } + `, + custompolicytemplateres.ResourceName, + CustomPolicyTemplateResourceName, + CustomPolicyTemplateName, + builder.TemplateManifest, + ) +} + +func (builder *ResourceTFConfigBuilder) GetSlimCustomPolicyTemplateConfig() string { + return fmt.Sprintf(` + resource "%s" "%s" { + name = "%s" + + spec { + object_type = "ConstraintTemplate" + template_type = "OPAGatekeeper" + + template_manifest = %s + } + } + `, + custompolicytemplateres.ResourceName, + CustomPolicyTemplateResourceName, + CustomPolicyTemplateName, + builder.TemplateManifest, + ) +} diff --git a/templates/resources/custom_policy_template.md.tmpl b/templates/resources/custom_policy_template.md.tmpl new file 
mode 100644 index 000000000..d36a7e0a8 --- /dev/null +++ b/templates/resources/custom_policy_template.md.tmpl @@ -0,0 +1,19 @@ +--- +Title: "Custom Policy Template Resource" +Description: |- + Creating a custom policy template. +--- + +# Custom Policy Template Resource + +This resource enables users to create custom policy template in TMC. + +For more information regarding custom policy template, see [Custom Policy Template][custom-policy-template]. + +[custom-policy-template]: https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-F147492B-04FD-4CFD-8D1F-66E36D40D49C.html + +## Example Usage + +{{ tffile "examples/resources/custom_policy_template/resource_custom_policy_template.tf" }} + +{{ .SchemaMarkdown | trimspace }} From b0148a253a03f4263d2715fac754b3127e0a077b Mon Sep 17 00:00:00 2001 From: GilTS Date: Tue, 21 Nov 2023 22:17:26 +0200 Subject: [PATCH 15/22] Add support for Custom Policy Assignment Signed-off-by: GilTS --- docs/resources/custom_policy.md | 240 +++++++++++++++++- .../resource_cluster_group_tmc_custom.tf | 64 +++++ .../resource_cluster_tmc_custom.tf | 66 +++++ .../resource_organization_tmc_custom.tf | 62 +++++ internal/client/http_client.go | 3 + internal/client/recipe/recipe_resource.go | 46 ++++ .../models/policy/recipe/custom/tmc_custom.go | 54 ++++ internal/models/recipe/fullname.go | 48 ++++ internal/models/recipe/recipe.go | 54 ++++ internal/models/recipe/request.go | 41 +++ internal/models/recipe/spec.go | 54 ++++ .../resources/policy/kind/custom/constants.go | 1 + .../policy/kind/custom/input_schema.go | 78 ++++-- .../policy/kind/custom/recipe/constants.go | 2 + .../recipe/tmc_custom_recipe_flatten_test.go | 108 ++++++++ .../kind/custom/recipe/tmc_custom_schema.go | 168 ++++++++++++ .../policy/kind/custom/spec_schema.go | 20 ++ templates/resources/custom_policy.md.tmpl | 19 ++ 18 files changed, 1100 insertions(+), 28 deletions(-) create mode 100644 examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf create mode 100644 examples/resources/custom_policy/resource_cluster_tmc_custom.tf create mode 100644 examples/resources/custom_policy/resource_organization_tmc_custom.tf create mode 100644 internal/client/recipe/recipe_resource.go create mode 100644 internal/models/policy/recipe/custom/tmc_custom.go create mode 100644 internal/models/recipe/fullname.go create mode 100644 internal/models/recipe/recipe.go create mode 100644 internal/models/recipe/request.go create mode 100644 internal/models/recipe/spec.go create mode 100644 internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go create mode 100644 internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go diff --git a/docs/resources/custom_policy.md b/docs/resources/custom_policy.md index 8042c5eda..a2086ce2e 100644 --- a/docs/resources/custom_policy.md +++ b/docs/resources/custom_policy.md @@ -18,6 +18,7 @@ In the Tanzu Mission Control custom policy resource, there are six system define - **tmc-external-ips** - **tmc-https-ingress** - **tmc-require-labels** +- **Any custom template defined in TMC** ## Policy Scope and Inheritance @@ -407,6 +408,79 @@ resource "tanzu-mission-control_custom_policy" "cluster_scoped_tmc-require-label } ``` +## Cluster scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster { + management_cluster_name = "attached" + provisioner_name = "attached" + name = "tf-create-test" + } + } + + + spec { + 
input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Cluster group scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -742,6 +816,77 @@ resource "tanzu-mission-control_custom_policy" "cluster_group_scoped_tmc-require } ``` +## Cluster group scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster_group { + cluster_group = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Organization scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -1077,6 +1222,75 @@ resource "tanzu-mission-control_custom_policy" "organization_scoped_tmc-require- } ``` +## Organization scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + organization { + organization = "dummy-id" + } + } + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Schema @@ -1147,7 +1361,7 @@ Required: Required: -- `input` (Block List, Min: 1, Max: 1) Input for the custom policy, having one of the valid recipes: tmc_block_nodeport_service, tmc_block_resources, tmc_block_rolebinding_subjects, tmc_external_ips, tmc_https_ingress or tmc_require_labels. (see [below for nested schema](#nestedblock--spec--input)) +- `input` (Block List, Min: 1, Max: 1) Input for the custom policy, having one of the valid recipes: [tmc_block_nodeport_service tmc_block_resources tmc_block_rolebinding_subjects tmc_external_ips tmc_https_ingress tmc_require_labels custom]. 
(see [below for nested schema](#nestedblock--spec--input)) Optional: @@ -1158,6 +1372,7 @@ Optional: Optional: +- `custom` (Block List, Max: 1) The input schema for custom policy tmc_external_ips recipe version v1 (see [below for nested schema](#nestedblock--spec--input--custom)) - `tmc_block_nodeport_service` (Block List, Max: 1) The input schema for custom policy tmc_block_nodeport_service recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_nodeport_service)) - `tmc_block_resources` (Block List, Max: 1) The input schema for custom policy tmc_block_resources recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_resources)) - `tmc_block_rolebinding_subjects` (Block List, Max: 1) The input schema for custom policy tmc_block_rolebinding_subjects recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_rolebinding_subjects)) @@ -1165,6 +1380,29 @@ Optional: - `tmc_https_ingress` (Block List, Max: 1) The input schema for custom policy tmc_https_ingress recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_https_ingress)) - `tmc_require_labels` (Block List, Max: 1) The input schema for custom policy tmc_require_labels recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_require_labels)) + +### Nested Schema for `spec.input.custom` + +Required: + +- `target_kubernetes_resources` (Block List, Min: 1) A list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. (see [below for nested schema](#nestedblock--spec--input--custom--target_kubernetes_resources)) +- `template_name` (String) Name of custom template. + +Optional: + +- `audit` (Boolean) Audit (dry-run). +- `parameters` (String) JSON encoded template parameters. + + +### Nested Schema for `spec.input.custom.target_kubernetes_resources` + +Required: + +- `api_groups` (List of String) APIGroup is a group containing the resource type. +- `kinds` (List of String) Kind is the name of the object schema (resource type). 
+ + + ### Nested Schema for `spec.input.tmc_block_nodeport_service` diff --git a/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf b/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf new file mode 100644 index 000000000..3bc9f0270 --- /dev/null +++ b/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf @@ -0,0 +1,64 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster_group { + cluster_group = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/examples/resources/custom_policy/resource_cluster_tmc_custom.tf b/examples/resources/custom_policy/resource_cluster_tmc_custom.tf new file mode 100644 index 000000000..6ddcf5f85 --- /dev/null +++ b/examples/resources/custom_policy/resource_cluster_tmc_custom.tf @@ -0,0 +1,66 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster { + management_cluster_name = "attached" + provisioner_name = "attached" + name = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/examples/resources/custom_policy/resource_organization_tmc_custom.tf b/examples/resources/custom_policy/resource_organization_tmc_custom.tf new file mode 100644 index 000000000..57e06f566 --- /dev/null +++ b/examples/resources/custom_policy/resource_organization_tmc_custom.tf @@ -0,0 +1,62 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + organization { + organization = "dummy-id" + } + } + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/internal/client/http_client.go b/internal/client/http_client.go index 82c750e0c..0f3092b12 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -55,6 +55,7 @@ import ( policyorganizationclient 
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/organization/policy" provisionerclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/provisioner" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/proxy" + recipeclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/recipe" tanzukubernetesclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzukubernetescluster" tanzupackageclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzupackage" pkginstallclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzupackageinstall" @@ -148,6 +149,7 @@ func newHTTPClient(httpClient *transport.Client) *TanzuMissionControl { TanzuKubernetesClusterResourceService: tanzukubernetesclusterclient.New(httpClient), ProvisionerResourceService: provisionerclient.New(httpClient), CustomPolicyTemplateResourceService: custompolicytemplateclient.New(httpClient), + RecipeResourceService: recipeclient.New(httpClient), } } @@ -208,4 +210,5 @@ type TanzuMissionControl struct { ProvisionerResourceService provisionerclient.ClientService InspectionsResourceService inspectionsclient.ClientService CustomPolicyTemplateResourceService custompolicytemplateclient.ClientService + RecipeResourceService recipeclient.ClientService } diff --git a/internal/client/recipe/recipe_resource.go b/internal/client/recipe/recipe_resource.go new file mode 100644 index 000000000..4d614c3a8 --- /dev/null +++ b/internal/client/recipe/recipe_resource.go @@ -0,0 +1,46 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipeclient + +import ( + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + + recipemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/recipe" +) + +const ( + policyAPIPath = "v1alpha1/policy/types" + recipeAPIPath = "recipes" +) + +// New creates a new recipe resource service API client. +func New(transport *transport.Client) ClientService { + return &Client{Client: transport} +} + +/* +Client for recipe resource service API. +*/ +type Client struct { + *transport.Client +} + +// ClientService is the interface for Client methods. +type ClientService interface { + RecipeResourceServiceGet(fn *recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) (*recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData, error) +} + +/* +RecipeResourceServiceGet gets a recipe. +*/ +func (c *Client) RecipeResourceServiceGet(fn *recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) (*recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData, error) { + requestURL := helper.ConstructRequestURL(policyAPIPath, fn.TypeName, recipeAPIPath, fn.Name).String() + resp := &recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData{} + err := c.Get(requestURL, resp) + + return resp, err +} diff --git a/internal/models/policy/recipe/custom/tmc_custom.go b/internal/models/policy/recipe/custom/tmc_custom.go new file mode 100644 index 000000000..a44947aba --- /dev/null +++ b/internal/models/policy/recipe/custom/tmc_custom.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package policyrecipecustommodel + +import ( + "github.com/go-openapi/swag" + + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" +) + +// VmwareTanzuManageV1alpha1CommonPolicySpecCustom tmc-external-ips recipe schema. +// +// The input schema for tmc-external-ips recipe. +// +// swagger:model VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCExternalIPS +type VmwareTanzuManageV1alpha1CommonPolicySpecCustom struct { + + // Audit (dry-run). + // Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). + Audit bool `json:"audit,omitempty"` + + // Parameters. + Parameters map[string]interface{} `json:"parameters,omitempty"` + + // TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. You can use 'kubectl api-resources' to view the list of available api resources on your cluster. + // Required: true + // Min Items: 1 + TargetKubernetesResources []*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources `json:"targetKubernetesResources"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1CommonPolicySpecCustom) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1CommonPolicySpecCustom) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/fullname.go b/internal/models/recipe/fullname.go new file mode 100644 index 000000000..0aeb80542 --- /dev/null +++ b/internal/models/recipe/fullname.go @@ -0,0 +1,48 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName Full name of the policy recipe. This includes the object name along +// with any parents or further identifiers. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.FullName +type VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName struct { + + // Name of policy recipe. + Name string `json:"name,omitempty"` + + // ID of Organization. + OrgID string `json:"orgId,omitempty"` + + // Name of policy type. + TypeName string `json:"typeName,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/recipe.go b/internal/models/recipe/recipe.go new file mode 100644 index 000000000..8e39509dc --- /dev/null +++ b/internal/models/recipe/recipe.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipe A Recipe is an internal template for policy type. +// +// Recipe is a convenience decorator. It gives a friendly way to produce policy instances using simple parameters. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.Recipe +type VmwareTanzuManageV1alpha1PolicyTypeRecipe struct { + + // Full name for the policy recipe. + FullName *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName `json:"fullName,omitempty"` + + // Metadata for the policy recipe object. + Meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta `json:"meta,omitempty"` + + // Spec for the policy recipe. + Spec *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec `json:"spec,omitempty"` + + // Metadata describing the type of the resource. + Type *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipe) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipe) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipe + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/request.go b/internal/models/recipe/request.go new file mode 100644 index 000000000..e0a950189 --- /dev/null +++ b/internal/models/recipe/request.go @@ -0,0 +1,41 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeData Response from getting a Recipe. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.GetRecipeResponse +type VmwareTanzuManageV1alpha1PolicyTypeRecipeData struct { + + // Recipe returned. + Recipe *VmwareTanzuManageV1alpha1PolicyTypeRecipe `json:"recipe,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/spec.go b/internal/models/recipe/spec.go new file mode 100644 index 000000000..28a1879a9 --- /dev/null +++ b/internal/models/recipe/spec.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec Spec of policy recipe. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.Spec +type VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec struct { + + // Deprecated specifies whether this version (latest version) of the recipe is deprecated. 
+ // Deprecated recipes will not be assignable to new policy instances nor visible in the UI. + Deprecated bool `json:"deprecated,omitempty"` + + // InputSchema defines the set of variable inputs needed to create a policy using this recipe, in JsonSchema format. + // This input schema is for the latest version of the recipe. For previous versions, check Versions API. + InputSchema string `json:"inputSchema,omitempty"` + + // Policy templates are references to kubernetes resources (policy pre-requisites) associated with this recipe. + // These templates will be applied on clusters where policy instances using this recipe are effective. + // A recipe can have 0 or more templates associated with it. + // These references are for the latest version of the recipe. For previous versions, check Versions API. + PolicyTemplates []*objectmetamodel.VmwareTanzuCoreV1alpha1ObjectReference `json:"policyTemplates"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/resources/policy/kind/custom/constants.go b/internal/resources/policy/kind/custom/constants.go index aa13d8857..6fa936d16 100644 --- a/internal/resources/policy/kind/custom/constants.go +++ b/internal/resources/policy/kind/custom/constants.go @@ -24,4 +24,5 @@ const ( TMCExternalIPSRecipe Recipe = reciperesource.TMCExternalIPSKey TMCHTTPSIngressRecipe Recipe = reciperesource.TMCHTTPSIngressKey TMCRequireLabelsRecipe Recipe = reciperesource.TMCRequireLabelsKey + TMCCustomRecipe Recipe = reciperesource.TMCCustomKey ) diff --git a/internal/resources/policy/kind/custom/input_schema.go b/internal/resources/policy/kind/custom/input_schema.go index 5ede487b1..991d5dbc4 100644 --- a/internal/resources/policy/kind/custom/input_schema.go +++ b/internal/resources/policy/kind/custom/input_schema.go @@ -11,16 +11,28 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy" reciperesource "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/custom/recipe" ) var ( + RecipesAllowed = [...]string{ + reciperesource.TMCBlockNodeportServiceKey, + reciperesource.TMCBlockResourcesKey, + reciperesource.TMCBlockRolebindingSubjectsKey, + reciperesource.TMCExternalIPSKey, + reciperesource.TMCHTTPSIngressKey, + reciperesource.TMCRequireLabelsKey, + reciperesource.TMCCustomKey, + } + inputSchema = &schema.Schema{ Type: schema.TypeList, - Description: "Input for the custom policy, having one of the valid recipes: tmc_block_nodeport_service, tmc_block_resources, tmc_block_rolebinding_subjects, tmc_external_ips, tmc_https_ingress or tmc_require_labels.", + Description: fmt.Sprintf("Input for the custom policy, having one of the valid recipes: %v.", RecipesAllowed), Required: true, MaxItems: 1, MinItems: 1, @@ -33,10 +45,10 @@ var ( 
reciperesource.TMCExternalIPSKey: reciperesource.TMCExternalIps, reciperesource.TMCHTTPSIngressKey: reciperesource.TMCHTTPSIngress, reciperesource.TMCRequireLabelsKey: reciperesource.TMCRequireLabels, + reciperesource.TMCCustomKey: reciperesource.TMCCustomSchema, }, }, } - RecipesAllowed = [...]string{reciperesource.TMCBlockNodeportServiceKey, reciperesource.TMCBlockResourcesKey, reciperesource.TMCBlockRolebindingSubjectsKey, reciperesource.TMCExternalIPSKey, reciperesource.TMCHTTPSIngressKey, reciperesource.TMCRequireLabelsKey} ) type ( @@ -50,6 +62,10 @@ type ( inputTMCExternalIps *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCExternalIPS inputTMCHTTPSIngress *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCCommonRecipe inputTMCRequireLabels *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCRequireLabels + inputTMCCustom *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + // recipeTMCCustom is needed when using a custom policy template + recipeTMCCustom string } ) @@ -114,6 +130,18 @@ func constructInput(data []interface{}) (inputRecipeData *inputRecipe) { } } + if input, ok := inputData[reciperesource.TMCCustomKey]; ok { + if recipeData, ok := input.([]interface{}); ok && len(recipeData) != 0 { + recipeName := recipeData[0].(map[string]interface{})[reciperesource.TemplateNameKey].(string) + + inputRecipeData = &inputRecipe{ + recipe: TMCCustomRecipe, + recipeTMCCustom: recipeName, + inputTMCCustom: reciperesource.ConstructTMCCustom(recipeData), + } + } + } + return inputRecipeData } @@ -137,6 +165,8 @@ func flattenInput(inputRecipeData *inputRecipe) (data []interface{}) { flattenInputData[reciperesource.TMCHTTPSIngressKey] = reciperesource.FlattenTMCCommonRecipe(inputRecipeData.inputTMCHTTPSIngress) case TMCRequireLabelsRecipe: flattenInputData[reciperesource.TMCRequireLabelsKey] = reciperesource.FlattenTMCRequireLabels(inputRecipeData.inputTMCRequireLabels) + case TMCCustomRecipe: + flattenInputData[reciperesource.TMCCustomKey] = reciperesource.FlattenTMCCustom(inputRecipeData.recipeTMCCustom, inputRecipeData.inputTMCCustom) case UnknownRecipe: fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. 
Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) @@ -176,39 +206,33 @@ func ValidateInput(ctx context.Context, diff *schema.ResourceDiff, i interface{} inputData, _ := inputType[0].(map[string]interface{}) recipesFound := make([]string, 0) - if recipeData, ok := inputData[reciperesource.TMCBlockNodeportServiceKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockNodeportServiceKey) - } - } - - if recipeData, ok := inputData[reciperesource.TMCBlockResourcesKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockResourcesKey) - } + recipes := []string{ + reciperesource.TMCBlockNodeportServiceKey, + reciperesource.TMCBlockResourcesKey, + reciperesource.TMCBlockRolebindingSubjectsKey, + reciperesource.TMCExternalIPSKey, + reciperesource.TMCHTTPSIngressKey, + reciperesource.TMCRequireLabelsKey, } - if recipeData, ok := inputData[reciperesource.TMCBlockRolebindingSubjectsKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockRolebindingSubjectsKey) + for _, recipe := range recipes { + if recipeData, ok := inputData[recipe]; ok { + if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { + recipesFound = append(recipesFound, recipe) + } } } - if recipeData, ok := inputData[reciperesource.TMCExternalIPSKey]; ok { + if recipeData, ok := inputData[reciperesource.TMCCustomKey]; ok { if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCExternalIPSKey) - } - } + config := i.(authctx.TanzuContext) + err := reciperesource.ValidateCustomRecipe(config, recipeType[0].(map[string]interface{})) - if recipeData, ok := inputData[reciperesource.TMCHTTPSIngressKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCHTTPSIngressKey) - } - } + if err != nil { + return errors.Wrapf(err, "Custom Recipe validation failed:\n") + } - if recipeData, ok := inputData[reciperesource.TMCRequireLabelsKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCRequireLabelsKey) + recipesFound = append(recipesFound, reciperesource.TMCCustomKey) } } diff --git a/internal/resources/policy/kind/custom/recipe/constants.go b/internal/resources/policy/kind/custom/recipe/constants.go index 8dab89d7c..e05eab4b5 100644 --- a/internal/resources/policy/kind/custom/recipe/constants.go +++ b/internal/resources/policy/kind/custom/recipe/constants.go @@ -12,6 +12,7 @@ const ( TMCBlockNodeportServiceKey = "tmc_block_nodeport_service" TMCBlockResourcesKey = "tmc_block_resources" TMCHTTPSIngressKey = "tmc_https_ingress" + TMCCustomKey = "custom" AuditKey = "audit" TargetKubernetesResourcesKey = "target_kubernetes_resources" ParametersKey = "parameters" @@ -24,4 +25,5 @@ const ( disallowedSubjectsKey = "disallowed_subjects" kindKey = "kind" nameKey = "name" + TemplateNameKey = "template_name" ) diff --git a/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go b/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go new file mode 100644 index 000000000..b5959cdc6 --- /dev/null +++ 
b/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go @@ -0,0 +1,108 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipe + +import ( + "testing" + + "github.com/stretchr/testify/require" + + policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" +) + +const ( + customRecipeTemplateName = "some-custom-template" +) + +func TestFlattenTMCCustom(t *testing.T) { + t.Parallel() + + cases := []struct { + description string + input *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + expected []interface{} + }{ + { + description: "check for nil custom policy recipe", + input: nil, + expected: nil, + }, + { + description: "normal scenario with complete custom policy recipe", + input: &policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom{ + Audit: true, + Parameters: map[string]interface{}{ + "ranges": []map[string]interface{}{ + { + "min_replicas": 3, + "max_replicas": 7, + }, + }, + }, + TargetKubernetesResources: []*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources{ + { + APIGroups: []string{"apps"}, + Kinds: []string{"Deployment", "StatefulSet"}, + }, + }, + }, + expected: []interface{}{ + map[string]interface{}{ + AuditKey: true, + TemplateNameKey: customRecipeTemplateName, + ParametersKey: "{\"ranges\":[{\"max_replicas\":7,\"min_replicas\":3}]}", + TargetKubernetesResourcesKey: []interface{}{ + map[string]interface{}{ + APIGroupsKey: []string{"apps"}, + KindsKey: []string{"Deployment", "StatefulSet"}, + }, + }, + }, + }, + }, + } + + for _, each := range cases { + test := each + t.Run(test.description, func(t *testing.T) { + actual := FlattenTMCCustom(customRecipeTemplateName, test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestValidateRecipeParameters(t *testing.T) { + t.Parallel() + + cases := []struct { + description string + recipeSchema string + recipeParameters string + expectedErrors bool + }{ + { + description: "scenario for valid recipe parameters", + recipeSchema: "{\"description\":\"The input schema for replica-count-range-enforcement recipe\",\"type\":\"object\",\"title\":\"replica-count-range-enforcement recipe schema\",\"required\":[\"targetKubernetesResources\"],\"properties\":{\"audit\":{\"description\":\"Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). (This is deprecated, please use enforcementAction instead)\",\"type\":\"boolean\",\"title\":\"Audit (dry-run)\"},\"enforcementAction\":{\"description\":\"Select the action to take when the policy is violated.\",\"type\":\"string\",\"title\":\"Enforcement Action\",\"pattern\":\"dryrun|warn|deny\"},\"parameters\":{\"type\":\"object\",\"properties\":{\"ranges\":{\"description\":\"Allowed ranges for numbers of replicas. Values are inclusive.\",\"type\":\"array\",\"items\":{\"description\":\"A range of allowed replicas. 
Values are inclusive.\",\"type\":\"object\",\"properties\":{\"max_replicas\":{\"description\":\"The maximum number of replicas allowed, inclusive.\",\"type\":\"integer\"},\"min_replicas\":{\"description\":\"The minimum number of replicas allowed, inclusive.\",\"type\":\"integer\"}}}}}},\"targetKubernetesResources\":{\"description\":\"TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. You can use 'kubectl api-resources' to view the list of available api resources on your cluster.\",\"type\":\"array\",\"minItems\":1,\"items\":{\"required\":[\"apiGroups\",\"kinds\"],\"properties\":{\"apiGroups\":{\"description\":\"apiGroup is group containing the resource type, for example 'rbac.authorization.k8s.io', 'networking.k8s.io', 'extensions', '' (some resources like Namespace, Pod have empty apiGroup).\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}},\"kinds\":{\"description\":\"kind is the name of the object schema (resource type), for example 'Namespace', 'Pod', 'Ingress'\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}}}}}}}", + recipeParameters: "{\"ranges\":[{\"max_replicas\":7,\"min_replicas\":3}]}", + expectedErrors: false, + }, + { + description: "scenario for invalid recipe parameters", + recipeSchema: "{\"description\":\"The input schema for replica-count-range-enforcement recipe\",\"type\":\"object\",\"title\":\"replica-count-range-enforcement recipe schema\",\"required\":[\"targetKubernetesResources\"],\"properties\":{\"audit\":{\"description\":\"Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). (This is deprecated, please use enforcementAction instead)\",\"type\":\"boolean\",\"title\":\"Audit (dry-run)\"},\"enforcementAction\":{\"description\":\"Select the action to take when the policy is violated.\",\"type\":\"string\",\"title\":\"Enforcement Action\",\"pattern\":\"dryrun|warn|deny\"},\"parameters\":{\"type\":\"object\",\"properties\":{\"ranges\":{\"description\":\"Allowed ranges for numbers of replicas. Values are inclusive.\",\"type\":\"array\",\"items\":{\"description\":\"A range of allowed replicas. Values are inclusive.\",\"type\":\"object\",\"properties\":{\"max_replicas\":{\"description\":\"The maximum number of replicas allowed, inclusive.\",\"type\":\"integer\"},\"min_replicas\":{\"description\":\"The minimum number of replicas allowed, inclusive.\",\"type\":\"integer\"}}}}}},\"targetKubernetesResources\":{\"description\":\"TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. 
You can use 'kubectl api-resources' to view the list of available api resources on your cluster.\",\"type\":\"array\",\"minItems\":1,\"items\":{\"required\":[\"apiGroups\",\"kinds\"],\"properties\":{\"apiGroups\":{\"description\":\"apiGroup is group containing the resource type, for example 'rbac.authorization.k8s.io', 'networking.k8s.io', 'extensions', '' (some resources like Namespace, Pod have empty apiGroup).\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}},\"kinds\":{\"description\":\"kind is the name of the object schema (resource type), for example 'Namespace', 'Pod', 'Ingress'\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}}}}}}}", + recipeParameters: "{\"replica_ranges\":[{\"maximum\":7,\"minimum\":3}]}", + expectedErrors: true, + }, + } + + for _, each := range cases { + test := each + t.Run(test.description, func(t *testing.T) { + actual := ValidateRecipeParameters(test.recipeSchema, test.recipeParameters) + require.Equal(t, test.expectedErrors, len(actual) > 0) + }) + } +} diff --git a/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go b/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go new file mode 100644 index 000000000..5f2462186 --- /dev/null +++ b/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go @@ -0,0 +1,168 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipe + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + openapiv3 "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper/openapi_v3_schema_validator" + policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" + recipemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/recipe" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/common" +) + +var TMCCustomSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "The input schema for custom policy tmc_external_ips recipe version v1", + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + TemplateNameKey: { + Type: schema.TypeString, + Description: "Name of custom template.", + Required: true, + }, + AuditKey: { + Type: schema.TypeBool, + Description: "Audit (dry-run).", + Optional: true, + Default: false, + }, + ParametersKey: { + Type: schema.TypeString, + Description: "JSON encoded template parameters.", + Optional: true, + }, + TargetKubernetesResourcesKey: common.TargetKubernetesResourcesSchema, + }, + }, +} + +func FlattenTMCCustom(recipeName string, customRecipe *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom) []interface{} { + if customRecipe == nil { + return nil + } + + customInputMap := make(map[string]interface{}) + customInputMap[AuditKey] = customRecipe.Audit + customInputMap[TemplateNameKey] = recipeName + + if customRecipe.Parameters != nil { + parametersJSONBytes, _ := json.Marshal(customRecipe.Parameters) + customInputMap[ParametersKey] = 
helper.ConvertToString(parametersJSONBytes, "") + } + + targetKubernetesResources := make([]interface{}, 0) + + for _, tkr := range customRecipe.TargetKubernetesResources { + targetKubernetesResources = append(targetKubernetesResources, common.FlattenTargetKubernetesResources(tkr)) + } + + customInputMap[TargetKubernetesResourcesKey] = targetKubernetesResources + + return []interface{}{customInputMap} +} + +func ConstructTMCCustom(customRecipe []interface{}) (customInputModel *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom) { + if len(customRecipe) != 0 && customRecipe[0] != nil { + customInputMap := customRecipe[0].(map[string]interface{}) + + customInputModel = &policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom{ + Audit: customInputMap[AuditKey].(bool), + } + + parametersData := customInputMap[ParametersKey].(string) + + if parametersData != "" { + parametersJSON := make(map[string]interface{}) + + _ = json.Unmarshal([]byte(parametersData), ¶metersJSON) + + customInputModel.Parameters = parametersJSON + } + + targetKubernetesResourcesData := customInputMap[TargetKubernetesResourcesKey].([]interface{}) + customInputModel.TargetKubernetesResources = make([]*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources, 0) + + for _, targetKubernetesResource := range targetKubernetesResourcesData { + customInputModel.TargetKubernetesResources = append(customInputModel.TargetKubernetesResources, common.ExpandTargetKubernetesResources(targetKubernetesResource)) + } + } + + return customInputModel +} + +func ValidateCustomRecipe(config authctx.TanzuContext, customRecipe map[string]interface{}) error { + errMessages := make([]string, 0) + customTemplateName := customRecipe[TemplateNameKey].(string) + + recipeModel := &recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName{ + TypeName: "custom-policy", + Name: customTemplateName, + } + + recipeData, err := config.TMCConnection.RecipeResourceService.RecipeResourceServiceGet(recipeModel) + + if err != nil { + errMessages = append(errMessages, err.Error()) + } else { + errs := ValidateRecipeParameters(recipeData.Recipe.Spec.InputSchema, customRecipe[ParametersKey].(string)) + + if len(errs) > 0 { + errMsg := "" + + for _, e := range errs { + if errMsg == "" { + errMsg = e.Error() + } else { + errMsg = fmt.Sprintf("%s\n%s", errMsg, e.Error()) + } + } + + errMessages = append(errMessages, errMsg) + } + } + + if len(errMessages) > 0 { + errMsg := strings.Join(errMessages, "\n") + + return errors.New(errMsg) + } + + return nil +} + +func ValidateRecipeParameters(recipeSchema string, recipeParameters string) (errs []error) { + recipeSchemaJSON := make(map[string]interface{}) + _ = json.Unmarshal([]byte(recipeSchema), &recipeSchemaJSON) + + recipeParametersSchema, parametersSchemaExist := recipeSchemaJSON["properties"].(map[string]interface{})["parameters"] + + if parametersSchemaExist { + openAPIV3Validator := &openapiv3.OpenAPIV3SchemaValidator{ + Schema: recipeParametersSchema.(map[string]interface{})["properties"].(map[string]interface{}), + } + + recipeParametersJSON := make(map[string]interface{}) + _ = json.Unmarshal([]byte(recipeParameters), &recipeParametersJSON) + errs = make([]error, 0) + + errs = append(errs, openAPIV3Validator.ValidateRequiredFields(recipeParametersJSON)...) + errs = append(errs, openAPIV3Validator.ValidateFormat(recipeParametersJSON)...) 
+ } + + return errs +} diff --git a/internal/resources/policy/kind/custom/spec_schema.go b/internal/resources/policy/kind/custom/spec_schema.go index 7efbc4ed6..5984a2463 100644 --- a/internal/resources/policy/kind/custom/spec_schema.go +++ b/internal/resources/policy/kind/custom/spec_schema.go @@ -92,6 +92,12 @@ func ConstructSpec(d *schema.ResourceData) (spec *policymodel.VmwareTanzuManageV if inputRecipeData.inputTMCRequireLabels != nil { spec.Input = *inputRecipeData.inputTMCRequireLabels } + case TMCCustomRecipe: + spec.Recipe = inputRecipeData.recipeTMCCustom + + if inputRecipeData.inputTMCCustom != nil { + spec.Input = *inputRecipeData.inputTMCCustom + } case UnknownRecipe: fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) } @@ -203,6 +209,20 @@ func FlattenSpec(spec *policymodel.VmwareTanzuManageV1alpha1CommonPolicySpec) (d } case string(UnknownRecipe): fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) + default: + var tmcCustomRecipeInput policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + err = tmcCustomRecipeInput.UnmarshalBinary(byteSlice) + + if err != nil { + return data + } + + inputRecipeData = &inputRecipe{ + recipe: TMCCustomRecipe, + recipeTMCCustom: spec.Recipe, + inputTMCCustom: &tmcCustomRecipeInput, + } } flattenSpecData[policy.InputKey] = flattenInput(inputRecipeData) diff --git a/templates/resources/custom_policy.md.tmpl b/templates/resources/custom_policy.md.tmpl index 323fc17b6..62f921bc2 100644 --- a/templates/resources/custom_policy.md.tmpl +++ b/templates/resources/custom_policy.md.tmpl @@ -18,6 +18,7 @@ In the Tanzu Mission Control custom policy resource, there are six system define - **tmc-external-ips** - **tmc-https-ingress** - **tmc-require-labels** +- **Any custom template defined in TMC** ## Policy Scope and Inheritance @@ -96,6 +97,12 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_cluster_tmc_require_labels_custom_policy.tf" }} +## Cluster scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_cluster_tmc_custom.tf" }} + ## Cluster group scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -132,6 +139,12 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_cluster_group_tmc_require_labels_custom_policy.tf" }} +## Cluster group scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf" }} + ## Organization scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -168,4 +181,10 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_organization_tmc_require_labels_custom_policy.tf" }} +## Organization scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_organization_tmc_custom.tf" }} + {{ .SchemaMarkdown | trimspace }} From da0f96612ec30459c24f245177445eb9a65b6562 Mon Sep 17 00:00:00 2001 From: Shobha M Date: Wed, 17 Jan 2024 10:32:07 +0530 Subject: [PATCH 16/22] Add acceptance test and guide for custom policy resource Signed-off-by: Shobha M --- docs/guides/tanzu-mission-control_policy.md | 5 + .../resource/custom_policy_provider_test.go | 8 +- .../resource/resource_custom_policy_test.go | 
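The spec_schema.go hunk above is what lets arbitrary template names flow through the existing policy spec: ConstructSpec writes the template name into spec.Recipe and the custom block into spec.Input, while FlattenSpec sends any recipe name that is not one of the six built-in keys through the new default case and decodes spec.Input back into the custom model. The standalone sketch below illustrates that round trip with the standard library only; the local struct merely mirrors two fields of the real model and is not the provider's type.

```go
// Round trip assumed by the custom recipe handling in ConstructSpec/FlattenSpec:
// the template name travels in spec.Recipe, the custom payload is serialized
// into spec.Input and decoded back on read. Illustrative only.
package main

import (
	"encoding/json"
	"fmt"
)

// customInput loosely mirrors VmwareTanzuManageV1alpha1CommonPolicySpecCustom.
type customInput struct {
	Audit      bool                   `json:"audit,omitempty"`
	Parameters map[string]interface{} `json:"parameters,omitempty"`
}

func main() {
	// ConstructSpec side: template name plus JSON-encodable custom input.
	recipe := "replica-count-range-enforcement"
	in := customInput{
		Parameters: map[string]interface{}{
			"ranges": []map[string]int{{"minReplicas": 3, "maxReplicas": 7}},
		},
	}

	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}

	// FlattenSpec side: a recipe name outside the six built-ins falls into the
	// default case, and the stored input is decoded back into the custom model.
	var out customInput
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}

	fmt.Printf("recipe=%s audit=%v parameters=%v\n", recipe, out.Audit, out.Parameters)
}
```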
163 ++++++++++++++++++ .../tanzu-mission-control_policy.md.tmpl | 5 + 4 files changed, 178 insertions(+), 3 deletions(-) diff --git a/docs/guides/tanzu-mission-control_policy.md b/docs/guides/tanzu-mission-control_policy.md index 9007d10aa..1e99021cb 100644 --- a/docs/guides/tanzu-mission-control_policy.md +++ b/docs/guides/tanzu-mission-control_policy.md @@ -275,3 +275,8 @@ resource "tanzu-mission-control_custom_policy" "cluster_group_scoped_tmc-block-r } ``` +## Custom Template and Custom Policy + +A custom policy template provides a declarative definition of a policy, which can be used to apply custom constraints on managed Kubernetes resources. +A custom policy consumes these declared custom templates to enforce specific constraints. One must create the custom template before consuming it in the custom policy. +Please refer to the custom policy template and custom policy Terraform scripts within the examples. diff --git a/internal/resources/policy/kind/custom/resource/custom_policy_provider_test.go b/internal/resources/policy/kind/custom/resource/custom_policy_provider_test.go index b313177d1..aa41ce9ee 100644 --- a/internal/resources/policy/kind/custom/resource/custom_policy_provider_test.go +++ b/internal/resources/policy/kind/custom/resource/custom_policy_provider_test.go @@ -14,6 +14,7 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/cluster" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clustergroup" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" policykindcustom "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/custom" ) @@ -21,9 +22,10 @@ func initTestProvider(t *testing.T) *schema.Provider { testAccProvider := &schema.Provider{ Schema: authctx.ProviderAuthSchema(), ResourcesMap: map[string]*schema.Resource{ - policykindcustom.ResourceName: ResourceCustomPolicy(), - cluster.ResourceName: cluster.ResourceTMCCluster(), - clustergroup.ResourceName: clustergroup.ResourceClusterGroup(), + policykindcustom.ResourceName: ResourceCustomPolicy(), + cluster.ResourceName: cluster.ResourceTMCCluster(), + clustergroup.ResourceName: clustergroup.ResourceClusterGroup(), + custompolicytemplate.ResourceName: custompolicytemplate.ResourceCustomPolicyTemplate(), }, ConfigureContextFunc: authctx.ProviderConfigureContext, } diff --git a/internal/resources/policy/kind/custom/resource/resource_custom_policy_test.go b/internal/resources/policy/kind/custom/resource/resource_custom_policy_test.go index 78abc86db..22ac0b4ed 100644 --- a/internal/resources/policy/kind/custom/resource/resource_custom_policy_test.go +++ b/internal/resources/policy/kind/custom/resource/resource_custom_policy_test.go @@ -26,6 +26,7 @@ import ( policyclustermodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/cluster" policyclustergroupmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/clustergroup" policyorganizationmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/organization" + custompolicytemplateres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy" policykindCustom "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/custom"
policyoperations "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/operations" @@ -61,6 +62,7 @@ func testGetDefaultAcceptanceConfig(t *testing.T) *testAcceptanceConfig { func TestAcceptanceForCustomPolicyResource(t *testing.T) { testConfig := testGetDefaultAcceptanceConfig(t) + customPolicyTemplateResource := fmt.Sprintf("%s.%s", custompolicytemplateres.ResourceName, "test_custom_policy_template") t.Log("start custom policy resource acceptance tests!") @@ -267,9 +269,64 @@ func TestAcceptanceForCustomPolicyResource(t *testing.T) { ) t.Log("Custom policy resource acceptance test complete for tmc-require-labels recipe!") + + // Test case for custom policy template assignment resource + resource.Test(t, resource.TestCase{ + PreCheck: testhelper.TestPreCheck(t), + ProviderFactories: testhelper.GetTestProviderFactories(testConfig.Provider), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testConfig.getTestCustomPolicyTemplateConfigValue(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(customPolicyTemplateResource, "name", "tf-custom-template-test"), + ), + }, + { + PreConfig: func() { + if testConfig.ScopeHelperResources.Cluster.KubeConfigPath == "" { + t.Skip("KUBECONFIG env var is not set for cluster scoped custom policy acceptance test") + } + }, + ResourceName: customPolicyTemplateResource, + ImportState: true, + ImportStateVerify: true, + Config: testConfig.getTestCustomPolicyConfigValue(scope.ClusterScope, policykindCustom.TMCCustomRecipe), + Check: testConfig.checkCustomPolicyResourceAttributes(scope.ClusterScope), + }, + { + Config: testConfig.getTestCustomPolicyConfigValue(scope.ClusterGroupScope, policykindCustom.TMCCustomRecipe), + ResourceName: customPolicyTemplateResource, + ImportState: true, + ImportStateVerify: true, + Check: resource.ComposeTestCheckFunc( + testConfig.checkCustomPolicyResourceAttributes(scope.ClusterGroupScope), + ), + }, + { + PreConfig: func() { + if testConfig.ScopeHelperResources.OrgID == "" { + t.Skip("ORG_ID env var is not set for organization scoped custom policy acceptance test") + } + }, + ResourceName: customPolicyTemplateResource, + ImportState: true, + ImportStateVerify: true, + Config: testConfig.getTestCustomPolicyConfigValue(scope.OrganizationScope, policykindCustom.TMCCustomRecipe), + Check: testConfig.checkCustomPolicyResourceAttributes(scope.OrganizationScope), + }, + }, + }, + ) + + t.Log("Custom policy resource acceptance test complete for custom recipe!") t.Log("all custom policy resource acceptance tests complete!") } +func (testConfig *testAcceptanceConfig) getTestCustomPolicyConfigValue(scope scope.Scope, recipe policykindCustom.Recipe) string { + return fmt.Sprintf("%s\n%s", testConfig.getTestCustomPolicyTemplateConfigValue(), testConfig.getTestCustomPolicyResourceBasicConfigValue(scope, recipe)) +} + func (testConfig *testAcceptanceConfig) getTestCustomPolicyResourceBasicConfigValue(scope scope.Scope, recipe policykindCustom.Recipe) string { helperBlock, scopeBlock := testConfig.ScopeHelperResources.GetTestPolicyResourceHelperAndScope(scope, policyoperations.ScopeMap[testConfig.CustomPolicyResource], false) inputBlock := testConfig.getTestCustomPolicyResourceInput(recipe) @@ -305,6 +362,85 @@ func (testConfig *testAcceptanceConfig) getTestCustomPolicyResourceBasicConfigVa `, helperBlock, testConfig.CustomPolicyResource, testConfig.CustomPolicyResourceVar, testConfig.CustomPolicyName, scopeBlock, inputBlock) } +func (testConfig *testAcceptanceConfig) 
getTestCustomPolicyTemplateConfigValue() string { + customTemplate := ` +resource "tanzu-mission-control_custom_policy_template" "test_custom_policy_template" { + name = "tf-custom-template-test" + + spec { + object_type = "ConstraintTemplate" + template_type = "OPAGatekeeper" + + data_inventory { + kind = "ConfigMap" + group = "admissionregistration.k8s.io" + version = "v1" + } + + data_inventory { + kind = "Deployment" + group = "extensions" + version = "v1" + } + + template_manifest = < in your <%v> <%v> has no <%v>", [container.name, review.kind.kind, review.object.metadata.name, probe]) + } +YAML + } +} +` + + return customTemplate +} + // getTestCustomPolicyResourceInput builds the input block for custom policy resource based a recipe. func (testConfig *testAcceptanceConfig) getTestCustomPolicyResourceInput(recipe policykindCustom.Recipe) string { var inputBlock string @@ -430,6 +566,33 @@ func (testConfig *testAcceptanceConfig) getTestCustomPolicyResourceInput(recipe } } } +` + case policykindCustom.TMCCustomRecipe: + inputBlock = ` + input { + custom { + template_name = "tf-custom-template-test" + audit = false + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } ` case policykindCustom.UnknownRecipe: log.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. Please check the schema.", strings.Join(policykindCustom.RecipesAllowed[:], `, `)) diff --git a/templates/guides/tanzu-mission-control_policy.md.tmpl b/templates/guides/tanzu-mission-control_policy.md.tmpl index 7c673bce7..ddc9d870e 100644 --- a/templates/guides/tanzu-mission-control_policy.md.tmpl +++ b/templates/guides/tanzu-mission-control_policy.md.tmpl @@ -41,3 +41,8 @@ In the following example, there are multiple dependencies shown. {{ tffile "examples/usecases/custom_policy_usecase.tf" }} +## Custom Template and Custom Policy + +A custom policy template provides a declarative definition of a policy, which can be used to apply custom constraints on managed Kubernetes resources. +A custom policy consumes these declared custom templates to enforce specific constraints. One must create the custom template before consuming it in the custom policy. +Please refer to the custom policy template and custom policy Terraform scripts within the examples.
From 013cd5d7cd02daa60cafd013b16bd9ae022b8c06 Mon Sep 17 00:00:00 2001 From: GilTS Date: Tue, 21 Nov 2023 22:17:26 +0200 Subject: [PATCH 17/22] Add support for Custom Policy Assignment Signed-off-by: GilTS --- docs/resources/custom_policy.md | 240 +++++++++++++++++- .../resource_cluster_group_tmc_custom.tf | 64 +++++ .../resource_cluster_tmc_custom.tf | 66 +++++ .../resource_organization_tmc_custom.tf | 62 +++++ internal/client/http_client.go | 3 + internal/client/recipe/recipe_resource.go | 46 ++++ .../models/policy/recipe/custom/tmc_custom.go | 54 ++++ internal/models/recipe/fullname.go | 48 ++++ internal/models/recipe/recipe.go | 54 ++++ internal/models/recipe/request.go | 41 +++ internal/models/recipe/spec.go | 54 ++++ .../resources/policy/kind/custom/constants.go | 1 + .../policy/kind/custom/input_schema.go | 78 ++++-- .../policy/kind/custom/recipe/constants.go | 2 + .../recipe/tmc_custom_recipe_flatten_test.go | 108 ++++++++ .../kind/custom/recipe/tmc_custom_schema.go | 168 ++++++++++++ .../policy/kind/custom/spec_schema.go | 20 ++ templates/resources/custom_policy.md.tmpl | 19 ++ 18 files changed, 1100 insertions(+), 28 deletions(-) create mode 100644 examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf create mode 100644 examples/resources/custom_policy/resource_cluster_tmc_custom.tf create mode 100644 examples/resources/custom_policy/resource_organization_tmc_custom.tf create mode 100644 internal/client/recipe/recipe_resource.go create mode 100644 internal/models/policy/recipe/custom/tmc_custom.go create mode 100644 internal/models/recipe/fullname.go create mode 100644 internal/models/recipe/recipe.go create mode 100644 internal/models/recipe/request.go create mode 100644 internal/models/recipe/spec.go create mode 100644 internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go create mode 100644 internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go diff --git a/docs/resources/custom_policy.md b/docs/resources/custom_policy.md index 8042c5eda..a2086ce2e 100644 --- a/docs/resources/custom_policy.md +++ b/docs/resources/custom_policy.md @@ -18,6 +18,7 @@ In the Tanzu Mission Control custom policy resource, there are six system define - **tmc-external-ips** - **tmc-https-ingress** - **tmc-require-labels** +- **Any custom template defined in TMC** ## Policy Scope and Inheritance @@ -407,6 +408,79 @@ resource "tanzu-mission-control_custom_policy" "cluster_scoped_tmc-require-label } ``` +## Cluster scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster { + management_cluster_name = "attached" + provisioner_name = "attached" + name = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Cluster group scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -742,6 +816,77 @@ resource "tanzu-mission-control_custom_policy" 
"cluster_group_scoped_tmc-require } ``` +## Cluster group scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster_group { + cluster_group = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Organization scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -1077,6 +1222,75 @@ resource "tanzu-mission-control_custom_policy" "organization_scoped_tmc-require- } ``` +## Organization scoped Custom Policy + +### Example Usage + +```terraform +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + organization { + organization = "dummy-id" + } + } + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} +``` + ## Schema @@ -1147,7 +1361,7 @@ Required: Required: -- `input` (Block List, Min: 1, Max: 1) Input for the custom policy, having one of the valid recipes: tmc_block_nodeport_service, tmc_block_resources, tmc_block_rolebinding_subjects, tmc_external_ips, tmc_https_ingress or tmc_require_labels. (see [below for nested schema](#nestedblock--spec--input)) +- `input` (Block List, Min: 1, Max: 1) Input for the custom policy, having one of the valid recipes: [tmc_block_nodeport_service tmc_block_resources tmc_block_rolebinding_subjects tmc_external_ips tmc_https_ingress tmc_require_labels custom]. 
(see [below for nested schema](#nestedblock--spec--input)) Optional: @@ -1158,6 +1372,7 @@ Optional: Optional: +- `custom` (Block List, Max: 1) The input schema for custom policy tmc_external_ips recipe version v1 (see [below for nested schema](#nestedblock--spec--input--custom)) - `tmc_block_nodeport_service` (Block List, Max: 1) The input schema for custom policy tmc_block_nodeport_service recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_nodeport_service)) - `tmc_block_resources` (Block List, Max: 1) The input schema for custom policy tmc_block_resources recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_resources)) - `tmc_block_rolebinding_subjects` (Block List, Max: 1) The input schema for custom policy tmc_block_rolebinding_subjects recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_block_rolebinding_subjects)) @@ -1165,6 +1380,29 @@ Optional: - `tmc_https_ingress` (Block List, Max: 1) The input schema for custom policy tmc_https_ingress recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_https_ingress)) - `tmc_require_labels` (Block List, Max: 1) The input schema for custom policy tmc_require_labels recipe version v1 (see [below for nested schema](#nestedblock--spec--input--tmc_require_labels)) + +### Nested Schema for `spec.input.custom` + +Required: + +- `target_kubernetes_resources` (Block List, Min: 1) A list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. (see [below for nested schema](#nestedblock--spec--input--custom--target_kubernetes_resources)) +- `template_name` (String) Name of custom template. + +Optional: + +- `audit` (Boolean) Audit (dry-run). +- `parameters` (String) JSON encoded template parameters. + + +### Nested Schema for `spec.input.custom.target_kubernetes_resources` + +Required: + +- `api_groups` (List of String) APIGroup is a group containing the resource type. +- `kinds` (List of String) Kind is the name of the object schema (resource type). 
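As the generated schema above notes, `parameters` is stored as a plain JSON-encoded string. On the provider side that string is unmarshalled into the model's Parameters map on create (ConstructTMCCustom) and re-marshalled on read (FlattenTMCCustom), so a value written with `jsonencode(...)` in HCL round-trips as canonical JSON with sorted keys. A small standalone illustration, stdlib only, with purely illustrative variable names:

```go
// Round trip of the `parameters` attribute: the JSON string produced by
// jsonencode(...) in HCL is decoded into a map for the API payload and
// re-encoded (with keys sorted by encoding/json) when the policy is read back.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// What Terraform stores for:
	//   parameters = jsonencode({ ranges = [{ minReplicas = 3, maxReplicas = 7 }] })
	fromHCL := `{"ranges":[{"maxReplicas":7,"minReplicas":3}]}`

	// Create path: string -> map[string]interface{} for the API model.
	var asMap map[string]interface{}
	if err := json.Unmarshal([]byte(fromHCL), &asMap); err != nil {
		panic(err)
	}

	// Read path: map -> string again for Terraform state.
	backToString, err := json.Marshal(asMap)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(backToString)) // {"ranges":[{"maxReplicas":7,"minReplicas":3}]}
}
```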
+ + + ### Nested Schema for `spec.input.tmc_block_nodeport_service` diff --git a/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf b/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf new file mode 100644 index 000000000..3bc9f0270 --- /dev/null +++ b/examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf @@ -0,0 +1,64 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster_group { + cluster_group = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/examples/resources/custom_policy/resource_cluster_tmc_custom.tf b/examples/resources/custom_policy/resource_cluster_tmc_custom.tf new file mode 100644 index 000000000..6ddcf5f85 --- /dev/null +++ b/examples/resources/custom_policy/resource_cluster_tmc_custom.tf @@ -0,0 +1,66 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + cluster { + management_cluster_name = "attached" + provisioner_name = "attached" + name = "tf-create-test" + } + } + + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/examples/resources/custom_policy/resource_organization_tmc_custom.tf b/examples/resources/custom_policy/resource_organization_tmc_custom.tf new file mode 100644 index 000000000..57e06f566 --- /dev/null +++ b/examples/resources/custom_policy/resource_organization_tmc_custom.tf @@ -0,0 +1,62 @@ +resource "tanzu-mission-control_custom_policy" "custom" { + name = "test-custom-template-tf" + + scope { + organization { + organization = "dummy-id" + } + } + + spec { + input { + custom { + template_name = "replica-count-range-enforcement" + audit = false + + parameters = jsonencode({ + ranges = [ + { + minReplicas = 3 + maxReplicas = 7 + } + ] + }) + + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "Deployment" + ] + } + + target_kubernetes_resources { + api_groups = [ + "apps", + ] + kinds = [ + "StatefulSet", + ] + } + } + } + + namespace_selector { + match_expressions { + key = "" + operator = "" + values = [ + "", + "" + ] + } + match_expressions { + key = "" + operator = "" + values = [] + } + } + } +} diff --git a/internal/client/http_client.go b/internal/client/http_client.go index 82c750e0c..0f3092b12 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -55,6 +55,7 @@ import ( policyorganizationclient 
"github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/organization/policy" provisionerclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/provisioner" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/proxy" + recipeclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/recipe" tanzukubernetesclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzukubernetescluster" tanzupackageclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzupackage" pkginstallclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/tanzupackageinstall" @@ -148,6 +149,7 @@ func newHTTPClient(httpClient *transport.Client) *TanzuMissionControl { TanzuKubernetesClusterResourceService: tanzukubernetesclusterclient.New(httpClient), ProvisionerResourceService: provisionerclient.New(httpClient), CustomPolicyTemplateResourceService: custompolicytemplateclient.New(httpClient), + RecipeResourceService: recipeclient.New(httpClient), } } @@ -208,4 +210,5 @@ type TanzuMissionControl struct { ProvisionerResourceService provisionerclient.ClientService InspectionsResourceService inspectionsclient.ClientService CustomPolicyTemplateResourceService custompolicytemplateclient.ClientService + RecipeResourceService recipeclient.ClientService } diff --git a/internal/client/recipe/recipe_resource.go b/internal/client/recipe/recipe_resource.go new file mode 100644 index 000000000..4d614c3a8 --- /dev/null +++ b/internal/client/recipe/recipe_resource.go @@ -0,0 +1,46 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipeclient + +import ( + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + + recipemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/recipe" +) + +const ( + policyAPIPath = "v1alpha1/policy/types" + recipeAPIPath = "recipes" +) + +// New creates a new recipe resource service API client. +func New(transport *transport.Client) ClientService { + return &Client{Client: transport} +} + +/* +Client for recipe resource service API. +*/ +type Client struct { + *transport.Client +} + +// ClientService is the interface for Client methods. +type ClientService interface { + RecipeResourceServiceGet(fn *recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) (*recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData, error) +} + +/* +RecipeResourceServiceGet gets a recipe. +*/ +func (c *Client) RecipeResourceServiceGet(fn *recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) (*recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData, error) { + requestURL := helper.ConstructRequestURL(policyAPIPath, fn.TypeName, recipeAPIPath, fn.Name).String() + resp := &recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeData{} + err := c.Get(requestURL, resp) + + return resp, err +} diff --git a/internal/models/policy/recipe/custom/tmc_custom.go b/internal/models/policy/recipe/custom/tmc_custom.go new file mode 100644 index 000000000..a44947aba --- /dev/null +++ b/internal/models/policy/recipe/custom/tmc_custom.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package policyrecipecustommodel + +import ( + "github.com/go-openapi/swag" + + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" +) + +// VmwareTanzuManageV1alpha1CommonPolicySpecCustom tmc-external-ips recipe schema. +// +// The input schema for tmc-external-ips recipe. +// +// swagger:model VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCExternalIPS +type VmwareTanzuManageV1alpha1CommonPolicySpecCustom struct { + + // Audit (dry-run). + // Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). + Audit bool `json:"audit,omitempty"` + + // Parameters. + Parameters map[string]interface{} `json:"parameters,omitempty"` + + // TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. You can use 'kubectl api-resources' to view the list of available api resources on your cluster. + // Required: true + // Min Items: 1 + TargetKubernetesResources []*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources `json:"targetKubernetesResources"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1CommonPolicySpecCustom) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1CommonPolicySpecCustom) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/fullname.go b/internal/models/recipe/fullname.go new file mode 100644 index 000000000..0aeb80542 --- /dev/null +++ b/internal/models/recipe/fullname.go @@ -0,0 +1,48 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName Full name of the policy recipe. This includes the object name along +// with any parents or further identifiers. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.FullName +type VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName struct { + + // Name of policy recipe. + Name string `json:"name,omitempty"` + + // ID of Organization. + OrgID string `json:"orgId,omitempty"` + + // Name of policy type. + TypeName string `json:"typeName,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/recipe.go b/internal/models/recipe/recipe.go new file mode 100644 index 000000000..8e39509dc --- /dev/null +++ b/internal/models/recipe/recipe.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipe A Recipe is an internal template for policy type. +// +// Recipe is a convenience decorator. It gives a friendly way to produce policy instances using simple parameters. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.Recipe +type VmwareTanzuManageV1alpha1PolicyTypeRecipe struct { + + // Full name for the policy recipe. + FullName *VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName `json:"fullName,omitempty"` + + // Metadata for the policy recipe object. + Meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta `json:"meta,omitempty"` + + // Spec for the policy recipe. + Spec *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec `json:"spec,omitempty"` + + // Metadata describing the type of the resource. + Type *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipe) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipe) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipe + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/request.go b/internal/models/recipe/request.go new file mode 100644 index 000000000..e0a950189 --- /dev/null +++ b/internal/models/recipe/request.go @@ -0,0 +1,41 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeData Response from getting a Recipe. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.GetRecipeResponse +type VmwareTanzuManageV1alpha1PolicyTypeRecipeData struct { + + // Recipe returned. + Recipe *VmwareTanzuManageV1alpha1PolicyTypeRecipe `json:"recipe,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/recipe/spec.go b/internal/models/recipe/spec.go new file mode 100644 index 000000000..28a1879a9 --- /dev/null +++ b/internal/models/recipe/spec.go @@ -0,0 +1,54 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec Spec of policy recipe. +// +// swagger:model vmware.tanzu.manage.v1alpha1.policy.type.recipe.Spec +type VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec struct { + + // Deprecated specifies whether this version (latest version) of the recipe is deprecated. 
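ValidateCustomRecipe, added earlier in this series, resolves the referenced template by calling RecipeResourceServiceGet with the fixed type name custom-policy, and the InputSchema carried in the recipe spec defined in this hunk is the JSON schema that the user's parameters are then checked against. The snippet below is a standalone sketch of the two mechanical pieces involved: composing the GET path the way the new recipe client does, and pulling the parameters sub-schema out of InputSchema the way ValidateRecipeParameters does. Only the path segments come from the patch; the HTTP call itself is omitted and everything else is illustrative.

```go
// Minimal sketch: build the recipe GET path as in recipe_resource.go, then
// extract properties.parameters from the recipe's InputSchema. Illustrative
// only; the real client builds the full request URL via helper.ConstructRequestURL.
package main

import (
	"encoding/json"
	"fmt"
	"path"
)

const (
	policyAPIPath = "v1alpha1/policy/types"
	recipeAPIPath = "recipes"
)

// recipeURL joins the same segments the recipe client passes to
// helper.ConstructRequestURL(policyAPIPath, fn.TypeName, recipeAPIPath, fn.Name).
func recipeURL(typeName, recipeName string) string {
	return path.Join(policyAPIPath, typeName, recipeAPIPath, recipeName)
}

// parametersSchema extracts properties.parameters from a recipe InputSchema,
// the same lookup ValidateRecipeParameters performs before delegating to the
// OpenAPI v3 validator.
func parametersSchema(inputSchema string) (map[string]interface{}, bool) {
	var schema map[string]interface{}
	if err := json.Unmarshal([]byte(inputSchema), &schema); err != nil {
		return nil, false
	}

	props, ok := schema["properties"].(map[string]interface{})
	if !ok {
		return nil, false
	}

	params, ok := props["parameters"].(map[string]interface{})

	return params, ok
}

func main() {
	fmt.Println(recipeURL("custom-policy", "replica-count-range-enforcement"))
	// v1alpha1/policy/types/custom-policy/recipes/replica-count-range-enforcement

	inputSchema := `{"properties":{"parameters":{"type":"object","properties":{"ranges":{"type":"array"}}}}}`

	if params, ok := parametersSchema(inputSchema); ok {
		fmt.Println(params["properties"]) // map[ranges:map[type:array]]
	}
}
```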
+ // Deprecated recipes will not be assignable to new policy instances nor visible in the UI. + Deprecated bool `json:"deprecated,omitempty"` + + // InputSchema defines the set of variable inputs needed to create a policy using this recipe, in JsonSchema format. + // This input schema is for the latest version of the recipe. For previous versions, check Versions API. + InputSchema string `json:"inputSchema,omitempty"` + + // Policy templates are references to kubernetes resources (policy pre-requisites) associated with this recipe. + // These templates will be applied on clusters where policy instances using this recipe are effective. + // A recipe can have 0 or more templates associated with it. + // These references are for the latest version of the recipe. For previous versions, check Versions API. + PolicyTemplates []*objectmetamodel.VmwareTanzuCoreV1alpha1ObjectReference `json:"policyTemplates"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1PolicyTypeRecipeSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/resources/policy/kind/custom/constants.go b/internal/resources/policy/kind/custom/constants.go index aa13d8857..6fa936d16 100644 --- a/internal/resources/policy/kind/custom/constants.go +++ b/internal/resources/policy/kind/custom/constants.go @@ -24,4 +24,5 @@ const ( TMCExternalIPSRecipe Recipe = reciperesource.TMCExternalIPSKey TMCHTTPSIngressRecipe Recipe = reciperesource.TMCHTTPSIngressKey TMCRequireLabelsRecipe Recipe = reciperesource.TMCRequireLabelsKey + TMCCustomRecipe Recipe = reciperesource.TMCCustomKey ) diff --git a/internal/resources/policy/kind/custom/input_schema.go b/internal/resources/policy/kind/custom/input_schema.go index 5ede487b1..991d5dbc4 100644 --- a/internal/resources/policy/kind/custom/input_schema.go +++ b/internal/resources/policy/kind/custom/input_schema.go @@ -11,16 +11,28 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy" reciperesource "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/custom/recipe" ) var ( + RecipesAllowed = [...]string{ + reciperesource.TMCBlockNodeportServiceKey, + reciperesource.TMCBlockResourcesKey, + reciperesource.TMCBlockRolebindingSubjectsKey, + reciperesource.TMCExternalIPSKey, + reciperesource.TMCHTTPSIngressKey, + reciperesource.TMCRequireLabelsKey, + reciperesource.TMCCustomKey, + } + inputSchema = &schema.Schema{ Type: schema.TypeList, - Description: "Input for the custom policy, having one of the valid recipes: tmc_block_nodeport_service, tmc_block_resources, tmc_block_rolebinding_subjects, tmc_external_ips, tmc_https_ingress or tmc_require_labels.", + Description: fmt.Sprintf("Input for the custom policy, having one of the valid recipes: %v.", RecipesAllowed), Required: true, MaxItems: 1, MinItems: 1, @@ -33,10 +45,10 @@ var ( 
reciperesource.TMCExternalIPSKey: reciperesource.TMCExternalIps, reciperesource.TMCHTTPSIngressKey: reciperesource.TMCHTTPSIngress, reciperesource.TMCRequireLabelsKey: reciperesource.TMCRequireLabels, + reciperesource.TMCCustomKey: reciperesource.TMCCustomSchema, }, }, } - RecipesAllowed = [...]string{reciperesource.TMCBlockNodeportServiceKey, reciperesource.TMCBlockResourcesKey, reciperesource.TMCBlockRolebindingSubjectsKey, reciperesource.TMCExternalIPSKey, reciperesource.TMCHTTPSIngressKey, reciperesource.TMCRequireLabelsKey} ) type ( @@ -50,6 +62,10 @@ type ( inputTMCExternalIps *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCExternalIPS inputTMCHTTPSIngress *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCCommonRecipe inputTMCRequireLabels *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TMCRequireLabels + inputTMCCustom *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + // recipeTMCCustom is needed when using a custom policy template + recipeTMCCustom string } ) @@ -114,6 +130,18 @@ func constructInput(data []interface{}) (inputRecipeData *inputRecipe) { } } + if input, ok := inputData[reciperesource.TMCCustomKey]; ok { + if recipeData, ok := input.([]interface{}); ok && len(recipeData) != 0 { + recipeName := recipeData[0].(map[string]interface{})[reciperesource.TemplateNameKey].(string) + + inputRecipeData = &inputRecipe{ + recipe: TMCCustomRecipe, + recipeTMCCustom: recipeName, + inputTMCCustom: reciperesource.ConstructTMCCustom(recipeData), + } + } + } + return inputRecipeData } @@ -137,6 +165,8 @@ func flattenInput(inputRecipeData *inputRecipe) (data []interface{}) { flattenInputData[reciperesource.TMCHTTPSIngressKey] = reciperesource.FlattenTMCCommonRecipe(inputRecipeData.inputTMCHTTPSIngress) case TMCRequireLabelsRecipe: flattenInputData[reciperesource.TMCRequireLabelsKey] = reciperesource.FlattenTMCRequireLabels(inputRecipeData.inputTMCRequireLabels) + case TMCCustomRecipe: + flattenInputData[reciperesource.TMCCustomKey] = reciperesource.FlattenTMCCustom(inputRecipeData.recipeTMCCustom, inputRecipeData.inputTMCCustom) case UnknownRecipe: fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. 
Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) @@ -176,39 +206,33 @@ func ValidateInput(ctx context.Context, diff *schema.ResourceDiff, i interface{} inputData, _ := inputType[0].(map[string]interface{}) recipesFound := make([]string, 0) - if recipeData, ok := inputData[reciperesource.TMCBlockNodeportServiceKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockNodeportServiceKey) - } - } - - if recipeData, ok := inputData[reciperesource.TMCBlockResourcesKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockResourcesKey) - } + recipes := []string{ + reciperesource.TMCBlockNodeportServiceKey, + reciperesource.TMCBlockResourcesKey, + reciperesource.TMCBlockRolebindingSubjectsKey, + reciperesource.TMCExternalIPSKey, + reciperesource.TMCHTTPSIngressKey, + reciperesource.TMCRequireLabelsKey, } - if recipeData, ok := inputData[reciperesource.TMCBlockRolebindingSubjectsKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCBlockRolebindingSubjectsKey) + for _, recipe := range recipes { + if recipeData, ok := inputData[recipe]; ok { + if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { + recipesFound = append(recipesFound, recipe) + } } } - if recipeData, ok := inputData[reciperesource.TMCExternalIPSKey]; ok { + if recipeData, ok := inputData[reciperesource.TMCCustomKey]; ok { if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCExternalIPSKey) - } - } + config := i.(authctx.TanzuContext) + err := reciperesource.ValidateCustomRecipe(config, recipeType[0].(map[string]interface{})) - if recipeData, ok := inputData[reciperesource.TMCHTTPSIngressKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCHTTPSIngressKey) - } - } + if err != nil { + return errors.Wrapf(err, "Custom Recipe validation failed:\n") + } - if recipeData, ok := inputData[reciperesource.TMCRequireLabelsKey]; ok { - if recipeType, ok := recipeData.([]interface{}); ok && len(recipeType) != 0 { - recipesFound = append(recipesFound, reciperesource.TMCRequireLabelsKey) + recipesFound = append(recipesFound, reciperesource.TMCCustomKey) } } diff --git a/internal/resources/policy/kind/custom/recipe/constants.go b/internal/resources/policy/kind/custom/recipe/constants.go index 8dab89d7c..e05eab4b5 100644 --- a/internal/resources/policy/kind/custom/recipe/constants.go +++ b/internal/resources/policy/kind/custom/recipe/constants.go @@ -12,6 +12,7 @@ const ( TMCBlockNodeportServiceKey = "tmc_block_nodeport_service" TMCBlockResourcesKey = "tmc_block_resources" TMCHTTPSIngressKey = "tmc_https_ingress" + TMCCustomKey = "custom" AuditKey = "audit" TargetKubernetesResourcesKey = "target_kubernetes_resources" ParametersKey = "parameters" @@ -24,4 +25,5 @@ const ( disallowedSubjectsKey = "disallowed_subjects" kindKey = "kind" nameKey = "name" + TemplateNameKey = "template_name" ) diff --git a/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go b/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go new file mode 100644 index 000000000..b5959cdc6 --- /dev/null +++ 
b/internal/resources/policy/kind/custom/recipe/tmc_custom_recipe_flatten_test.go @@ -0,0 +1,108 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipe + +import ( + "testing" + + "github.com/stretchr/testify/require" + + policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" +) + +const ( + customRecipeTemplateName = "some-custom-template" +) + +func TestFlattenTMCCustom(t *testing.T) { + t.Parallel() + + cases := []struct { + description string + input *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + expected []interface{} + }{ + { + description: "check for nil custom policy recipe", + input: nil, + expected: nil, + }, + { + description: "normal scenario with complete custom policy recipe", + input: &policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom{ + Audit: true, + Parameters: map[string]interface{}{ + "ranges": []map[string]interface{}{ + { + "min_replicas": 3, + "max_replicas": 7, + }, + }, + }, + TargetKubernetesResources: []*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources{ + { + APIGroups: []string{"apps"}, + Kinds: []string{"Deployment", "StatefulSet"}, + }, + }, + }, + expected: []interface{}{ + map[string]interface{}{ + AuditKey: true, + TemplateNameKey: customRecipeTemplateName, + ParametersKey: "{\"ranges\":[{\"max_replicas\":7,\"min_replicas\":3}]}", + TargetKubernetesResourcesKey: []interface{}{ + map[string]interface{}{ + APIGroupsKey: []string{"apps"}, + KindsKey: []string{"Deployment", "StatefulSet"}, + }, + }, + }, + }, + }, + } + + for _, each := range cases { + test := each + t.Run(test.description, func(t *testing.T) { + actual := FlattenTMCCustom(customRecipeTemplateName, test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestValidateRecipeParameters(t *testing.T) { + t.Parallel() + + cases := []struct { + description string + recipeSchema string + recipeParameters string + expectedErrors bool + }{ + { + description: "scenario for valid recipe parameters", + recipeSchema: "{\"description\":\"The input schema for replica-count-range-enforcement recipe\",\"type\":\"object\",\"title\":\"replica-count-range-enforcement recipe schema\",\"required\":[\"targetKubernetesResources\"],\"properties\":{\"audit\":{\"description\":\"Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). (This is deprecated, please use enforcementAction instead)\",\"type\":\"boolean\",\"title\":\"Audit (dry-run)\"},\"enforcementAction\":{\"description\":\"Select the action to take when the policy is violated.\",\"type\":\"string\",\"title\":\"Enforcement Action\",\"pattern\":\"dryrun|warn|deny\"},\"parameters\":{\"type\":\"object\",\"properties\":{\"ranges\":{\"description\":\"Allowed ranges for numbers of replicas. Values are inclusive.\",\"type\":\"array\",\"items\":{\"description\":\"A range of allowed replicas. 
Values are inclusive.\",\"type\":\"object\",\"properties\":{\"max_replicas\":{\"description\":\"The maximum number of replicas allowed, inclusive.\",\"type\":\"integer\"},\"min_replicas\":{\"description\":\"The minimum number of replicas allowed, inclusive.\",\"type\":\"integer\"}}}}}},\"targetKubernetesResources\":{\"description\":\"TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. You can use 'kubectl api-resources' to view the list of available api resources on your cluster.\",\"type\":\"array\",\"minItems\":1,\"items\":{\"required\":[\"apiGroups\",\"kinds\"],\"properties\":{\"apiGroups\":{\"description\":\"apiGroup is group containing the resource type, for example 'rbac.authorization.k8s.io', 'networking.k8s.io', 'extensions', '' (some resources like Namespace, Pod have empty apiGroup).\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}},\"kinds\":{\"description\":\"kind is the name of the object schema (resource type), for example 'Namespace', 'Pod', 'Ingress'\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}}}}}}}", + recipeParameters: "{\"ranges\":[{\"max_replicas\":7,\"min_replicas\":3}]}", + expectedErrors: false, + }, + { + description: "scenario for invalid recipe parameters", + recipeSchema: "{\"description\":\"The input schema for replica-count-range-enforcement recipe\",\"type\":\"object\",\"title\":\"replica-count-range-enforcement recipe schema\",\"required\":[\"targetKubernetesResources\"],\"properties\":{\"audit\":{\"description\":\"Creates this policy for dry-run. Violations will be logged but not denied. Defaults to false (deny). (This is deprecated, please use enforcementAction instead)\",\"type\":\"boolean\",\"title\":\"Audit (dry-run)\"},\"enforcementAction\":{\"description\":\"Select the action to take when the policy is violated.\",\"type\":\"string\",\"title\":\"Enforcement Action\",\"pattern\":\"dryrun|warn|deny\"},\"parameters\":{\"type\":\"object\",\"properties\":{\"ranges\":{\"description\":\"Allowed ranges for numbers of replicas. Values are inclusive.\",\"type\":\"array\",\"items\":{\"description\":\"A range of allowed replicas. Values are inclusive.\",\"type\":\"object\",\"properties\":{\"max_replicas\":{\"description\":\"The maximum number of replicas allowed, inclusive.\",\"type\":\"integer\"},\"min_replicas\":{\"description\":\"The minimum number of replicas allowed, inclusive.\",\"type\":\"integer\"}}}}}},\"targetKubernetesResources\":{\"description\":\"TargetKubernetesResources is a list of kubernetes api resources on which the policy will be enforced, identified using apiGroups and kinds. 
You can use 'kubectl api-resources' to view the list of available api resources on your cluster.\",\"type\":\"array\",\"minItems\":1,\"items\":{\"required\":[\"apiGroups\",\"kinds\"],\"properties\":{\"apiGroups\":{\"description\":\"apiGroup is group containing the resource type, for example 'rbac.authorization.k8s.io', 'networking.k8s.io', 'extensions', '' (some resources like Namespace, Pod have empty apiGroup).\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}},\"kinds\":{\"description\":\"kind is the name of the object schema (resource type), for example 'Namespace', 'Pod', 'Ingress'\",\"type\":\"array\",\"minItems\":1,\"items\":{\"type\":\"string\"}}}}}}}", + recipeParameters: "{\"replica_ranges\":[{\"maximum\":7,\"minimum\":3}]}", + expectedErrors: true, + }, + } + + for _, each := range cases { + test := each + t.Run(test.description, func(t *testing.T) { + actual := ValidateRecipeParameters(test.recipeSchema, test.recipeParameters) + require.Equal(t, test.expectedErrors, len(actual) > 0) + }) + } +} diff --git a/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go b/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go new file mode 100644 index 000000000..5f2462186 --- /dev/null +++ b/internal/resources/policy/kind/custom/recipe/tmc_custom_schema.go @@ -0,0 +1,168 @@ +/* +Copyright © 2024 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package recipe + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + openapiv3 "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper/openapi_v3_schema_validator" + policyrecipecustommodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom" + policyrecipecustomcommonmodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/policy/recipe/custom/common" + recipemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/recipe" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/policy/kind/common" +) + +var TMCCustomSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "The input schema for custom policy tmc_external_ips recipe version v1", + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + TemplateNameKey: { + Type: schema.TypeString, + Description: "Name of custom template.", + Required: true, + }, + AuditKey: { + Type: schema.TypeBool, + Description: "Audit (dry-run).", + Optional: true, + Default: false, + }, + ParametersKey: { + Type: schema.TypeString, + Description: "JSON encoded template parameters.", + Optional: true, + }, + TargetKubernetesResourcesKey: common.TargetKubernetesResourcesSchema, + }, + }, +} + +func FlattenTMCCustom(recipeName string, customRecipe *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom) []interface{} { + if customRecipe == nil { + return nil + } + + customInputMap := make(map[string]interface{}) + customInputMap[AuditKey] = customRecipe.Audit + customInputMap[TemplateNameKey] = recipeName + + if customRecipe.Parameters != nil { + parametersJSONBytes, _ := json.Marshal(customRecipe.Parameters) + customInputMap[ParametersKey] = 
helper.ConvertToString(parametersJSONBytes, "") + } + + targetKubernetesResources := make([]interface{}, 0) + + for _, tkr := range customRecipe.TargetKubernetesResources { + targetKubernetesResources = append(targetKubernetesResources, common.FlattenTargetKubernetesResources(tkr)) + } + + customInputMap[TargetKubernetesResourcesKey] = targetKubernetesResources + + return []interface{}{customInputMap} +} + +func ConstructTMCCustom(customRecipe []interface{}) (customInputModel *policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom) { + if len(customRecipe) != 0 && customRecipe[0] != nil { + customInputMap := customRecipe[0].(map[string]interface{}) + + customInputModel = &policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom{ + Audit: customInputMap[AuditKey].(bool), + } + + parametersData := customInputMap[ParametersKey].(string) + + if parametersData != "" { + parametersJSON := make(map[string]interface{}) + + _ = json.Unmarshal([]byte(parametersData), ¶metersJSON) + + customInputModel.Parameters = parametersJSON + } + + targetKubernetesResourcesData := customInputMap[TargetKubernetesResourcesKey].([]interface{}) + customInputModel.TargetKubernetesResources = make([]*policyrecipecustomcommonmodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustomV1TargetKubernetesResources, 0) + + for _, targetKubernetesResource := range targetKubernetesResourcesData { + customInputModel.TargetKubernetesResources = append(customInputModel.TargetKubernetesResources, common.ExpandTargetKubernetesResources(targetKubernetesResource)) + } + } + + return customInputModel +} + +func ValidateCustomRecipe(config authctx.TanzuContext, customRecipe map[string]interface{}) error { + errMessages := make([]string, 0) + customTemplateName := customRecipe[TemplateNameKey].(string) + + recipeModel := &recipemodels.VmwareTanzuManageV1alpha1PolicyTypeRecipeFullName{ + TypeName: "custom-policy", + Name: customTemplateName, + } + + recipeData, err := config.TMCConnection.RecipeResourceService.RecipeResourceServiceGet(recipeModel) + + if err != nil { + errMessages = append(errMessages, err.Error()) + } else { + errs := ValidateRecipeParameters(recipeData.Recipe.Spec.InputSchema, customRecipe[ParametersKey].(string)) + + if len(errs) > 0 { + errMsg := "" + + for _, e := range errs { + if errMsg == "" { + errMsg = e.Error() + } else { + errMsg = fmt.Sprintf("%s\n%s", errMsg, e.Error()) + } + } + + errMessages = append(errMessages, errMsg) + } + } + + if len(errMessages) > 0 { + errMsg := strings.Join(errMessages, "\n") + + return errors.New(errMsg) + } + + return nil +} + +func ValidateRecipeParameters(recipeSchema string, recipeParameters string) (errs []error) { + recipeSchemaJSON := make(map[string]interface{}) + _ = json.Unmarshal([]byte(recipeSchema), &recipeSchemaJSON) + + recipeParametersSchema, parametersSchemaExist := recipeSchemaJSON["properties"].(map[string]interface{})["parameters"] + + if parametersSchemaExist { + openAPIV3Validator := &openapiv3.OpenAPIV3SchemaValidator{ + Schema: recipeParametersSchema.(map[string]interface{})["properties"].(map[string]interface{}), + } + + recipeParametersJSON := make(map[string]interface{}) + _ = json.Unmarshal([]byte(recipeParameters), &recipeParametersJSON) + errs = make([]error, 0) + + errs = append(errs, openAPIV3Validator.ValidateRequiredFields(recipeParametersJSON)...) + errs = append(errs, openAPIV3Validator.ValidateFormat(recipeParametersJSON)...) 
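Taken together, ValidateCustomRecipe and ValidateRecipeParameters above fetch the named custom policy template from TMC and check the JSON-encoded `parameters` string against that template's input schema during plan-time validation (ValidateInput). As a rough illustration of how the new `custom` recipe block surfaces to users, a cluster-scoped configuration might look like the sketch below; the example files referenced in the docs template are not included in this excerpt, so the resource and scope blocks are assumed to follow the existing custom policy examples, and the template name, cluster coordinates, and parameter values are placeholders.

```terraform
# Illustrative sketch only: template name, cluster coordinates, and parameter
# values are placeholders, not part of this patch.
resource "tanzu-mission-control_custom_policy" "cluster_scoped_custom_template_policy" {
  name = "tf-custom-template-policy"

  scope {
    cluster {
      management_cluster_name = "attached"     # placeholder
      provisioner_name        = "attached"     # placeholder
      name                    = "demo-cluster" # placeholder
    }
  }

  spec {
    input {
      custom {
        # Must match a custom policy template that already exists in TMC;
        # ValidateCustomRecipe looks the template up by this name.
        template_name = "replica-count-range-enforcement"
        audit         = false

        # JSON-encoded parameters, checked against the template's input schema.
        parameters = jsonencode({
          ranges = [{ min_replicas = 3, max_replicas = 7 }]
        })

        target_kubernetes_resources {
          api_groups = ["apps"]
          kinds      = ["Deployment", "StatefulSet"]
        }
      }
    }
  }
}
```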
+ } + + return errs +} diff --git a/internal/resources/policy/kind/custom/spec_schema.go b/internal/resources/policy/kind/custom/spec_schema.go index 7efbc4ed6..5984a2463 100644 --- a/internal/resources/policy/kind/custom/spec_schema.go +++ b/internal/resources/policy/kind/custom/spec_schema.go @@ -92,6 +92,12 @@ func ConstructSpec(d *schema.ResourceData) (spec *policymodel.VmwareTanzuManageV if inputRecipeData.inputTMCRequireLabels != nil { spec.Input = *inputRecipeData.inputTMCRequireLabels } + case TMCCustomRecipe: + spec.Recipe = inputRecipeData.recipeTMCCustom + + if inputRecipeData.inputTMCCustom != nil { + spec.Input = *inputRecipeData.inputTMCCustom + } case UnknownRecipe: fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) } @@ -203,6 +209,20 @@ func FlattenSpec(spec *policymodel.VmwareTanzuManageV1alpha1CommonPolicySpec) (d } case string(UnknownRecipe): fmt.Printf("[ERROR]: No valid input recipe block found: minimum one valid input recipe block is required among: %v. Please check the schema.", strings.Join(RecipesAllowed[:], `, `)) + default: + var tmcCustomRecipeInput policyrecipecustommodel.VmwareTanzuManageV1alpha1CommonPolicySpecCustom + + err = tmcCustomRecipeInput.UnmarshalBinary(byteSlice) + + if err != nil { + return data + } + + inputRecipeData = &inputRecipe{ + recipe: TMCCustomRecipe, + recipeTMCCustom: spec.Recipe, + inputTMCCustom: &tmcCustomRecipeInput, + } } flattenSpecData[policy.InputKey] = flattenInput(inputRecipeData) diff --git a/templates/resources/custom_policy.md.tmpl b/templates/resources/custom_policy.md.tmpl index 323fc17b6..62f921bc2 100644 --- a/templates/resources/custom_policy.md.tmpl +++ b/templates/resources/custom_policy.md.tmpl @@ -18,6 +18,7 @@ In the Tanzu Mission Control custom policy resource, there are six system define - **tmc-external-ips** - **tmc-https-ingress** - **tmc-require-labels** +- **Any custom template defined in TMC** ## Policy Scope and Inheritance @@ -96,6 +97,12 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_cluster_tmc_require_labels_custom_policy.tf" }} +## Cluster scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_cluster_tmc_custom.tf" }} + ## Cluster group scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -132,6 +139,12 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_cluster_group_tmc_require_labels_custom_policy.tf" }} +## Cluster group scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_cluster_group_tmc_custom.tf" }} + ## Organization scoped TMC-block-nodeport-service Custom Policy ### Example Usage @@ -168,4 +181,10 @@ target_kubernetes_resources { {{ tffile "examples/resources/custom_policy/resource_organization_tmc_require_labels_custom_policy.tf" }} +## Organization scoped Custom Policy + +### Example Usage + +{{ tffile "examples/resources/custom_policy/resource_organization_tmc_custom.tf" }} + {{ .SchemaMarkdown | trimspace }} From 3d7e7ab1f1982022b447ebfa0786e210a85492e6 Mon Sep 17 00:00:00 2001 From: GilTS Date: Wed, 15 Nov 2023 23:52:39 +0200 Subject: [PATCH 18/22] [Feature complete] Custom IAM Role models, client, schema, implementation and docs Signed-off-by: GilTS --- .github/workflows/release.yml | 2 +- .github/workflows/test.yml | 2 +- Makefile | 2 +- docs/resources/custom_iam_role.md | 169
++++++++++ .../resource_custom_iam_role.tf | 45 +++ .../customiamrole/customiamrole_resource.go | 81 +++++ internal/client/http_client.go | 3 + .../models/customiamrole/aggregeation_rule.go | 41 +++ .../models/customiamrole/customiamrole.go | 52 +++ internal/models/customiamrole/fullname.go | 44 +++ .../models/customiamrole/label_selector.go | 50 +++ .../models/customiamrole/match_expression.go | 55 ++++ internal/models/customiamrole/request.go | 41 +++ internal/models/customiamrole/resources.go | 66 ++++ internal/models/customiamrole/rules.go | 53 +++ internal/models/customiamrole/spec.go | 56 ++++ internal/provider/provider.go | 2 + .../customiamrole/converter_mapping.go | 59 ++++ .../customiamrole/resource_custom_iam_role.go | 306 ++++++++++++++++++ internal/resources/customiamrole/schema.go | 229 +++++++++++++ .../tests/custom_iam_role_test.go | 111 +++++++ .../customiamrole/tests/helper_test.go | 35 ++ .../tests/resource_tf_configs.go | 102 ++++++ templates/resources/custom_iam_role.md.tmpl | 19 ++ 24 files changed, 1622 insertions(+), 3 deletions(-) create mode 100644 docs/resources/custom_iam_role.md create mode 100644 examples/resources/custom_iam_role/resource_custom_iam_role.tf create mode 100644 internal/client/customiamrole/customiamrole_resource.go create mode 100644 internal/models/customiamrole/aggregeation_rule.go create mode 100644 internal/models/customiamrole/customiamrole.go create mode 100644 internal/models/customiamrole/fullname.go create mode 100644 internal/models/customiamrole/label_selector.go create mode 100644 internal/models/customiamrole/match_expression.go create mode 100644 internal/models/customiamrole/request.go create mode 100644 internal/models/customiamrole/resources.go create mode 100644 internal/models/customiamrole/rules.go create mode 100644 internal/models/customiamrole/spec.go create mode 100644 internal/resources/customiamrole/converter_mapping.go create mode 100644 internal/resources/customiamrole/resource_custom_iam_role.go create mode 100644 internal/resources/customiamrole/schema.go create mode 100644 internal/resources/customiamrole/tests/custom_iam_role_test.go create mode 100644 internal/resources/customiamrole/tests/helper_test.go create mode 100644 internal/resources/customiamrole/tests/resource_tf_configs.go create mode 100644 templates/resources/custom_iam_role.md.tmpl diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5e38c7948..212a5739c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,7 +6,7 @@ on: - 'v*' env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' + BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate customiamrole' jobs: goreleaser: diff --git a/.github/workflows/test.yml 
b/.github/workflows/test.yml index fc3838aea..03af1110e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -3,7 +3,7 @@ name: Test and coverage on: [pull_request, push] env: - BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' + BUILD_TAGS: 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate customiamrole' jobs: build: name: Test and coverage diff --git a/Makefile b/Makefile index 3764fb98b..a23040216 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ ifeq ($(TEST_FLAGS),) endif ifeq ($(BUILD_TAGS),) - BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate' + BUILD_TAGS := 'akscluster cluster clustergroup credential ekscluster gitrepository iampolicy kustomization namespace custompolicy imagepolicy networkpolicy quotapolicy securitypolicy sourcesecret workspace tanzupackage tanzupackages packagerepository packageinstall clustersecret integration mutationpolicy helmfeature helmrelease backupschedule targetlocation dataprotection tanzukubernetescluster clusterclass managementcluster provisioner inspections custompolicytemplate customiamrole' endif .PHONY: build clean-up test gofmt vet lint acc-test website-lint website-lint-fix diff --git a/docs/resources/custom_iam_role.md b/docs/resources/custom_iam_role.md new file mode 100644 index 000000000..9e7f08fb3 --- /dev/null +++ b/docs/resources/custom_iam_role.md @@ -0,0 +1,169 @@ +--- +Title: "Custom IAM Role Resource" +Description: |- + Creating a custom IAM role. +--- + +# Custom IAM Role Resource + +This resource enables users to create custom IAM roles in TMC. + +For more information regarding custom roles, see [Custom Role][custom-role]. 
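Besides create, update, and delete, the resource wires up a Terraform importer (resourceCustomIAMRoleImporter, later in this patch) that takes the custom role's name as the import ID. A minimal sketch, assuming a Terraform version that supports `import` blocks (1.5 or newer); the equivalent CLI form is `terraform import tanzu-mission-control_custom_iam_role.demo-role tf-custom-role`.

```terraform
# Sketch: adopt an existing custom role into state. The ID is the role name,
# matching the importer added in this patch.
import {
  to = tanzu-mission-control_custom_iam_role.demo-role
  id = "tf-custom-role"
}
```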
+ +[custom-role]: https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-F314ED9E-2736-48CC-A1BB-CB9C32900B30.html + +## Example Usage + +```terraform +resource "tanzu-mission-control_custom_iam_role" "demo-role" { + name = "tf-custom-role" + + spec { + is_deprecated = false + + aggregation_rule { + cluster_role_selector { + match_labels = { + key = "value" + } + } + + cluster_role_selector { + match_expression { + key = "aa" + operator = "Exists" + values = ["aa", "bb", "cc"] + } + } + } + + allowed_scopes = [ + "ORGANIZATION", + "CLUSTER_GROUP", + "CLUSTER" + ] + + tanzu_permissions = [] + + kubernetes_permissions { + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } + + rule { + verbs = ["get", "list"] + api_groups = ["*"] + url_paths = ["/healthz"] + } + } + } +} +``` + + +## Schema + +### Required + +- `name` (String) The name of the iam role +- `spec` (Block List, Min: 1, Max: 1) Spec block of iam role (see [below for nested schema](#nestedblock--spec)) + +### Optional + +- `meta` (Block List, Max: 1) Metadata for the resource (see [below for nested schema](#nestedblock--meta)) + +### Read-Only + +- `id` (String) The ID of this resource. + + +### Nested Schema for `spec` + +Required: + +- `allowed_scopes` (List of String) The allowed scopes for the iam role. +Valid values are (ORGANIZATION, MANAGEMENT_CLUSTER, PROVISIONER, CLUSTER_GROUP, CLUSTER, WORKSPACE, NAMESPACE) + +Optional: + +- `aggregation_rule` (Block List, Max: 1) Aggregation rules for the iam role. (see [below for nested schema](#nestedblock--spec--aggregation_rule)) +- `is_deprecated` (Boolean) Flag representing whether role is deprecated. +- `kubernetes_permissions` (Block List, Max: 1) Kubernetes permissions for the iam role. (see [below for nested schema](#nestedblock--spec--kubernetes_permissions)) +- `tanzu_permissions` (List of String) Tanzu-specific permissions for the role. + + +### Nested Schema for `spec.aggregation_rule` + +Required: + +- `cluster_role_selector` (Block List, Min: 1) Cluster role selector for the iam role. (see [below for nested schema](#nestedblock--spec--aggregation_rule--cluster_role_selector)) + + +### Nested Schema for `spec.aggregation_rule.cluster_role_selector` + +Optional: + +- `match_expression` (Block List) List of label selector requirements. +The requirements are ANDed. (see [below for nested schema](#nestedblock--spec--aggregation_rule--cluster_role_selector--match_expression)) +- `match_labels` (Map of String) Map of {key,value} pairs. +A single {key,value} in the match_labels map is equivalent to an element of match_expression, whose key field is "key", the operator is "In", and the values array contains only "value". +The requirements are ANDed. + + +### Nested Schema for `spec.aggregation_rule.cluster_role_selector.match_expression` + +Required: + +- `key` (String) Key is the label key that the selector applies to. +- `operator` (String) Operator represents a key's relationship to a set of values. +Valid operators are "In", "NotIn", "Exists" and "DoesNotExist". + +Optional: + +- `values` (List of String) Values is an array of string values. +If the operator is "In" or "NotIn", the values array must be non-empty. +If the operator is "Exists" or "DoesNotExist", the values array must be empty. +This array is replaced during a strategic merge patch. + + + + + +### Nested Schema for `spec.kubernetes_permissions` + +Required: + +- `rule` (Block List, Min: 1) Kubernetes rules. 
(see [below for nested schema](#nestedblock--spec--kubernetes_permissions--rule)) + + +### Nested Schema for `spec.kubernetes_permissions.rule` + +Required: + +- `verbs` (List of String) Verbs. + +Optional: + +- `api_groups` (List of String) API groups. +- `resource_names` (List of String) Restricts the rule to resources by name. +- `resources` (List of String) Resources for the role. +- `url_paths` (List of String) Non-resource urls for the role. + + + + + +### Nested Schema for `meta` + +Optional: + +- `annotations` (Map of String) Annotations for the resource +- `description` (String) Description of the resource +- `labels` (Map of String) Labels for the resource + +Read-Only: + +- `resource_version` (String) Resource version of the resource +- `uid` (String) UID of the resource diff --git a/examples/resources/custom_iam_role/resource_custom_iam_role.tf b/examples/resources/custom_iam_role/resource_custom_iam_role.tf new file mode 100644 index 000000000..22ea8ced6 --- /dev/null +++ b/examples/resources/custom_iam_role/resource_custom_iam_role.tf @@ -0,0 +1,45 @@ +resource "tanzu-mission-control_custom_iam_role" "demo-role" { + name = "tf-custom-role" + + spec { + is_deprecated = false + + aggregation_rule { + cluster_role_selector { + match_labels = { + key = "value" + } + } + + cluster_role_selector { + match_expression { + key = "aa" + operator = "Exists" + values = ["aa", "bb", "cc"] + } + } + } + + allowed_scopes = [ + "ORGANIZATION", + "CLUSTER_GROUP", + "CLUSTER" + ] + + tanzu_permissions = [] + + kubernetes_permissions { + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } + + rule { + verbs = ["get", "list"] + api_groups = ["*"] + url_paths = ["/healthz"] + } + } + } +} diff --git a/internal/client/customiamrole/customiamrole_resource.go b/internal/client/customiamrole/customiamrole_resource.go new file mode 100644 index 000000000..1da7ba235 --- /dev/null +++ b/internal/client/customiamrole/customiamrole_resource.go @@ -0,0 +1,81 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrole + +import ( + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/transport" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + customiamrolemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/customiamrole" +) + +const ( + iamRoleAPIVersionAndGroup = "v1alpha1/iam/roles" +) + +// New creates a new custom iam role resource service API client. +func New(transport *transport.Client) ClientService { + return &Client{Client: transport} +} + +/* +Client for custom iam role resource service API. +*/ +type Client struct { + *transport.Client +} + +// ClientService is the interface for Client methods. +type ClientService interface { + CustomIAMRoleResourceServiceCreate(request *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) + + CustomIAMRoleResourceServiceUpdate(request *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) + + CustomIAMRoleResourceServiceDelete(fn *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName) error + + CustomIAMRoleResourceServiceGet(fn *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) +} + +/* +CustomIAMRoleResourceServiceGet gets a custom iam role. 
+*/ +func (c *Client) CustomIAMRoleResourceServiceGet(fullName *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) { + requestURL := helper.ConstructRequestURL(iamRoleAPIVersionAndGroup, fullName.Name).String() + resp := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData{} + err := c.Get(requestURL, resp) + + return resp, err +} + +/* +CustomIAMRoleResourceServiceCreate creates a custom iam role. +*/ +func (c *Client) CustomIAMRoleResourceServiceCreate(request *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) { + response := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData{} + requestURL := helper.ConstructRequestURL(iamRoleAPIVersionAndGroup).String() + err := c.Create(requestURL, request, response) + + return response, err +} + +/* +CustomIAMRoleResourceServiceUpdate updates a custom iam role. +*/ +func (c *Client) CustomIAMRoleResourceServiceUpdate(request *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData) (*customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData, error) { + response := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData{} + requestURL := helper.ConstructRequestURL(iamRoleAPIVersionAndGroup, request.Role.FullName.Name).String() + err := c.Update(requestURL, request, response) + + return response, err +} + +/* +CustomIAMRoleResourceServiceDelete deletes a custom iam role. +*/ +func (c *Client) CustomIAMRoleResourceServiceDelete(fullName *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName) error { + requestURL := helper.ConstructRequestURL(iamRoleAPIVersionAndGroup, fullName.Name).String() + + return c.Delete(requestURL) +} diff --git a/internal/client/http_client.go b/internal/client/http_client.go index 0f3092b12..cb25037fe 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -39,6 +39,7 @@ import ( sourcesecretclustergroupclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/clustergroup/sourcesecret" credentialclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/credential" custompolicytemplateclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/custompolicytemplate" + customiamroleclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/customiamrole" eksclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster" eksnodepoolclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster/nodepool" inspectionsclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/inspections" @@ -150,6 +151,7 @@ func newHTTPClient(httpClient *transport.Client) *TanzuMissionControl { ProvisionerResourceService: provisionerclient.New(httpClient), CustomPolicyTemplateResourceService: custompolicytemplateclient.New(httpClient), RecipeResourceService: recipeclient.New(httpClient), + CustomIAMRoleResourceService: customiamroleclient.New(httpClient), } } @@ -211,4 +213,5 @@ type TanzuMissionControl struct { InspectionsResourceService inspectionsclient.ClientService CustomPolicyTemplateResourceService custompolicytemplateclient.ClientService RecipeResourceService recipeclient.ClientService + CustomIAMRoleResourceService customiamroleclient.ClientService } diff --git a/internal/models/customiamrole/aggregeation_rule.go 
b/internal/models/customiamrole/aggregeation_rule.go new file mode 100644 index 000000000..b4358e253 --- /dev/null +++ b/internal/models/customiamrole/aggregeation_rule.go @@ -0,0 +1,41 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1IamRoleAggregationRule AggregationRule for a role. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.AggregationRule +type VmwareTanzuManageV1alpha1IamRoleAggregationRule struct { + + // Label based Cluster Role Selector. + ClusterRoleSelectors []*K8sIoApimachineryPkgApisMetaV1LabelSelector `json:"clusterRoleSelectors"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleAggregationRule) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleAggregationRule) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRoleAggregationRule + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/customiamrole.go b/internal/models/customiamrole/customiamrole.go new file mode 100644 index 000000000..059b2eb21 --- /dev/null +++ b/internal/models/customiamrole/customiamrole.go @@ -0,0 +1,52 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" + + objectmetamodel "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/objectmeta" +) + +// VmwareTanzuManageV1alpha1IamRoleRole Manage permissions on resources. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.Role +type VmwareTanzuManageV1alpha1IamRole struct { + + // Full name for the role. + FullName *VmwareTanzuManageV1alpha1IamRoleFullName `json:"fullName,omitempty"` + + // Metadata for the role object. + Meta *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectMeta `json:"meta,omitempty"` + + // Spec for the role. + Spec *VmwareTanzuManageV1alpha1IamRoleSpec `json:"spec,omitempty"` + + // Metadata describing the type of the resource. + Type *objectmetamodel.VmwareTanzuCoreV1alpha1ObjectType `json:"type,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRole) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRole) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRole + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/fullname.go b/internal/models/customiamrole/fullname.go new file mode 100644 index 000000000..44d73a8ce --- /dev/null +++ b/internal/models/customiamrole/fullname.go @@ -0,0 +1,44 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1IamRoleFullName Full name for role. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.FullName +type VmwareTanzuManageV1alpha1IamRoleFullName struct { + + // Name of the role. + Name string `json:"name,omitempty"` + + // Org Id. 
+ OrgID string `json:"orgId,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleFullName) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleFullName) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRoleFullName + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/label_selector.go b/internal/models/customiamrole/label_selector.go new file mode 100644 index 000000000..f2b5ae12d --- /dev/null +++ b/internal/models/customiamrole/label_selector.go @@ -0,0 +1,50 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// K8sIoApimachineryPkgApisMetaV1LabelSelector A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +// +// swagger:model k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector +type K8sIoApimachineryPkgApisMetaV1LabelSelector struct { + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + MatchExpressions []*K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement `json:"matchExpressions"` + + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1LabelSelector) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1LabelSelector) UnmarshalBinary(b []byte) error { + var res K8sIoApimachineryPkgApisMetaV1LabelSelector + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/match_expression.go b/internal/models/customiamrole/match_expression.go new file mode 100644 index 000000000..b37bbd82d --- /dev/null +++ b/internal/models/customiamrole/match_expression.go @@ -0,0 +1,55 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +// +// swagger:model k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement +type K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement struct { + + // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key,omitempty"` + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + Operator string `json:"operator,omitempty"` + + // values is an array of string values. 
If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values"` +} + +// MarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement) UnmarshalBinary(b []byte) error { + var res K8sIoApimachineryPkgApisMetaV1LabelSelectorRequirement + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/request.go b/internal/models/customiamrole/request.go new file mode 100644 index 000000000..6f2c19966 --- /dev/null +++ b/internal/models/customiamrole/request.go @@ -0,0 +1,41 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1IamRoleCreateRoleRequest Request to create a Role. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.CreateRoleRequest +type VmwareTanzuManageV1alpha1IamRoleData struct { + + // Role to create. + Role *VmwareTanzuManageV1alpha1IamRole `json:"role,omitempty"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleData) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRoleData + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/resources.go b/internal/models/customiamrole/resources.go new file mode 100644 index 000000000..e6850930c --- /dev/null +++ b/internal/models/customiamrole/resources.go @@ -0,0 +1,66 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "encoding/json" +) + +// VmwareTanzuManageV1alpha1IamPermissionResource Resource Types. +// +// - RESOURCE_UNSPECIFIED: Unknown. +// - ORGANIZATION: Organization. +// - MANAGEMENT_CLUSTER: Management cluster. +// - PROVISIONER: Provisioner. +// - CLUSTER_GROUP: Cluster group. +// - CLUSTER: Cluster. +// - WORKSPACE: Workspace. +// - NAMESPACE: Namespace. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.permission.Resource +type VmwareTanzuManageV1alpha1IamPermissionResource string + +const ( + + // VmwareTanzuManageV1alpha1IamPermissionResourceRESOURCEUNSPECIFIED captures enum value "RESOURCE_UNSPECIFIED". + VmwareTanzuManageV1alpha1IamPermissionResourceRESOURCEUNSPECIFIED VmwareTanzuManageV1alpha1IamPermissionResource = "RESOURCE_UNSPECIFIED" + + // VmwareTanzuManageV1alpha1IamPermissionResourceORGANIZATION captures enum value "ORGANIZATION". + VmwareTanzuManageV1alpha1IamPermissionResourceORGANIZATION VmwareTanzuManageV1alpha1IamPermissionResource = "ORGANIZATION" + + // VmwareTanzuManageV1alpha1IamPermissionResourceMANAGEMENTCLUSTER captures enum value "MANAGEMENT_CLUSTER". 
+ VmwareTanzuManageV1alpha1IamPermissionResourceMANAGEMENTCLUSTER VmwareTanzuManageV1alpha1IamPermissionResource = "MANAGEMENT_CLUSTER" + + // VmwareTanzuManageV1alpha1IamPermissionResourcePROVISIONER captures enum value "PROVISIONER". + VmwareTanzuManageV1alpha1IamPermissionResourcePROVISIONER VmwareTanzuManageV1alpha1IamPermissionResource = "PROVISIONER" + + // VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTERGROUP captures enum value "CLUSTER_GROUP". + VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTERGROUP VmwareTanzuManageV1alpha1IamPermissionResource = "CLUSTER_GROUP" + + // VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTER captures enum value "CLUSTER". + VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTER VmwareTanzuManageV1alpha1IamPermissionResource = "CLUSTER" + + // VmwareTanzuManageV1alpha1IamPermissionResourceWORKSPACE captures enum value "WORKSPACE". + VmwareTanzuManageV1alpha1IamPermissionResourceWORKSPACE VmwareTanzuManageV1alpha1IamPermissionResource = "WORKSPACE" + + // VmwareTanzuManageV1alpha1IamPermissionResourceNAMESPACE captures enum value "NAMESPACE". + VmwareTanzuManageV1alpha1IamPermissionResourceNAMESPACE VmwareTanzuManageV1alpha1IamPermissionResource = "NAMESPACE" +) + +// for schema. +var vmwareTanzuManageV1alpha1IamPermissionResourceEnum []interface{} + +func init() { + var res []VmwareTanzuManageV1alpha1IamPermissionResource + + if err := json.Unmarshal([]byte(`["RESOURCE_UNSPECIFIED","ORGANIZATION","MANAGEMENT_CLUSTER","PROVISIONER","CLUSTER_GROUP","CLUSTER","WORKSPACE","NAMESPACE"]`), &res); err != nil { + panic(err) + } + + for _, v := range res { + vmwareTanzuManageV1alpha1IamPermissionResourceEnum = append(vmwareTanzuManageV1alpha1IamPermissionResourceEnum, v) + } +} diff --git a/internal/models/customiamrole/rules.go b/internal/models/customiamrole/rules.go new file mode 100644 index 000000000..7e7375c56 --- /dev/null +++ b/internal/models/customiamrole/rules.go @@ -0,0 +1,53 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1IamRoleKubernetesRule KubernetesRule for a role. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.KubernetesRule +type VmwareTanzuManageV1alpha1IamRoleKubernetesRule struct { + + // API group. + APIGroups []string `json:"apiGroups"` + + // Non-resource urls for the role. + NonResourceUrls []string `json:"nonResourceUrls"` + + // ResourceNames to restrict the rule to resources by name + ResourceNames []string `json:"resourceNames"` + + // Resources - added a validation to input. + Resources []string `json:"resources"` + + // Verbs. + Verbs []string `json:"verbs"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleKubernetesRule) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleKubernetesRule) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRoleKubernetesRule + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/models/customiamrole/spec.go b/internal/models/customiamrole/spec.go new file mode 100644 index 000000000..6a8c654e3 --- /dev/null +++ b/internal/models/customiamrole/spec.go @@ -0,0 +1,56 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrolemodels + +import ( + "github.com/go-openapi/swag" +) + +// VmwareTanzuManageV1alpha1IamRoleSpec Spec for role. +// +// swagger:model vmware.tanzu.manage.v1alpha1.iam.role.Spec +type VmwareTanzuManageV1alpha1IamRoleSpec struct { + + // AggregationRule. + AggregationRule *VmwareTanzuManageV1alpha1IamRoleAggregationRule `json:"aggregationRule,omitempty"` + + // Flag representing whether role is deprecated. + IsDeprecated bool `json:"isDeprecated"` + + // This flag will help the client identify if this is an inbuilt role. + IsInbuilt bool `json:"isInbuilt"` + + // Valid resources for this role. + Resources []*VmwareTanzuManageV1alpha1IamPermissionResource `json:"resources"` + + // KubernetesRule. + Rules []*VmwareTanzuManageV1alpha1IamRoleKubernetesRule `json:"rules"` + + // Tanzu-specific permissions for the role. + TanzuPermissions []string `json:"tanzuPermissions"` +} + +// MarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleSpec) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation. +func (m *VmwareTanzuManageV1alpha1IamRoleSpec) UnmarshalBinary(b []byte) error { + var res VmwareTanzuManageV1alpha1IamRoleSpec + + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + + *m = res + + return nil +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index cc48fc1c8..ec95e424c 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -19,6 +19,7 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clustergroup" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/credential" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/customiamrole" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/ekscluster" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/gitrepository" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmcharts" @@ -90,6 +91,7 @@ func Provider() *schema.Provider { utkgresource.ResourceName: utkgresource.ResourceTanzuKubernetesCluster(), provisioner.ResourceName: provisioner.ResourceProvisioner(), custompolicytemplate.ResourceName: custompolicytemplate.ResourceCustomPolicyTemplate(), + customiamrole.ResourceName: customiamrole.ResourceCustomIAMRole(), }, DataSourcesMap: map[string]*schema.Resource{ cluster.ResourceName: cluster.DataSourceTMCCluster(), diff --git a/internal/resources/customiamrole/converter_mapping.go b/internal/resources/customiamrole/converter_mapping.go new file mode 100644 index 000000000..50478becb --- /dev/null +++ b/internal/resources/customiamrole/converter_mapping.go @@ -0,0 +1,59 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrole + +import ( + tfModelConverterHelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper/converter" + customiamrolemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/customiamrole" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common" +) + +var ( + clusterRoleSelectorsArrayField = tfModelConverterHelper.BuildArrayField("clusterRoleSelectors") + rulesArrayField = tfModelConverterHelper.BuildArrayField("rules") + matchExpressionsArrayField = tfModelConverterHelper.BuildArrayField("matchExpressions") +) + +var tfModelResourceMap = &tfModelConverterHelper.BlockToStruct{ + NameKey: tfModelConverterHelper.BuildDefaultModelPath("fullName", "name"), + common.MetaKey: common.GetMetaConverterMap(tfModelConverterHelper.DefaultModelPathSeparator), + SpecKey: &tfModelConverterHelper.BlockToStruct{ + IsDeprecatedKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "isDeprecated"), + AllowedScopesKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "resources"), + TanzuPermissionsKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "tanzuPermissions"), + KubernetesPermissionsKey: &tfModelConverterHelper.BlockToStruct{ + RuleKey: &tfModelConverterHelper.BlockSliceToStructSlice{ + { + APIGroupsKey: tfModelConverterHelper.BuildDefaultModelPath("spec", rulesArrayField, "apiGroups"), + URLPathsKey: tfModelConverterHelper.BuildDefaultModelPath("spec", rulesArrayField, "nonResourceUrls"), + ResourceNamesKey: tfModelConverterHelper.BuildDefaultModelPath("spec", rulesArrayField, "resourceNames"), + ResourcesKey: tfModelConverterHelper.BuildDefaultModelPath("spec", rulesArrayField, "resources"), + VerbsKey: tfModelConverterHelper.BuildDefaultModelPath("spec", rulesArrayField, "verbs"), + }, + }, + }, + AggregationRuleKey: &tfModelConverterHelper.BlockToStruct{ + ClusterRoleSelectorKey: &tfModelConverterHelper.BlockSliceToStructSlice{ + { + MatchExpressionKey: &tfModelConverterHelper.BlockSliceToStructSlice{ + { + MeKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "aggregationRule", clusterRoleSelectorsArrayField, matchExpressionsArrayField, "key"), + MeOperatorKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "aggregationRule", clusterRoleSelectorsArrayField, matchExpressionsArrayField, "operator"), + MeValuesKey: tfModelConverterHelper.BuildDefaultModelPath("spec", "aggregationRule", clusterRoleSelectorsArrayField, matchExpressionsArrayField, "values"), + }, + }, + MatchLabelsKey: &tfModelConverterHelper.Map{ + tfModelConverterHelper.AllMapKeysFieldMarker: tfModelConverterHelper.BuildDefaultModelPath("spec", "aggregationRule", clusterRoleSelectorsArrayField, "matchLabels", tfModelConverterHelper.AllMapKeysFieldMarker), + }, + }, + }, + }, + }, +} + +var tfModelConverter = tfModelConverterHelper.TFSchemaModelConverter[*customiamrolemodels.VmwareTanzuManageV1alpha1IamRole]{ + TFModelMap: tfModelResourceMap, +} diff --git a/internal/resources/customiamrole/resource_custom_iam_role.go b/internal/resources/customiamrole/resource_custom_iam_role.go new file mode 100644 index 000000000..74803ff27 --- /dev/null +++ b/internal/resources/customiamrole/resource_custom_iam_role.go @@ -0,0 +1,306 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrole + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + clienterrors "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/errors" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/helper" + customiamrolemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/customiamrole" +) + +func ResourceCustomIAMRole() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceCustomIAMRoleCreate, + UpdateContext: resourceCustomIAMRoleUpdate, + ReadContext: resourceCustomIAMRoleRead, + DeleteContext: resourceCustomIAMRoleDelete, + Importer: &schema.ResourceImporter{ + StateContext: resourceCustomIAMRoleImporter, + }, + CustomizeDiff: validateSchema, + Schema: customIAMRoleResourceSchema, + } +} + +func resourceCustomIAMRoleCreate(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{}) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't create Custom IAM Role.")) + } + + request := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData{ + Role: model, + } + + _, err = config.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceCreate(request) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't create Custom IAM Role.\nName: %s", model.FullName.Name)) + } + + return resourceCustomIAMRoleRead(helper.GetContextWithCaller(ctx, helper.CreateState), data, m) +} + +func resourceCustomIAMRoleUpdate(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{}) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't update Custom IAM Role.")) + } + + request := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData{ + Role: model, + } + + _, err = config.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceUpdate(request) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't update Custom IAM Role.\nName: %s", model.FullName.Name)) + } + + return resourceCustomIAMRoleRead(helper.GetContextWithCaller(ctx, helper.UpdateState), data, m) +} + +func resourceCustomIAMRoleRead(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + var resp *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleData + + config := m.(authctx.TanzuContext) + model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey}) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't read Custom IAM Role.")) + } + + customIAMRoleFn := model.FullName + resp, err = config.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceGet(customIAMRoleFn) + + if err != nil { + if clienterrors.IsNotFoundError(err) { + if !helper.IsContextCallerSet(ctx) { + *data = schema.ResourceData{} + + return diags + } else if helper.IsDeleteState(ctx) { + // d.SetId("") is automatically called assuming delete returns no errors, but + // it is added here for explicitness. 
+ _ = schema.RemoveFromState(data, m) + + return diags + } + } + + return diag.FromErr(errors.Wrapf(err, "Couldn't read Custom IAM Role.\nName: %s", customIAMRoleFn.Name)) + } else if resp != nil { + oldSpecData := data.Get(SpecKey).([]interface{})[0].(map[string]interface{}) + aggregationRuleData, aggregationRuleExists := oldSpecData[AggregationRuleKey] + allowedScopesData, allowedScopesExist := oldSpecData[AllowedScopesKey] + err = tfModelConverter.FillTFSchema(resp.Role, data) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't read Custom IAM Role.\nName: %s", customIAMRoleFn.Name)) + } + + // API Inconsistency Fix + if aggregationRuleExists || allowedScopesExist { + newSpecData := data.Get(SpecKey).([]interface{})[0].(map[string]interface{}) + + if aggregationRuleExists && len(aggregationRuleData.([]interface{})) > 0 { + newSpecData[AggregationRuleKey] = formatAggregationRuleData(aggregationRuleData.([]interface{}), resp.Role.Spec.AggregationRule) + } + + newSpecData[AllowedScopesKey] = formatResourcesData(allowedScopesData.([]interface{}), resp.Role.Spec.Resources) + _ = data.Set(SpecKey, []interface{}{newSpecData}) + } + + data.SetId(customIAMRoleFn.Name) + } + + return diags +} + +func resourceCustomIAMRoleDelete(ctx context.Context, data *schema.ResourceData, m interface{}) (diags diag.Diagnostics) { + config := m.(authctx.TanzuContext) + model, err := tfModelConverter.ConvertTFSchemaToAPIModel(data, []string{NameKey}) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't delete Custom IAM Role.")) + } + + customIAMRoleFn := model.FullName + err = config.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceDelete(customIAMRoleFn) + + if err != nil { + return diag.FromErr(errors.Wrapf(err, "Couldn't delete Custom IAM Role.\nName: %s", customIAMRoleFn.Name)) + } + + return resourceCustomIAMRoleRead(helper.GetContextWithCaller(ctx, helper.DeleteState), data, m) +} + +func resourceCustomIAMRoleImporter(_ context.Context, data *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + config := m.(authctx.TanzuContext) + customIAMRoleName := data.Id() + + if customIAMRoleName == "" { + return nil, errors.New("Cluster ID must be set to the custom IAM role name.") + } + + customIAMRoleFn := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName{ + Name: customIAMRoleName, + } + + resp, err := config.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceGet(customIAMRoleFn) + + if err != nil || resp.Role == nil { + return nil, errors.Wrapf(err, "Couldn't import Custom IAM Role.\nName: %s", customIAMRoleFn.Name) + } + + err = tfModelConverter.FillTFSchema(resp.Role, data) + + if err != nil { + return nil, errors.Wrapf(err, "Couldn't import Custom IAM Role.\nName: %s", customIAMRoleFn.Name) + } + + return []*schema.ResourceData{data}, err +} + +func validateSchema(ctx context.Context, data *schema.ResourceDiff, m interface{}) (err error) { + specData := data.Get(SpecKey).([]interface{})[0].(map[string]interface{}) + kubernetesPermissions := specData[KubernetesPermissionsKey].([]interface{}) + + if len(kubernetesPermissions) > 0 { + rulesData := kubernetesPermissions[0].(map[string]interface{})[RuleKey].([]interface{}) + errMsg := "" + + for i, r := range rulesData { + resourcesLen := len(r.(map[string]interface{})[ResourcesKey].([]interface{})) + urlPathsLen := len(r.(map[string]interface{})[URLPathsKey].([]interface{})) + + if (resourcesLen > 0 && urlPathsLen > 0) || (resourcesLen == 0 && urlPathsLen 
== 0) { + if errMsg == "" { + errMsg = "Custom IAM Role Rules Validation Failed:" + } + + errMsg = fmt.Sprintf("%s\n%s", errMsg, fmt.Sprintf("Rule #%d - Must include %s or %s but not both.", i+1, ResourcesKey, URLPathsKey)) + } + } + + if errMsg != "" { + err = errors.New(errMsg) + } + } + + return err +} + +func formatAggregationRuleData(tfAggregationRule []interface{}, modelAggregationRule *customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleAggregationRule) (aggregationRule []interface{}) { + clusterRoleSelector := make([]interface{}, 0) + tfClusterRoleSelectors := tfAggregationRule[0].(map[string]interface{})[ClusterRoleSelectorKey].([]interface{}) + modelClusterRoleSelectors := modelAggregationRule.ClusterRoleSelectors + + for _, tfSelector := range tfClusterRoleSelectors { + tfSelectorMap := tfSelector.(map[string]interface{}) + + if tfSelectorMap[MatchExpressionKey] != nil { + clusterRoleSelector = append(clusterRoleSelector, tfSelector) + } else if tfSelectorMap[MatchLabelsKey] != nil { + for _, modelSelector := range modelClusterRoleSelectors { + if compareClusterRoleSelectors(tfSelectorMap, modelSelector) { + clusterRoleSelector = append(clusterRoleSelector, tfSelector) + break + } + } + } + } + + if len(clusterRoleSelector) != len(modelClusterRoleSelectors) { + for _, modelSelector := range modelClusterRoleSelectors { + modelSelectorFound := false + + if modelSelector.MatchLabels != nil { + for _, tfSelector := range tfClusterRoleSelectors { + if compareClusterRoleSelectors(tfSelector.(map[string]interface{}), modelSelector) { + modelSelectorFound = true + break + } + } + + if !modelSelectorFound { + clusterRoleSelector = append(clusterRoleSelector, modelSelector) + } + } + } + } + + aggregationRule = make([]interface{}, 0) + aggregationRuleMap := make(map[string]interface{}) + aggregationRuleMap[ClusterRoleSelectorKey] = clusterRoleSelector + aggregationRule = append(aggregationRule, aggregationRuleMap) + + return aggregationRule +} + +func compareClusterRoleSelectors(tfSelector map[string]interface{}, modelSelector *customiamrolemodels.K8sIoApimachineryPkgApisMetaV1LabelSelector) bool { + isEqual := false + tfMatchLabels, _ := tfSelector[MatchLabelsKey].(map[string]interface{}) + + if modelSelector.MatchLabels != nil && tfMatchLabels != nil { + isEqual = true + + for k, v := range tfMatchLabels { + modelValue, keyExist := modelSelector.MatchLabels[k] + + if !keyExist || modelValue != v { + isEqual = false + break + } + } + } + + return isEqual +} + +func formatResourcesData(tfResources []interface{}, modelResources []*customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResource) (resources []interface{}) { + resources = make([]interface{}, 0) + + for _, tfRes := range tfResources { + for _, modelRes := range modelResources { + if tfRes.(string) == string(*modelRes) { + resources = append(resources, tfRes) + break + } + } + } + + if len(resources) != len(modelResources) { + for _, modelRes := range modelResources { + modelResFound := false + + for _, tfRes := range tfResources { + if tfRes.(string) == string(*modelRes) { + modelResFound = true + break + } + } + + if !modelResFound { + resources = append(resources, modelRes) + } + } + } + + return resources +} diff --git a/internal/resources/customiamrole/schema.go b/internal/resources/customiamrole/schema.go new file mode 100644 index 000000000..1f9a859f9 --- /dev/null +++ b/internal/resources/customiamrole/schema.go @@ -0,0 +1,229 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamrole + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + customiamrolemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/customiamrole" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/common" +) + +const ( + ResourceName = "tanzu-mission-control_custom_iam_role" + + // Root Keys. + NameKey = "name" + SpecKey = "spec" + + // Spec Directive Keys. + AllowedScopesKey = "allowed_scopes" + AggregationRuleKey = "aggregation_rule" + IsDeprecatedKey = "is_deprecated" + TanzuPermissionsKey = "tanzu_permissions" + KubernetesPermissionsKey = "kubernetes_permissions" + + // Kubernetes Permissions Directive Keys. + RuleKey = "rule" + + // Rule Directive Keys. + ResourcesKey = "resources" + APIGroupsKey = "api_groups" + URLPathsKey = "url_paths" + ResourceNamesKey = "resource_names" + VerbsKey = "verbs" + + // Aggregation Rule Directive Keys. + ClusterRoleSelectorKey = "cluster_role_selector" + + // Cluster Role Selector Directive Keys. + MatchLabelsKey = "match_labels" + MatchExpressionKey = "match_expression" + + // Match Expression Directive Keys. + MeKey = "key" + MeOperatorKey = "operator" + MeValuesKey = "values" +) + +var AllowedScopesValidValues = []string{ + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceORGANIZATION), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceMANAGEMENTCLUSTER), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourcePROVISIONER), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTERGROUP), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceCLUSTER), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceWORKSPACE), + string(customiamrolemodels.VmwareTanzuManageV1alpha1IamPermissionResourceNAMESPACE), +} + +var customIAMRoleResourceSchema = map[string]*schema.Schema{ + NameKey: nameSchema, + SpecKey: specSchema, + common.MetaKey: common.Meta, +} + +var nameSchema = &schema.Schema{ + Type: schema.TypeString, + Description: "The name of the iam role", + Required: true, + ForceNew: true, +} + +var specSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Spec block of iam role", + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + AllowedScopesKey: AllowedScopesSchema, + AggregationRuleKey: AggregationRuleSchema, + KubernetesPermissionsKey: KubernetesPermissionsSchema, + TanzuPermissionsKey: { + Type: schema.TypeList, + Description: "Tanzu-specific permissions for the role.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + IsDeprecatedKey: { + Type: schema.TypeBool, + Description: "Flag representing whether role is deprecated.", + Default: false, + Optional: true, + }, + }, + }, +} + +var AllowedScopesSchema = &schema.Schema{ + Type: schema.TypeList, + Description: fmt.Sprintf("The allowed scopes for the iam role.\nValid values are (%s)", strings.Join(AllowedScopesValidValues, ", ")), + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: validation.ToDiagFunc(validation.StringInSlice(AllowedScopesValidValues, false)), + }, +} + +var AggregationRuleSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Aggregation rules for the iam role.", + MaxItems: 1, + Optional: 
true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + ClusterRoleSelectorKey: { + Type: schema.TypeList, + Description: "Cluster role selector for the iam role.", + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + MatchLabelsKey: { + Type: schema.TypeMap, + Description: "Map of {key,value} pairs.\nA single {key,value} in the match_labels map is equivalent to an element of match_expression, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". \nThe requirements are ANDed.", + Optional: true, + }, + MatchExpressionKey: { + Type: schema.TypeList, + Description: "List of label selector requirements.\nThe requirements are ANDed.", + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + MeKey: { + Type: schema.TypeString, + Description: "Key is the label key that the selector applies to.", + Required: true, + }, + MeOperatorKey: { + Type: schema.TypeString, + Description: "Operator represents a key's relationship to a set of values.\nValid operators are \"In\", \"NotIn\", \"Exists\" and \"DoesNotExist\".", + Required: true, + }, + MeValuesKey: { + Type: schema.TypeList, + Description: "Values is an array of string values.\nIf the operator is \"In\" or \"NotIn\", the values array must be non-empty.\nIf the operator is \"Exists\" or \"DoesNotExist\", the values array must be empty.\nThis array is replaced during a strategic merge patch.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var KubernetesPermissionsSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Kubernetes permissions for the iam role.", + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + RuleKey: RuleSchema, + }, + }, +} + +var RuleSchema = &schema.Schema{ + Type: schema.TypeList, + Description: "Kubernetes rules.", + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + APIGroupsKey: { + Type: schema.TypeList, + Description: "API groups.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + VerbsKey: { + Type: schema.TypeList, + Description: "Verbs.", + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + ResourcesKey: { + Type: schema.TypeList, + Description: "Resources for the role.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + ResourceNamesKey: { + Type: schema.TypeList, + Description: "Restricts the rule to resources by name.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + URLPathsKey: { + Type: schema.TypeList, + Description: "Non-resource urls for the role.", + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, +} diff --git a/internal/resources/customiamrole/tests/custom_iam_role_test.go b/internal/resources/customiamrole/tests/custom_iam_role_test.go new file mode 100644 index 000000000..ae042dd5d --- /dev/null +++ b/internal/resources/customiamrole/tests/custom_iam_role_test.go @@ -0,0 +1,111 @@ +//go:build customiamrole +// +build customiamrole + +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamroletests + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/pkg/errors" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/proxy" + customiamrolemodels "github.com/vmware/terraform-provider-tanzu-mission-control/internal/models/customiamrole" + testhelper "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/testing" +) + +var ( + context = authctx.TanzuContext{ + ServerEndpoint: os.Getenv(authctx.ServerEndpointEnvVar), + Token: os.Getenv(authctx.VMWCloudAPITokenEnvVar), + VMWCloudEndPoint: os.Getenv(authctx.VMWCloudEndpointEnvVar), + TLSConfig: &proxy.TLSConfig{}, + } +) + +func TestAcceptanceCustomIAMRoleResource(t *testing.T) { + err := context.Setup() + + if err != nil { + t.Error(errors.Wrap(err, "unable to set the context")) + t.FailNow() + } + + var ( + provider = initTestProvider(t) + tfResourceConfigBuilder = InitResourceTFConfigBuilder() + ) + + resource.Test(t, resource.TestCase{ + PreCheck: testhelper.TestPreCheck(t), + ProviderFactories: testhelper.GetTestProviderFactories(provider), + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: tfResourceConfigBuilder.GetCustomSlimIAMRoleConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(CustomIAMRoleResourceFullName, "name", CustomIAMRoleName), + verifyTanzuKubernetesClusterResource(provider, CustomIAMRoleResourceFullName, CustomIAMRoleName), + ), + }, + { + Config: tfResourceConfigBuilder.GetCustomFullIAMRoleConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(CustomIAMRoleResourceFullName, "name", CustomIAMRoleName), + verifyTanzuKubernetesClusterResource(provider, CustomIAMRoleResourceFullName, CustomIAMRoleName), + ), + }, + }, + }, + ) + + t.Log("Custom IAM role resource acceptance test complete!") +} + +func verifyTanzuKubernetesClusterResource( + provider *schema.Provider, + resourceName string, + customRoleName string, +) resource.TestCheckFunc { + return func(s *terraform.State) error { + if provider == nil { + return fmt.Errorf("provider not initialised") + } + + rs, ok := s.RootModule().Resources[resourceName] + + if !ok { + return fmt.Errorf("could not find resource %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("ID not set, resource %s", resourceName) + } + + fn := &customiamrolemodels.VmwareTanzuManageV1alpha1IamRoleFullName{ + Name: customRoleName, + } + + resp, err := context.TMCConnection.CustomIAMRoleResourceService.CustomIARoleResourceServiceGet(fn) + + if err != nil { + return errors.Errorf("Custom IAM Role resource not found, resource: %s | err: %s", resourceName, err) + } + + if resp == nil { + return errors.Errorf("Custom IAM Role resource is empty, resource: %s", resourceName) + } + + return nil + } +} diff --git a/internal/resources/customiamrole/tests/helper_test.go b/internal/resources/customiamrole/tests/helper_test.go new file mode 100644 index 000000000..a4e1fd56b --- /dev/null +++ b/internal/resources/customiamrole/tests/helper_test.go @@ -0,0 +1,35 @@ +//go:build customiamrole +// +build customiamrole + +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. 
+SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamroletests + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/require" + + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/authctx" + customiamroleres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/customiamrole" +) + +func initTestProvider(t *testing.T) *schema.Provider { + testAccProvider := &schema.Provider{ + Schema: authctx.ProviderAuthSchema(), + ResourcesMap: map[string]*schema.Resource{ + customiamroleres.ResourceName: customiamroleres.ResourceCustomIAMRole(), + }, + ConfigureContextFunc: authctx.ProviderConfigureContext, + } + + if err := testAccProvider.InternalValidate(); err != nil { + require.NoError(t, err) + } + + return testAccProvider +} diff --git a/internal/resources/customiamrole/tests/resource_tf_configs.go b/internal/resources/customiamrole/tests/resource_tf_configs.go new file mode 100644 index 000000000..60968563f --- /dev/null +++ b/internal/resources/customiamrole/tests/resource_tf_configs.go @@ -0,0 +1,102 @@ +/* +Copyright © 2023 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: MPL-2.0 +*/ + +package customiamroletests + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + + customiamroleres "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/customiamrole" +) + +const ( + CustomIAMRoleResourceName = "test_custom_iam_role" +) + +var ( + CustomIAMRoleResourceFullName = fmt.Sprintf("%s.%s", customiamroleres.ResourceName, CustomIAMRoleResourceName) + CustomIAMRoleName = acctest.RandomWithPrefix("acc-test-custom-iam-role") +) + +type ResourceTFConfigBuilder struct { + NodePoolDefinition string +} + +func InitResourceTFConfigBuilder() *ResourceTFConfigBuilder { + tfConfigBuilder := &ResourceTFConfigBuilder{} + + return tfConfigBuilder +} + +func (builder *ResourceTFConfigBuilder) GetCustomFullIAMRoleConfig() string { + return fmt.Sprintf(` + resource "%s" "%s" { + name = "%s" + + spec { + is_deprecated = false + + aggregation_rule { + cluster_role_selector { + match_labels = { + key = "value" + } + } + + cluster_role_selector { + match_expression { + key = "aa" + operator = "Exists" + values = ["aa", "bb", "cc"] + } + } + } + + resources = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] + tanzu_permissions = ["account.credential.iam.get"] + + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } + + rule { + verbs = ["get", "list"] + api_groups = ["*"] + url_paths = ["/healthz"] + } + } + } + `, + customiamroleres.ResourceName, + CustomIAMRoleResourceName, + CustomIAMRoleName, + ) +} + +func (builder *ResourceTFConfigBuilder) GetCustomSlimIAMRoleConfig() string { + return fmt.Sprintf(` + resource "%s" "%s" { + name = "%s" + + spec { + resources = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] + + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } + } + } + `, + customiamroleres.ResourceName, + CustomIAMRoleResourceName, + CustomIAMRoleName, + ) +} diff --git a/templates/resources/custom_iam_role.md.tmpl b/templates/resources/custom_iam_role.md.tmpl new file mode 100644 index 000000000..d6c237a69 --- /dev/null +++ b/templates/resources/custom_iam_role.md.tmpl @@ -0,0 +1,19 @@ +--- +Title: "Custom IAM Role Resource" +Description: |- + Creating a custom IAM role. 
+--- + +# Custom IAM Role Resource + +This resource enables users to create custom IAM roles in TMC. + +For more information regarding custom roles, see [Custom Role][custom-role]. + +[custom-role]: https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-F314ED9E-2736-48CC-A1BB-CB9C32900B30.html + +## Example Usage + +{{ tffile "examples/resources/custom_iam_role/resource_custom_iam_role.tf" }} + +{{ .SchemaMarkdown | trimspace }} From 9f63c4960b50a82f1c7502084ea10062e126ef8b Mon Sep 17 00:00:00 2001 From: Ramya Bangera Date: Tue, 16 Jan 2024 15:12:21 +0530 Subject: [PATCH 19/22] Update the copyright to 2024 Signed-off-by: Ramya Bangera --- internal/client/customiamrole/customiamrole_resource.go | 2 +- internal/models/customiamrole/aggregeation_rule.go | 2 +- internal/models/customiamrole/customiamrole.go | 2 +- internal/models/customiamrole/fullname.go | 2 +- internal/models/customiamrole/label_selector.go | 2 +- internal/models/customiamrole/match_expression.go | 2 +- internal/models/customiamrole/request.go | 2 +- internal/models/customiamrole/resources.go | 2 +- internal/models/customiamrole/rules.go | 2 +- internal/models/customiamrole/spec.go | 2 +- internal/resources/customiamrole/converter_mapping.go | 2 +- internal/resources/customiamrole/resource_custom_iam_role.go | 2 +- internal/resources/customiamrole/schema.go | 2 +- internal/resources/customiamrole/tests/custom_iam_role_test.go | 2 +- internal/resources/customiamrole/tests/helper_test.go | 2 +- internal/resources/customiamrole/tests/resource_tf_configs.go | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/internal/client/customiamrole/customiamrole_resource.go b/internal/client/customiamrole/customiamrole_resource.go index 1da7ba235..c6fedf6fc 100644 --- a/internal/client/customiamrole/customiamrole_resource.go +++ b/internal/client/customiamrole/customiamrole_resource.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/aggregeation_rule.go b/internal/models/customiamrole/aggregeation_rule.go index b4358e253..d76abad5a 100644 --- a/internal/models/customiamrole/aggregeation_rule.go +++ b/internal/models/customiamrole/aggregeation_rule.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/customiamrole.go b/internal/models/customiamrole/customiamrole.go index 059b2eb21..0ce1a5f2d 100644 --- a/internal/models/customiamrole/customiamrole.go +++ b/internal/models/customiamrole/customiamrole.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/fullname.go b/internal/models/customiamrole/fullname.go index 44d73a8ce..10b9c17ab 100644 --- a/internal/models/customiamrole/fullname.go +++ b/internal/models/customiamrole/fullname.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/label_selector.go b/internal/models/customiamrole/label_selector.go index f2b5ae12d..1575cb828 100644 --- a/internal/models/customiamrole/label_selector.go +++ b/internal/models/customiamrole/label_selector.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/match_expression.go b/internal/models/customiamrole/match_expression.go index b37bbd82d..9c99b724e 100644 --- a/internal/models/customiamrole/match_expression.go +++ b/internal/models/customiamrole/match_expression.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/request.go b/internal/models/customiamrole/request.go index 6f2c19966..a9818243e 100644 --- a/internal/models/customiamrole/request.go +++ b/internal/models/customiamrole/request.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/resources.go b/internal/models/customiamrole/resources.go index e6850930c..de3e37323 100644 --- a/internal/models/customiamrole/resources.go +++ b/internal/models/customiamrole/resources.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/rules.go b/internal/models/customiamrole/rules.go index 7e7375c56..5f655a851 100644 --- a/internal/models/customiamrole/rules.go +++ b/internal/models/customiamrole/rules.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/models/customiamrole/spec.go b/internal/models/customiamrole/spec.go index 6a8c654e3..a20ce67d1 100644 --- a/internal/models/customiamrole/spec.go +++ b/internal/models/customiamrole/spec.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/converter_mapping.go b/internal/resources/customiamrole/converter_mapping.go index 50478becb..d890155a8 100644 --- a/internal/resources/customiamrole/converter_mapping.go +++ b/internal/resources/customiamrole/converter_mapping.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/resource_custom_iam_role.go b/internal/resources/customiamrole/resource_custom_iam_role.go index 74803ff27..f978b8a27 100644 --- a/internal/resources/customiamrole/resource_custom_iam_role.go +++ b/internal/resources/customiamrole/resource_custom_iam_role.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/schema.go b/internal/resources/customiamrole/schema.go index 1f9a859f9..2863b2d93 100644 --- a/internal/resources/customiamrole/schema.go +++ b/internal/resources/customiamrole/schema.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/tests/custom_iam_role_test.go b/internal/resources/customiamrole/tests/custom_iam_role_test.go index ae042dd5d..e91d50694 100644 --- a/internal/resources/customiamrole/tests/custom_iam_role_test.go +++ b/internal/resources/customiamrole/tests/custom_iam_role_test.go @@ -2,7 +2,7 @@ // +build customiamrole /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/tests/helper_test.go b/internal/resources/customiamrole/tests/helper_test.go index a4e1fd56b..edc0c5b0c 100644 --- a/internal/resources/customiamrole/tests/helper_test.go +++ b/internal/resources/customiamrole/tests/helper_test.go @@ -2,7 +2,7 @@ // +build customiamrole /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: MPL-2.0 */ diff --git a/internal/resources/customiamrole/tests/resource_tf_configs.go b/internal/resources/customiamrole/tests/resource_tf_configs.go index 60968563f..3e1b8975e 100644 --- a/internal/resources/customiamrole/tests/resource_tf_configs.go +++ b/internal/resources/customiamrole/tests/resource_tf_configs.go @@ -1,5 +1,5 @@ /* -Copyright © 2023 VMware, Inc. All Rights Reserved. +Copyright © 2024 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: MPL-2.0 */ From e83bed26bead3143d3fdf6ae95c2495c926bacc8 Mon Sep 17 00:00:00 2001 From: Ramya Bangera Date: Tue, 16 Jan 2024 16:30:12 +0530 Subject: [PATCH 20/22] Fix the resource definition and API endpoint in the test as per the schema Signed-off-by: Ramya Bangera --- internal/client/http_client.go | 2 +- internal/provider/provider.go | 4 +-- .../tests/custom_iam_role_test.go | 2 +- .../tests/resource_tf_configs.go | 36 ++++++++++--------- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/internal/client/http_client.go b/internal/client/http_client.go index cb25037fe..793cd5f19 100644 --- a/internal/client/http_client.go +++ b/internal/client/http_client.go @@ -38,8 +38,8 @@ import ( policyclustergroupclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/clustergroup/policy" sourcesecretclustergroupclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/clustergroup/sourcesecret" credentialclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/credential" - custompolicytemplateclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/custompolicytemplate" customiamroleclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/customiamrole" + custompolicytemplateclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/custompolicytemplate" eksclusterclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster" eksnodepoolclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/ekscluster/nodepool" inspectionsclient "github.com/vmware/terraform-provider-tanzu-mission-control/internal/client/inspections" diff --git a/internal/provider/provider.go b/internal/provider/provider.go index ec95e424c..335ca647b 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -18,8 +18,8 @@ import ( "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clusterclass" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/clustergroup" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/credential" - "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/customiamrole" + "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/custompolicytemplate" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/ekscluster" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/gitrepository" "github.com/vmware/terraform-provider-tanzu-mission-control/internal/resources/helmcharts" @@ -91,7 +91,7 @@ func Provider() *schema.Provider { utkgresource.ResourceName: utkgresource.ResourceTanzuKubernetesCluster(), provisioner.ResourceName: provisioner.ResourceProvisioner(), custompolicytemplate.ResourceName: custompolicytemplate.ResourceCustomPolicyTemplate(), - customiamrole.ResourceName: customiamrole.ResourceCustomIAMRole(), + customiamrole.ResourceName: customiamrole.ResourceCustomIAMRole(), }, DataSourcesMap: map[string]*schema.Resource{ cluster.ResourceName: cluster.DataSourceTMCCluster(), diff --git a/internal/resources/customiamrole/tests/custom_iam_role_test.go b/internal/resources/customiamrole/tests/custom_iam_role_test.go index e91d50694..9e686cfa4 100644 --- 
a/internal/resources/customiamrole/tests/custom_iam_role_test.go +++ b/internal/resources/customiamrole/tests/custom_iam_role_test.go @@ -96,7 +96,7 @@ func verifyTanzuKubernetesClusterResource( Name: customRoleName, } - resp, err := context.TMCConnection.CustomIAMRoleResourceService.CustomIARoleResourceServiceGet(fn) + resp, err := context.TMCConnection.CustomIAMRoleResourceService.CustomIAMRoleResourceServiceGet(fn) if err != nil { return errors.Errorf("Custom IAM Role resource not found, resource: %s | err: %s", resourceName, err) diff --git a/internal/resources/customiamrole/tests/resource_tf_configs.go b/internal/resources/customiamrole/tests/resource_tf_configs.go index 3e1b8975e..28b2cde0c 100644 --- a/internal/resources/customiamrole/tests/resource_tf_configs.go +++ b/internal/resources/customiamrole/tests/resource_tf_configs.go @@ -56,19 +56,21 @@ func (builder *ResourceTFConfigBuilder) GetCustomFullIAMRoleConfig() string { } } - resources = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] + allowed_scopes = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] tanzu_permissions = ["account.credential.iam.get"] - rule { - resources = ["deployments"] - verbs = ["get", "list"] - api_groups = ["*"] - } - - rule { - verbs = ["get", "list"] - api_groups = ["*"] - url_paths = ["/healthz"] + kubernetes_permissions { + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } + + rule { + verbs = ["get", "list"] + api_groups = ["*"] + url_paths = ["/healthz"] + } } } } @@ -85,12 +87,14 @@ func (builder *ResourceTFConfigBuilder) GetCustomSlimIAMRoleConfig() string { name = "%s" spec { - resources = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] + allowed_scopes = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"] - rule { - resources = ["deployments"] - verbs = ["get", "list"] - api_groups = ["*"] + kubernetes_permissions { + rule { + resources = ["deployments"] + verbs = ["get", "list"] + api_groups = ["*"] + } } } } From e7821843c8b6caab20f6c8d95841e6704f27aff2 Mon Sep 17 00:00:00 2001 From: Ramya Bangera Date: Fri, 19 Jan 2024 11:42:01 +0530 Subject: [PATCH 21/22] Add wait time for create/update operation to complete Signed-off-by: Ramya Bangera --- .../provisioner/provisioner_data_source_test.go | 1 + .../provisioner/provisioner_resource_test.go | 1 + .../resources/provisioner/resource_provisioner.go | 12 ++++++++++++ 3 files changed, 14 insertions(+) diff --git a/internal/resources/provisioner/provisioner_data_source_test.go b/internal/resources/provisioner/provisioner_data_source_test.go index 16206e695..6724ad8c3 100644 --- a/internal/resources/provisioner/provisioner_data_source_test.go +++ b/internal/resources/provisioner/provisioner_data_source_test.go @@ -40,6 +40,7 @@ func TestAcceptanceForProvisionerDataSource(t *testing.T) { }, }, }) + t.Log("provisioner datasource acceptance test complete!") } func getTestProvisionerWithDataSourceConfigValue(prvName string) string { diff --git a/internal/resources/provisioner/provisioner_resource_test.go b/internal/resources/provisioner/provisioner_resource_test.go index f82045cbb..b53a3a5ee 100644 --- a/internal/resources/provisioner/provisioner_resource_test.go +++ b/internal/resources/provisioner/provisioner_resource_test.go @@ -83,6 +83,7 @@ func TestAcceptanceForProvisionerResource(t *testing.T) { }, }, }) + t.Log("provisioner resource acceptance test complete!") } func checkResourceAttributes(provider *schema.Provider, resourceName, prvName string) resource.TestCheckFunc { diff --git 
a/internal/resources/provisioner/resource_provisioner.go b/internal/resources/provisioner/resource_provisioner.go
index 9b0e729ab..3db8e12ae 100644
--- a/internal/resources/provisioner/resource_provisioner.go
+++ b/internal/resources/provisioner/resource_provisioner.go
@@ -8,6 +8,7 @@ package provisioner
 import (
 	"context"
 	"log"
+	"time"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -24,6 +25,11 @@ type (
 	contextMethodKey struct{}
 )
 
+// Wait for 10s after a create/update operation: the operation takes some time to complete and
+// there is no status field to rely on to confirm that the create/update has finished.
+// NOTE: If errors still occur, the timeout may have to be tuned accordingly.
+const waitTime = 10 * time.Second
+
 func ResourceProvisioner() *schema.Resource {
 	return &schema.Resource{
 		Schema: provisionerSchema,
@@ -80,6 +86,9 @@ func resourceProvisionerCreate(ctx context.Context, d *schema.ResourceData, m in
 	d.SetId(provisionerResponse.Provisioner.Meta.UID)
 
+	log.Printf("Waiting for %v after the create operation before fetching the state", waitTime)
+	time.Sleep(waitTime)
+
 	return append(diags, resourceProvisionerRead(context.WithValue(ctx, contextMethodKey{}, helper.CreateState), d, m)...)
 }
 
@@ -118,6 +127,9 @@ func resourceProvisionerInPlaceUpdate(ctx context.Context, d *schema.ResourceDat
 		return diag.FromErr(errors.Wrapf(err, "Unable to update Tanzu Mission Control provisioner entry, name : %s", model.FullName.Name))
 	}
 
+	log.Printf("Waiting for %v after the update operation before fetching the state", waitTime)
+	time.Sleep(waitTime)
+
 	return resourceProvisionerRead(ctx, d, m)
 }
 

From 25c9eca71a2597d8f111aae3425ac79340eb7e9d Mon Sep 17 00:00:00 2001
From: Ramya Bangera
Date: Fri, 19 Jan 2024 12:51:48 +0530
Subject: [PATCH 22/22] Fix the NPE issue in provisioner resource/datasource

Added the fix to return without trying to parse the nil response in case of any error.

Signed-off-by: Ramya Bangera
---
 internal/resources/provisioner/data_source_provisioner.go | 3 +++
 internal/resources/provisioner/resource_provisioner.go    | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/internal/resources/provisioner/data_source_provisioner.go b/internal/resources/provisioner/data_source_provisioner.go
index a62e9db08..d1fffc435 100644
--- a/internal/resources/provisioner/data_source_provisioner.go
+++ b/internal/resources/provisioner/data_source_provisioner.go
@@ -62,6 +62,8 @@ func dataSourceProvisionerRead(ctx context.Context, d *schema.ResourceData, m in
 			_ = schema.RemoveFromState(d, m)
 			return
 		}
+
+		return
 	}
 
 	for i := range resp.Provisioners {
@@ -78,6 +80,7 @@ func dataSourceProvisionerRead(ctx context.Context, d *schema.ResourceData, m in
 			_ = schema.RemoveFromState(d, m)
 			return
 		}
+		return
 	}
 
 	d.SetId(resp.Provisioner.Meta.UID)
diff --git a/internal/resources/provisioner/resource_provisioner.go b/internal/resources/provisioner/resource_provisioner.go
index 3db8e12ae..d19b68b53 100644
--- a/internal/resources/provisioner/resource_provisioner.go
+++ b/internal/resources/provisioner/resource_provisioner.go
@@ -147,6 +147,8 @@ func resourceProvisionerRead(ctx context.Context, d *schema.ResourceData, m inte
 			_ = schema.RemoveFromState(d, m)
 			return
 		}
+
+		return
 	}
 
 	d.SetId(resp.Provisioner.Meta.UID)
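
Editor's note: the documentation template added in this series references an example file (examples/resources/custom_iam_role/resource_custom_iam_role.tf) that is not part of these patches. A minimal sketch of such a configuration, assembled only from the schema keys in schema.go and the corrected acceptance-test configs above, might look like the following; the role name and the aggregation label are illustrative placeholders, not values from the source.

# Illustrative sketch only: a custom IAM role readable at org, cluster group, and cluster scope,
# aggregating cluster roles by a hypothetical label and granting read access to deployments.
resource "tanzu-mission-control_custom_iam_role" "example" {
  name = "example-custom-iam-role" # hypothetical name

  spec {
    is_deprecated  = false
    allowed_scopes = ["ORGANIZATION", "CLUSTER_GROUP", "CLUSTER"]

    aggregation_rule {
      cluster_role_selector {
        match_labels = {
          "rbac.example/aggregate" = "true" # illustrative label
        }
      }
    }

    kubernetes_permissions {
      # Each rule must set either resources or url_paths, but not both,
      # per the validateSchema check in resource_custom_iam_role.go.
      rule {
        api_groups = ["*"]
        resources  = ["deployments"]
        verbs      = ["get", "list"]
      }

      rule {
        api_groups = ["*"]
        url_paths  = ["/healthz"]
        verbs      = ["get"]
      }
    }
  }
}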