Skip to content

Commit

Permalink
Address CVE-2024-1725: Restrict access to infrastructure PVCs by requ…
Browse files Browse the repository at this point in the history
…iring matching infraClusterLabels on tenant PVCs (#103)

The CVE describes how an attacker may create a PV/PVC in a guest cluster to access any PVC in the infra cluster namespace.
Those infra-cluster PVCs may belong to other guest clusters or may have been created out of band from the kubevirt-csi driver.

This PR addresses the issue by:

1.  infraClusterLabels are required (but it is up to the admin to make sure they are unique per tenant)
2.  guest may only access infra PVCs with matching labels
3.  guest can only access PVCs with specific prefix (default is "pvc-")

Shoutout to awels who actually implemented this based on input from davidvossel.

Signed-off-by: Michael Henriksen <mhenriks@redhat.com>
Co-authored-by: Alexander Wels <awels@redhat.com>
  • Loading branch information
mhenriks and awels authored Mar 8, 2024
1 parent a722f94 commit cc28dcb
Show file tree
Hide file tree
Showing 8 changed files with 296 additions and 33 deletions.
8 changes: 6 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,11 @@ include $(addprefix ./vendor/github.com/openshift/build-machinery-go/make/, \
# You can list all codegen related variables by:
# $ make -n --print-data-base | grep ^CODEGEN
.PHONY: image-build
image-build: generate
# Disable the 'generate' prerequisite for now:
# it updates vendored libraries, which is better done manually,
# especially when changes will be backported.
#image-build: generate
image-build:
source ./hack/cri-bin.sh && \
$$CRI_BIN build -t $(IMAGE_REF) --build-arg git_sha=$(SHA) .

Expand Down Expand Up @@ -116,4 +120,4 @@ sanity-test:

.PHONY: generate
generate:
./hack/generate_clients.sh
./hack/generate_clients.sh
10 changes: 9 additions & 1 deletion cmd/kubevirt-csi-driver/kubevirt-csi-driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ var (
infraClusterNamespace = flag.String("infra-cluster-namespace", "", "The infra-cluster namespace")
infraClusterKubeconfig = flag.String("infra-cluster-kubeconfig", "", "the infra-cluster kubeconfig file. If not set, defaults to in cluster config.")
infraClusterLabels = flag.String("infra-cluster-labels", "", "The infra-cluster labels to use when creating resources in infra cluster. 'name=value' fields separated by a comma")
volumePrefix = flag.String("volume-prefix", "pvc", "The prefix expected for persistent volumes")
// infraStorageClassEnforcement = flag.String("infra-storage-class-enforcement", "", "A string encoded yaml that represents the policy of enforcing which infra storage classes are allowed in persistentVolume of type kubevirt")
infraStorageClassEnforcement = os.Getenv("INFRA_STORAGE_CLASS_ENFORCEMENT")

Expand Down Expand Up @@ -54,6 +55,13 @@ func handle() {
}
klog.V(2).Infof("Driver vendor %v %v", service.VendorName, service.VendorVersion)

if (infraClusterLabels == nil || *infraClusterLabels == "") && !*runNodeService {
klog.Fatal("infra-cluster-labels must be set")
}
if volumePrefix == nil || *volumePrefix == "" {
klog.Fatal("volume-prefix must be set")
}

inClusterConfig, err := rest.InClusterConfig()
if err != nil {
klog.Fatalf("Failed to build in cluster config: %v", err)
Expand Down Expand Up @@ -86,7 +94,7 @@ func handle() {
infraClusterLabelsMap := parseLabels()
storageClassEnforcement := configureStorageClassEnforcement(infraStorageClassEnforcement)

virtClient, err := kubevirt.NewClient(infraRestConfig, infraClusterLabelsMap, storageClassEnforcement)
virtClient, err := kubevirt.NewClient(infraRestConfig, infraClusterLabelsMap, storageClassEnforcement, *volumePrefix)
if err != nil {
klog.Fatal(err)
}
Expand Down
10 changes: 10 additions & 0 deletions e2e/common_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
cdicli "kubevirt.io/csi-driver/pkg/generated/containerized-data-importer/client-go/clientset/versioned"
)

// RunCmd function executes a command, and returns STDOUT and STDERR bytes
Expand Down Expand Up @@ -192,6 +193,15 @@ func generateInfraClient() (*kubernetes.Clientset, error) {
return kubernetes.NewForConfig(restConfig)
}

// generateInfraCdiClient returns a CDI clientset for the infra cluster,
// built from the shared infra-cluster rest config.
func generateInfraCdiClient() (*cdicli.Clientset, error) {
	cfg, err := generateInfraRestConfig()
	if err != nil {
		return nil, err
	}
	return cdicli.NewForConfig(cfg)
}

func generateInfraSnapClient() (*snapcli.Clientset, error) {
restConfig, err := generateInfraRestConfig()
if err != nil {
Expand Down
134 changes: 134 additions & 0 deletions e2e/create-pvc_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,18 +10,22 @@ import (

"k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog/v2"

"k8s.io/client-go/tools/clientcmd"
cdicli "kubevirt.io/csi-driver/pkg/generated/containerized-data-importer/client-go/clientset/versioned"
kubecli "kubevirt.io/csi-driver/pkg/generated/kubevirt/client-go/clientset/versioned"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/spf13/pflag"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

var virtClient *kubecli.Clientset
Expand Down Expand Up @@ -282,6 +286,136 @@ var _ = Describe("CreatePVC", func() {
Entry("Filesystem volume mode", k8sv1.PersistentVolumeFilesystem, attacherPodFs),
Entry("Block volume mode", k8sv1.PersistentVolumeBlock, attacherPodBlock),
)

// Regression coverage for CVE-2024-1725: a guest (tenant) cluster must not be
// able to hand-craft a PV/PVC that points at an arbitrary PVC in the infra
// cluster namespace. The driver only allows infra volumes that carry the
// configured infraClusterLabels and the expected volume-name prefix.
Context("Should prevent access to volumes from infra cluster", func() {
var tenantPVC *k8sv1.PersistentVolumeClaim
var tenantPV *k8sv1.PersistentVolume
var infraDV *cdiv1.DataVolume
var infraCdiClient *cdicli.Clientset
BeforeEach(func() {
var err error
infraCdiClient, err = generateInfraCdiClient()
Expect(err).ToNot(HaveOccurred())
})

// Tear down tenant PVC, tenant PV, then the infra DataVolume. Each field is
// reset to nil after successful deletion so a failed test does not leak
// cleanup into the next run.
AfterEach(func() {
By("Cleaning up resources for test")
if tenantPVC != nil {
err := tenantClient.CoreV1().PersistentVolumeClaims(tenantPVC.Namespace).Delete(context.Background(), tenantPVC.Name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
_, err := tenantClient.CoreV1().PersistentVolumeClaims(tenantPVC.Namespace).Get(context.Background(), tenantPVC.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, 1*time.Minute, 2*time.Second).Should(BeTrue(), "tenant pvc should disappear")
tenantPVC = nil
}
if tenantPV != nil {
err := tenantClient.CoreV1().PersistentVolumes().Delete(context.Background(), tenantPV.Name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
// For some reason this takes about 2 minutes.
Eventually(func() bool {
_, err := tenantClient.CoreV1().PersistentVolumes().Get(context.Background(), tenantPV.Name, metav1.GetOptions{})
return errors.IsNotFound(err)
}, 3*time.Minute, 2*time.Second).Should(BeTrue(), "tenant pv should disappear")
tenantPV = nil
}
if infraDV != nil {
// Verify the DV survived the test (the driver must NOT have deleted it)
// before removing it ourselves.
_, err := infraCdiClient.CdiV1beta1().DataVolumes(InfraClusterNamespace).Get(context.Background(), infraDV.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
err = infraCdiClient.CdiV1beta1().DataVolumes(InfraClusterNamespace).Delete(context.Background(), infraDV.Name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
infraDV = nil
}
})

It("should not be able to create a PV and access a volume from the infra cluster that is not labeled", func() {
// Create a blank DataVolume directly in the infra namespace, bypassing the
// CSI driver, so it carries none of the tenant's infraClusterLabels.
infraDV = &cdiv1.DataVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "infra-pvc",
Namespace: InfraClusterNamespace,
},
Spec: cdiv1.DataVolumeSpec{
Source: &cdiv1.DataVolumeSource{
Blank: &cdiv1.DataVolumeBlankImage{},
},
Storage: &cdiv1.StorageSpec{
Resources: k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
}
var err error
infraDV, err = infraCdiClient.CdiV1beta1().DataVolumes(InfraClusterNamespace).Create(context.Background(), infraDV, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())

By("Creating a specially crafted PV, attempt to access volume from infra cluster that should not be accessed")
// The attack: a tenant-side PV whose VolumeHandle names the unlabeled infra
// DataVolume, as if it had been provisioned by the csi.kubevirt.io driver.
tenantPV = &k8sv1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "tenant-pv",
},
Spec: k8sv1.PersistentVolumeSpec{
AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
Capacity: k8sv1.ResourceList{k8sv1.ResourceStorage: resource.MustParse("1Gi")},
PersistentVolumeSource: k8sv1.PersistentVolumeSource{
CSI: &k8sv1.CSIPersistentVolumeSource{
Driver: "csi.kubevirt.io",
VolumeHandle: infraDV.Name,
VolumeAttributes: map[string]string{
"bus": "scsi",
"serial": "abcd",
"storage.kubernetes.io/csiProvisionerIdentity": "1708112628060-923-csi.kubevirt.io",
},
FSType: "ext4",
},
},
StorageClassName: "kubevirt",
PersistentVolumeReclaimPolicy: k8sv1.PersistentVolumeReclaimDelete,
},
}
_, err = tenantClient.CoreV1().PersistentVolumes().Create(context.Background(), tenantPV, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Bind a tenant PVC to the crafted PV by name.
tenantPVC = &k8sv1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "tenant-pvc",
},
Spec: k8sv1.PersistentVolumeClaimSpec{
AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
Resources: k8sv1.ResourceRequirements{
Requests: k8sv1.ResourceList{
k8sv1.ResourceStorage: resource.MustParse("1Gi"),
},
},
VolumeName: tenantPV.Name,
},
}
tenantPVC, err = tenantClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.Background(), tenantPVC, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
pod := writerPodFs(tenantPVC.Name)
By("Creating pod that attempts to use the specially crafted PVC")
pod, err = tenantClient.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
defer deletePod(tenantClient.CoreV1(), namespace, pod.Name)

involvedObject := fmt.Sprintf("involvedObject.name=%s", pod.Name)
By("Waiting for error event to show up in pod event log")
// The attach must fail: the driver rejects the unlabeled volume and the
// kubelet surfaces a FailedAttachVolume event with "invalid volume name".
Eventually(func() bool {
list, err := tenantClient.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{
FieldSelector: involvedObject, TypeMeta: metav1.TypeMeta{Kind: "Pod"},
})
Expect(err).ToNot(HaveOccurred())
for _, event := range list.Items {
klog.Infof("Event: %s [%s]", event.Message, event.Reason)
if event.Reason == "FailedAttachVolume" && strings.Contains(event.Message, "invalid volume name") {
return true
}
}
return false
}, 30*time.Second, time.Second).Should(BeTrue(), "error event should show up in pod event log")
})
})
})

func writerPodFs(volumeName string) *k8sv1.Pod {
Expand Down
1 change: 1 addition & 0 deletions hack/cluster-sync.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ INFRA_STORAGE_CLASS=${INFRA_STORAGE_CLASS:-rook-ceph-block}
REGISTRY=${REGISTRY:-192.168.66.2:5000}
TARGET_NAME=${TARGET_NAME:-kubevirt-csi-driver}
TAG=${TAG:-latest}
export INFRACLUSTER_LABELS=${INFRACLUSTER_LABELS:-"tenant-cluster=${TENANT_CLUSTER_NAMESPACE}"}

function tenant::deploy_kubeconfig_secret() {
TOKEN=$(_kubectl create token kubevirt-csi -n $TENANT_CLUSTER_NAMESPACE)
Expand Down
32 changes: 28 additions & 4 deletions pkg/kubevirt/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"encoding/json"
goerrors "errors"
"fmt"
"strings"
"time"

snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
Expand Down Expand Up @@ -62,10 +63,11 @@ type client struct {
restClient *rest.RESTClient
storageClassEnforcement util.StorageClassEnforcement
infraLabelMap map[string]string
volumePrefix string
}

// NewClient New creates our client wrapper object for the actual kubeVirt and kubernetes clients we use.
func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, storageClassEnforcement util.StorageClassEnforcement) (Client, error) {
func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, storageClassEnforcement util.StorageClassEnforcement, prefix string) (Client, error) {
result := &client{}

Scheme := runtime.NewScheme()
Expand Down Expand Up @@ -108,6 +110,7 @@ func NewClient(config *rest.Config, infraClusterLabelMap map[string]string, stor
result.restClient = restClient
result.snapClient = snapClient
result.infraLabelMap = infraClusterLabelMap
result.volumePrefix = fmt.Sprintf("%s-", prefix)
result.storageClassEnforcement = storageClassEnforcement
return result, nil
}
Expand Down Expand Up @@ -211,7 +214,11 @@ func (c *client) GetVirtualMachine(ctx context.Context, namespace, name string)

// CreateDataVolume creates a new DataVolume under a namespace.
// The volume name must carry the configured volume prefix; any other name is
// rejected with ErrInvalidVolume so a tenant cannot create arbitrarily named
// volumes in the infra cluster namespace (CVE-2024-1725 hardening).
func (c *client) CreateDataVolume(ctx context.Context, namespace string, dataVolume *cdiv1.DataVolume) (*cdiv1.DataVolume, error) {
	// Guard clause instead of if/else: keep the happy path unindented
	// (Go indent-error-flow idiom).
	if !strings.HasPrefix(dataVolume.GetName(), c.volumePrefix) {
		return nil, ErrInvalidVolume
	}
	return c.cdiClient.CdiV1beta1().DataVolumes(namespace).Create(ctx, dataVolume, metav1.CreateOptions{})
}

// Ping performs a minimal request to the infra-cluster k8s api
Expand All @@ -222,11 +229,27 @@ func (c *client) Ping(ctx context.Context) error {

// DeleteDataVolume deletes a DataVolume from a namespace by name.
// The volume is fetched through GetDataVolume first, which enforces the
// tenant label/prefix checks; a volume that no longer exists is treated
// as already deleted and reported as success.
func (c *client) DeleteDataVolume(ctx context.Context, namespace string, name string) error {
	dv, err := c.GetDataVolume(ctx, namespace, name)
	switch {
	case errors.IsNotFound(err):
		// Already gone: deletion is idempotent.
		return nil
	case err != nil:
		return err
	case dv == nil:
		// Defensive: nothing to delete.
		return nil
	}
	return c.cdiClient.CdiV1beta1().DataVolumes(namespace).Delete(ctx, dv.Name, metav1.DeleteOptions{})
}

// GetDataVolume fetches a DataVolume by name, refusing to hand back volumes
// that do not belong to this tenant: the DV must carry every configured
// infra-cluster label and its name must start with the expected volume
// prefix, otherwise ErrInvalidVolume is returned.
func (c *client) GetDataVolume(ctx context.Context, namespace string, name string) (*cdiv1.DataVolume, error) {
	dv, err := c.cdiClient.CdiV1beta1().DataVolumes(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if dv == nil {
		return nil, nil
	}
	// Reject volumes missing the tenant labels or the expected name prefix.
	if !containsLabels(dv.Labels, c.infraLabelMap) || !strings.HasPrefix(dv.GetName(), c.volumePrefix) {
		return nil, ErrInvalidVolume
	}
	return dv, nil
}

func (c *client) CreateVolumeSnapshot(ctx context.Context, namespace, name, claimName, snapshotClassName string) (*snapshotv1.VolumeSnapshot, error) {
Expand Down Expand Up @@ -407,3 +430,4 @@ func (c *client) ListVolumeSnapshots(ctx context.Context, namespace string) (*sn
}

var ErrInvalidSnapshot = goerrors.New("invalid snapshot name")
var ErrInvalidVolume = goerrors.New("invalid volume name")
Loading

0 comments on commit cc28dcb

Please sign in to comment.