From 8eeb7149e021354be48ebdc510adbcd47f7b497c Mon Sep 17 00:00:00 2001
From: Ygal Blum
Date: Thu, 6 Mar 2025 11:59:05 -0500
Subject: [PATCH] Add test for creating Virtual Machines from a master image (#197)

Signed-off-by: Ygal Blum
---
 README.md                                     |  44 +++
 cmd/config/virt-clone/check.sh                | 107 +++++++
 .../templates/baseImageDataSource.yml         |  15 +
 .../templates/baseImageDataVolume.yml         |  20 ++
 .../templates/baseImageDataVolumeSnapshot.yml |   8 +
 .../templates/secret_ssh_public.yml           |   7 +
 cmd/config/virt-clone/templates/vm.yml        |  73 +++++
 cmd/config/virt-clone/virt-clone.yml          | 296 ++++++++++++++++++
 cmd/ocp.go                                    |   1 +
 common.go                                     |  10 +-
 test/test-ocp.bats                            |  13 +
 virt-clone.go                                 | 124 ++++++++
 12 files changed, 717 insertions(+), 1 deletion(-)
 create mode 100755 cmd/config/virt-clone/check.sh
 create mode 100644 cmd/config/virt-clone/templates/baseImageDataSource.yml
 create mode 100644 cmd/config/virt-clone/templates/baseImageDataVolume.yml
 create mode 100644 cmd/config/virt-clone/templates/baseImageDataVolumeSnapshot.yml
 create mode 100644 cmd/config/virt-clone/templates/secret_ssh_public.yml
 create mode 100644 cmd/config/virt-clone/templates/vm.yml
 create mode 100644 cmd/config/virt-clone/virt-clone.yml
 create mode 100644 virt-clone.go

diff --git a/README.md b/README.md
index 59cb7f59..5033890c 100644
--- a/README.md
+++ b/README.md
@@ -454,6 +454,50 @@ The test generated the SSH keys automatically.
 By default, it stores the pair in a temporary directory.
 Users may choose the store the key in a specified directory by setting `--ssh-key-path`
 
+### Virt Clone
+
+Tests the capacity and performance of starting multiple virtual machines whose root disks are clones of a single volume. This test mimics a typical VDI provisioning sequence.
+
+#### Test Sequence
+
+The test runs the following sequence:
+1. Create a `VirtualMachine` in namespace A
+2. Stop the `VirtualMachine`
+3. Create a `DataVolume` in namespace B using the rootdisk of the `VirtualMachine` as the source
+4. If the `dataImportCronSourceFormat` field of the `StorageProfile` `status` is set to `snapshot`, or `--use-snapshot` is set to `true`, create a `VolumeSnapshot` of the `DataVolume`
+5. Create a `DataSource`, setting the `source` field to either the `VolumeSnapshot` (if one was created) or the `DataVolume`
+6. Create `VirtualMachines` in namespace B based on the `DataSource`. Some machines are marked as `persistent` and some as `ephemeral`
+7. Restart the `ephemeral` machines by stopping them, deleting their disks and starting them again
+
+#### Tested StorageClass
+
+By default, the test uses the cluster's default `StorageClass`. To test a different one, pass its name with `--storage-class`.
+
+If `--use-snapshot` is explicitly set to `true`, a corresponding `VolumeSnapshotClass` using the same provisioner must exist.
+Otherwise, the test checks the `StorageProfile` of the `StorageClass` and acts accordingly.
+
+#### Test Namespace
+
+The test creates `VirtualMachines` in two namespaces: `<baseName>-base` and `<baseName>-clones`.
+
+By default, the `baseName` is `virt-clone`. Set it by passing `--namespace` (or `-n`).
+
+#### Test Size Parameters
+
+Users may control the workload sizes by passing the following arguments:
+- `--vms` - Number of `VirtualMachines` to create in step 6
+
+#### Volume Access Mode
+
+By default, volumes are created with the `ReadWriteMany` access mode, as this is the recommended configuration for `VirtualMachines`.
+If it is not supported, the access mode may be changed by setting `--access-mode`. The supported values are `RO`, `RWO` and `RWX`.
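+
+For example, a run against a hypothetical `StorageClass` named `my-rwo-class` that only supports `ReadWriteOnce` volumes might combine the flags as follows (the class name and VM count here are purely illustrative):
+
+```
+kube-burner-ocp virt-clone --vms 20 --storage-class my-rwo-class --access-mode RWO
+```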
+
+#### Temporary SSH Keys
+
+In order to verify that the VMs actually completed booting, the test generates an SSH key pair.
+By default, it stores the pair in a temporary directory.
+Users may choose to store the key in a specified directory by setting `--ssh-key-path`
+
 ## Custom Workload: Bring your own workload
 
 To kickstart kube-burner-ocp with a custom workload, `init` becomes your go-to command. This command is equipped with flags that enable to seamlessly integrate and run your personalized workloads. Here's a breakdown of the flags accepted by the init command:
diff --git a/cmd/config/virt-clone/check.sh b/cmd/config/virt-clone/check.sh
new file mode 100755
index 00000000..9794a2ab
--- /dev/null
+++ b/cmd/config/virt-clone/check.sh
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+COMMAND=$1
+LABEL_KEY=$2
+LABEL_VALUE=$3
+NAMESPACE=$4
+IDENTITY_FILE=$5
+REMOTE_USER=$6
+EXPECTED_ROOT_SIZE=$7
+EXPECTED_DATA_SIZE=$8
+
+# Wait up to ~60 minutes
+MAX_RETRIES=130
+# Use a shorter sleep for the first retries
+MAX_SHORT_WAITS=12
+SHORT_WAIT=5
+LONG_WAIT=30
+
+if virtctl ssh --help | grep -qc "\--local-ssh " ; then
+    LOCAL_SSH="--local-ssh"
+else
+    LOCAL_SSH=""
+fi
+
+get_vms() {
+    local namespace=$1
+    local label_key=$2
+    local label_value=$3
+
+    local vms
+    vms=$(kubectl get vm -n "${namespace}" -l "${label_key}"="${label_value}" -o json | jq .items | jq -r '.[] | .metadata.name')
+    local ret=$?
+    if [ $ret -ne 0 ]; then
+        echo "Failed to get VM list"
+        exit 1
+    fi
+    echo "${vms}"
+}
+
+remote_command() {
+    local namespace=$1
+    local identity_file=$2
+    local remote_user=$3
+    local vm_name=$4
+    local command=$5
+
+    local output
+    output=$(virtctl ssh ${LOCAL_SSH} --local-ssh-opts="-o StrictHostKeyChecking=no" --local-ssh-opts="-o UserKnownHostsFile=/dev/null" -n "${namespace}" -i "${identity_file}" -c "${command}" --username "${remote_user}" "${vm_name}" 2>/dev/null)
+    local ret=$?
+    if [ $ret -ne 0 ]; then
+        return 1
+    fi
+    echo "${output}"
+}
+
+check_vm_running() {
+    local vm=$1
+    remote_command "${NAMESPACE}" "${IDENTITY_FILE}" "${REMOTE_USER}" "${vm}" "ls"
+    return $?
+}
+
+check_resize() {
+    local vm=$1
+
+    local blk_devices
+    blk_devices=$(remote_command "${NAMESPACE}" "${IDENTITY_FILE}" "${REMOTE_USER}" "${vm}" "lsblk --json -v --output=NAME,SIZE")
+    local ret=$?
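+    # A non-zero status usually means the VM is not reachable over SSH yet;
+    # propagate it so the retry loop at the bottom of the script waits and tries again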
+    if [ $ret -ne 0 ]; then
+        return $ret
+    fi
+
+    local size
+    size=$(echo "${blk_devices}" | jq .blockdevices | jq -r --arg name "vda" '.[] | select(.name == $name) | .size')
+    if [[ $size != "${EXPECTED_ROOT_SIZE}" ]]; then
+        return 1
+    fi
+
+    local datavolume_sizes
+    datavolume_sizes=$(echo "${blk_devices}" | jq .blockdevices | jq -r --arg name "vda" '.[] | select(.name != $name) | .size')
+    for datavolume_size in ${datavolume_sizes}; do
+        if [[ $datavolume_size != "${EXPECTED_DATA_SIZE}" ]]; then
+            return 1
+        fi
+    done
+
+    return 0
+}
+
+VMS=$(get_vms "${NAMESPACE}" "${LABEL_KEY}" "${LABEL_VALUE}")
+
+for vm in ${VMS}; do
+    for attempt in $(seq 1 $MAX_RETRIES); do
+        if ${COMMAND} "${vm}"; then
+            break
+        fi
+        if [ "${attempt}" -lt $MAX_RETRIES ]; then
+            if [ "${attempt}" -lt $MAX_SHORT_WAITS ]; then
+                sleep "${SHORT_WAIT}"
+            else
+                sleep "${LONG_WAIT}"
+            fi
+        else
+            echo "Failed waiting on ${COMMAND} for ${vm}" >&2
+            exit 1
+        fi
+    done
+    echo "${COMMAND} finished successfully for ${vm}"
+done
diff --git a/cmd/config/virt-clone/templates/baseImageDataSource.yml b/cmd/config/virt-clone/templates/baseImageDataSource.yml
new file mode 100644
index 00000000..432c74de
--- /dev/null
+++ b/cmd/config/virt-clone/templates/baseImageDataSource.yml
@@ -0,0 +1,15 @@
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataSource
+metadata:
+  name: {{ .cloneDataSourceName }}
+spec:
+  source:
+  {{ if .useSnapshot }}
+    snapshot:
+      name: {{ .cloneDataSourceSnapshotName }}
+      namespace: {{ .cloneDataSourceSnapshotNamespace }}
+  {{ else }}
+    pvc:
+      name: {{ .cloneDataSourcePVCName }}
+      namespace: {{ .cloneDataSourcePVCNamespace }}
+  {{ end }}
diff --git a/cmd/config/virt-clone/templates/baseImageDataVolume.yml b/cmd/config/virt-clone/templates/baseImageDataVolume.yml
new file mode 100644
index 00000000..01355299
--- /dev/null
+++ b/cmd/config/virt-clone/templates/baseImageDataVolume.yml
@@ -0,0 +1,20 @@
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+  name: {{ .cloneDataVolumeName }}
+  annotations:
+    cdi.kubevirt.io/storage.bind.immediate.requested: "true"
+spec:
+  source:
+    pvc:
+      namespace: {{ .baseVMNamespace }}
+      name: {{ .baseVMRootDiskPVCName }}
+  storage:
+    accessModes:
+      - {{ .accessMode }}
+    resources:
+      requests:
+        storage: {{ default "5Gi" .rootVolumeSize }}
+    storageClassName: {{ .storageClassName }}
+...
\ No newline at end of file
diff --git a/cmd/config/virt-clone/templates/baseImageDataVolumeSnapshot.yml b/cmd/config/virt-clone/templates/baseImageDataVolumeSnapshot.yml
new file mode 100644
index 00000000..cf1c3665
--- /dev/null
+++ b/cmd/config/virt-clone/templates/baseImageDataVolumeSnapshot.yml
@@ -0,0 +1,8 @@
+apiVersion: snapshot.storage.k8s.io/v1
+kind: VolumeSnapshot
+metadata:
+  name: {{ .cloneVolumeSnapshotName }}
+spec:
+  volumeSnapshotClassName: {{ .volumeSnapshotClassName }}
+  source:
+    persistentVolumeClaimName: {{ .cloneVolumeSnapshotPVCName }}
diff --git a/cmd/config/virt-clone/templates/secret_ssh_public.yml b/cmd/config/virt-clone/templates/secret_ssh_public.yml
new file mode 100644
index 00000000..f0e9a8d0
--- /dev/null
+++ b/cmd/config/virt-clone/templates/secret_ssh_public.yml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}
+type: Opaque
+data:
+  key: {{ .publicKeyPath | ReadFile | b64enc }}
\ No newline at end of file
diff --git a/cmd/config/virt-clone/templates/vm.yml b/cmd/config/virt-clone/templates/vm.yml
new file mode 100644
index 00000000..0a9e0e86
--- /dev/null
+++ b/cmd/config/virt-clone/templates/vm.yml
@@ -0,0 +1,73 @@
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: "{{ .vmName }}-{{ .Iteration }}-{{ .Replica }}"
+  labels:
+  {{ range $key, $value := .vmLabels }}
+    {{ $key }}: {{ $value }}
+  {{ end }}
+spec:
+  dataVolumeTemplates:
+  - metadata:
+      name: "{{ .rootDiskVolumeName }}-{{ .Iteration }}-{{ .Replica }}"
+      labels:
+      {{ range $key, $value := .rootVolumeLabels }}
+        {{ $key }}: {{ $value }}
+      {{ end }}
+    spec:
+      {{ if .rootdiskVolumeSource }}
+      source: {{ .rootdiskVolumeSource | mustToJson }}
+      {{ end }}
+      {{ if .rootdiskVolumeSourceRef }}
+      sourceRef: {{ .rootdiskVolumeSourceRef | mustToJson }}
+      {{ end }}
+      storage:
+        accessModes:
+          - {{ .accessMode }}
+        storageClassName: {{ .storageClassName }}
+        resources:
+          requests:
+            storage: {{ default "5Gi" .rootVolumeSize }}
+  running: true
+  template:
+    spec:
+      accessCredentials:
+      - sshPublicKey:
+          propagationMethod:
+            noCloud: {}
+          source:
+            secret:
+              secretName: {{ .sshPublicKeySecret }}
+      architecture: amd64
+      domain:
+        resources:
+          requests:
+            memory: {{ default "512Mi" .vmMemory }}
+        devices:
+          disks:
+          - disk:
+              bus: virtio
+            name: rootdisk
+            bootOrder: 1
+          interfaces:
+          - name: default
+            masquerade: {}
+            bootOrder: 2
+        machine:
+          type: pc-q35-rhel9.4.0
+      networks:
+      - name: default
+        pod: {}
+      volumes:
+      - dataVolume:
+          name: "{{ .rootDiskVolumeName }}-{{ .Iteration }}-{{ .Replica }}"
+        name: rootdisk
+      - cloudInitNoCloud:
+          userData: |
+            #cloud-config
+            chpasswd:
+              expire: false
+            password: {{ uuidv4 }}
+            user: fedora
+            runcmd: []
+        name: cloudinitdisk
diff --git a/cmd/config/virt-clone/virt-clone.yml b/cmd/config/virt-clone/virt-clone.yml
new file mode 100644
index 00000000..e5986cb0
--- /dev/null
+++ b/cmd/config/virt-clone/virt-clone.yml
@@ -0,0 +1,296 @@
+{{- $namespaceBaseName := .testNamespaceBaseName -}}
+{{- $testNamespacesLabelKey := "kube-burner.io/test-name" -}}
+{{- $testNamespacesLabelValue := "virt-clone" -}}
+{{- $createBaseVMJobName := "create-base-vm" -}}
+{{- $baseVMNamespace := (list $namespaceBaseName "base") | join "-" -}}
+{{- $baseVMName := "base-vm" -}}
+{{- $baseVMRootDiskVolumeName := (list $baseVMName "root") | join "-" }}
+{{- $sshPublicKeySecretName := "burner-clone-test" -}}
+{{- $cloneVMNamespace := (list $namespaceBaseName "clones") | join "-" -}}
+{{- $cloneDataVolumeName := "master-image" -}}
+{{- $cloneDataSourceName := "master-image" -}}
+{{- $createCloneVMJobName := "create-clone-vms" -}}
+{{- $cloneVMName := "clone-vm" -}}
+{{- $cloneRootDiskVolumeName := (list $cloneVMName "root") | join "-" }}
+{{- $cloneVolumeSnapshotName := "master-image" -}}
+{{- $vmTypeLabelKey := "clone.kube-burner.io/vm-type" -}}
+{{- $typeMaster := "master" -}}
+{{- $typePersistent := "persistent" -}}
+{{- $typeEphemeral := "ephemeral" -}}
+{{- $volumeTypeLabelKey := "clone.kube-burner.io/volume-type" -}}
+{{- $persistentVMCount := (randInt 1 .cloneVMCount) -}}
+{{- $ephemeralVMCount := sub .cloneVMCount $persistentVMCount -}}
+
+global:
+  measurements:
+    - name: vmiLatency
+    - name: dataVolumeLatency
+
+metricsEndpoints:
+- indexer:
+    type: local
+    metricsDirectory: ./virt-clone-results
+
+jobs:
+- name: start-fresh
+  jobType: delete
+  waitForDeletion: true
+  qps: 5
+  burst: 10
+  objects:
+  - kind: Namespace
+    labelSelector:
+      {{ $testNamespacesLabelKey }}: {{ $testNamespacesLabelValue }}
+
+- name: {{ $createBaseVMJobName }}
+  jobType: create
+  jobIterations: 1
+  qps: 20
+  burst: 20
+  namespacedIterations: false
+  namespace: {{ $baseVMNamespace }}
+  namespaceLabels:
+    {{ $testNamespacesLabelKey }}: {{ $testNamespacesLabelValue }}
+  # verify object count after running each job
+  verifyObjects: true
+  errorOnVerify: true
+  # wait for all VMIs to be in the Ready condition
+  waitWhenFinished: false
+  podWait: true
+  # maximum time to wait for all objects to be created
+  maxWaitTimeout: 15m
+  jobPause: 10s
+  # cleanup removes leftovers of a previous execution (not deleted or failed)
+  cleanup: false
+  # Set missing keys as empty to allow using default values
+  defaultMissingKeysWithZero: true
+  beforeCleanup: "./check.sh check_vm_running kube-burner-job {{ $createBaseVMJobName }} {{ $baseVMNamespace }} {{ .privateKey }} fedora"
+  objects:
+
+  - objectTemplate: templates/secret_ssh_public.yml
+    runOnce: true
+    replicas: 1
+    inputVars:
+      name: {{ $sshPublicKeySecretName }}
+      publicKeyPath: {{ .publicKey }}
+
+  - objectTemplate: templates/vm.yml
+    replicas: 1
+    inputVars:
+      vmName: {{ $baseVMName }}
+      vmLabels:
+        {{ $vmTypeLabelKey }}: {{ $typeMaster }}
+      rootDiskVolumeName: {{ $baseVMRootDiskVolumeName }}
+      rootVolumeLabels:
+        {{ $volumeTypeLabelKey }}: {{ $typeMaster }}
+      rootdiskVolumeSource:
+        registry:
+          url: "docker://quay.io/containerdisks/fedora:latest"
+      storageClassName: {{ .storageClassName }}
+      sshPublicKeySecret: {{ $sshPublicKeySecretName }}
+      accessMode: {{ .accessMode }}
+
+- name: stop-vm
+  jobType: kubevirt
+  qps: 20
+  burst: 20
+  jobIterations: 1
+  maxWaitTimeout: 1h
+  objectDelay: 1m
+  waitWhenFinished: true
+  objects:
+  - kubeVirtOp: stop
+    labelSelector:
+      kube-burner-job: {{ $createBaseVMJobName }}
+      {{ $vmTypeLabelKey }}: {{ $typeMaster }}
+
+# Create the DV in a separate job to make sure it is ready before continuing
+- name: create-base-image-dv
+  jobType: create
+  jobIterations: 1
+  qps: 20
+  burst: 20
+  namespacedIterations: false
+  namespace: {{ $cloneVMNamespace }}
+  namespaceLabels:
+    {{ $testNamespacesLabelKey }}: {{ $testNamespacesLabelValue }}
+  # verify object count after running each job
+  verifyObjects: true
+  errorOnVerify: true
+  # wait for all VMIs to be in the Ready condition
+  waitWhenFinished: false
+  podWait: true
+  # maximum time to wait for all objects to be created
+  maxWaitTimeout: 15m
+  # wait before the job completes to allow metrics collection
+  jobPause: 1m
+  # Do not clean the namespaces
+  cleanup: false
+  # Set missing keys as empty to allow using default values
+  defaultMissingKeysWithZero: true
+  objects:
+  - objectTemplate: templates/baseImageDataVolume.yml
+    replicas: 1
+    inputVars:
+      cloneDataVolumeName: {{ $cloneDataVolumeName }}
+      storageClassName: {{ .storageClassName }}
+      baseVMNamespace: {{ $baseVMNamespace }}
+      baseVMRootDiskPVCName: "{{ $baseVMRootDiskVolumeName }}-0-1"
+      accessMode: {{ .accessMode }}
+    waitOptions:
+      customStatusPaths:
+      - key: '(.conditions.[] | select(.type == "Ready")).status'
+        value: "True"
+
+- name: create-data-source
+  jobType: create
+  jobIterations: 1
+  qps: 20
+  burst: 20
+  namespacedIterations: false
+  namespace: {{ $cloneVMNamespace }}
+  namespaceLabels:
+    {{ $testNamespacesLabelKey }}: {{ $testNamespacesLabelValue }}
+  # verify object count after running each job
+  verifyObjects: true
+  errorOnVerify: true
+  # wait for all VMIs to be in the Ready condition
+  waitWhenFinished: false
+  podWait: true
+  # maximum time to wait for all objects to be created
+  maxWaitTimeout: 15m
+  # wait before the job completes to allow metrics collection
+  jobPause: 1m
+  # Do not clean the namespaces
+  cleanup: false
+  # Set missing keys as empty to allow using default values
+  defaultMissingKeysWithZero: true
+  objects:
+  {{ if .useSnapshot | default false }}
+  - objectTemplate: templates/baseImageDataVolumeSnapshot.yml
+    replicas: 1
+    inputVars:
+      cloneVolumeSnapshotName: {{ $cloneVolumeSnapshotName }}
+      volumeSnapshotClassName: {{ .volumeSnapshotClassName }}
+      cloneVolumeSnapshotPVCName: {{ $cloneDataVolumeName }}
+    waitOptions:
+      customStatusPaths:
+      - key: '.readyToUse | tostring'
+        value: "true"
+  {{ end }}
+  - objectTemplate: templates/baseImageDataSource.yml
+    replicas: 1
+    inputVars:
+      cloneDataSourceName: {{ $cloneDataSourceName }}
+      cloneDataSourcePVCName: {{ $cloneDataVolumeName }}
+      cloneDataSourcePVCNamespace: {{ $cloneVMNamespace }}
+      cloneDataSourceSnapshotName: {{ $cloneVolumeSnapshotName }}
+      cloneDataSourceSnapshotNamespace: {{ $cloneVMNamespace }}
+      useSnapshot: {{ .useSnapshot | default false }}
+    waitOptions:
+      customStatusPaths:
+      - key: '(.conditions.[] | select(.type == "Ready")).status'
+        value: "True"
+
+- name: {{ $createCloneVMJobName }}
+  jobType: create
+  jobIterations: 1
+  qps: 20
+  burst: 20
+  namespacedIterations: false
+  namespace: {{ $cloneVMNamespace }}
+  # verify object count after running each job
+  verifyObjects: true
+  errorOnVerify: true
+  # wait for all VMIs to be in the Ready condition
+  waitWhenFinished: false
+  podWait: true
+  # maximum time to wait for all objects to be created
+  maxWaitTimeout: 1h
+  jobPause: 10s
+  # cleanup removes leftovers of a previous execution (not deleted or failed)
+  cleanup: false
+  # Set missing keys as empty to allow using default values
+  defaultMissingKeysWithZero: true
+  beforeCleanup: "./check.sh check_vm_running kube-burner-job {{ $createCloneVMJobName }} {{ $cloneVMNamespace }} {{ .privateKey }} fedora"
+  objects:
+
+  - objectTemplate: templates/secret_ssh_public.yml
+    runOnce: true
+    replicas: 1
+    inputVars:
+      name: {{ $sshPublicKeySecretName }}
+      publicKeyPath: {{ .publicKey }}
+
+  - objectTemplate: templates/vm.yml
+    replicas: {{ $persistentVMCount }}
+    inputVars:
+      vmName: "{{ $typePersistent }}-{{ $cloneVMName }}"
+      vmLabels:
+        {{ $vmTypeLabelKey }}: {{ $typePersistent }}
+      rootDiskVolumeName: "{{ $typePersistent }}-{{ $cloneRootDiskVolumeName }}"
+      rootVolumeLabels:
+        {{ $volumeTypeLabelKey }}: {{ $typePersistent }}
+      rootdiskVolumeSourceRef:
+        kind: DataSource
+        name: {{ $cloneDataSourceName }}
+        namespace: {{ $cloneVMNamespace }}
+      storageClassName: {{ .storageClassName }}
+      sshPublicKeySecret: {{ $sshPublicKeySecretName }}
+      accessMode: {{ .accessMode }}
+
+  - objectTemplate: templates/vm.yml
+    replicas: {{ $ephemeralVMCount }}
+    inputVars:
+      vmName: "{{ $typeEphemeral }}-{{ $cloneVMName }}"
+      vmLabels:
+        {{ $vmTypeLabelKey }}: {{ $typeEphemeral }}
+      rootDiskVolumeName: "{{ $typeEphemeral }}-{{ $cloneRootDiskVolumeName }}"
+      rootVolumeLabels:
+        {{ $volumeTypeLabelKey }}: {{ $typeEphemeral }}
+      rootdiskVolumeSourceRef:
+        kind: DataSource
+        name: {{ $cloneDataSourceName }}
+        namespace: {{ $cloneVMNamespace }}
+      storageClassName: {{ .storageClassName }}
+      sshPublicKeySecret: {{ $sshPublicKeySecretName }}
+      accessMode: {{ .accessMode }}
+
+- name: stop-ephemeral-vms
+  jobType: kubevirt
+  qps: 20
+  burst: 20
+  jobIterations: 1
+  maxWaitTimeout: 1h
+  objectWait: true
+  objects:
+  - kubeVirtOp: stop
+    labelSelector:
+      kube-burner-job: {{ $createCloneVMJobName }}
+      {{ $vmTypeLabelKey }}: {{ $typeEphemeral }}
+
+- name: delete-ephemeral-volumes
+  jobType: delete
+  waitForDeletion: false
+  qps: 5
+  burst: 10
+  objects:
+  - apiVersion: cdi.kubevirt.io/v1beta1
+    kind: DataVolume
+    labelSelector:
+      {{ $volumeTypeLabelKey }}: {{ $typeEphemeral }}
+
+- name: start-ephemeral-vms
+  jobType: kubevirt
+  qps: 20
+  burst: 20
+  jobIterations: 1
+  maxWaitTimeout: 1h
+  objectDelay: 1m
+  beforeCleanup: "./check.sh check_vm_running {{ $vmTypeLabelKey }} {{ $typeEphemeral }} {{ $cloneVMNamespace }} {{ .privateKey }} fedora"
+  objectWait: true
+  objects:
+  - kubeVirtOp: start
+    labelSelector:
+      kube-burner-job: {{ $createCloneVMJobName }}
+      {{ $vmTypeLabelKey }}: {{ $typeEphemeral }}
diff --git a/cmd/ocp.go b/cmd/ocp.go
index 18be8c73..0ee46f9f 100644
--- a/cmd/ocp.go
+++ b/cmd/ocp.go
@@ -129,6 +129,7 @@ func openShiftCmd() *cobra.Command {
 		ocp.ClusterHealth(),
 		ocp.CustomWorkload(&wh),
 		ocp.NewVirtCapacityBenchmark(&wh),
+		ocp.NewVirtClone(&wh),
 	)
 	util.SetupCmd(ocpCmd)
 	return ocpCmd
diff --git a/common.go b/common.go
index a087b6d3..c7894f22 100644
--- a/common.go
+++ b/common.go
@@ -28,7 +28,15 @@ import (
 	"github.com/spf13/cobra"
 )
 
-var clusterMetadata ocpmetadata.ClusterMetadata
+var (
+	clusterMetadata ocpmetadata.ClusterMetadata
+
+	accessModeTranslator = map[string]string{
+		"RO":  "ReadOnly",
+		"RWO": "ReadWriteOnce",
+		"RWX": "ReadWriteMany",
+	}
+)
 
 func setMetrics(cmd *cobra.Command, metricsProfiles []string) {
 	profileType, _ := cmd.Root().PersistentFlags().GetString("profile-type")
diff --git a/test/test-ocp.bats b/test/test-ocp.bats
index 5342bc54..8e93d079 100755
--- a/test/test-ocp.bats
+++ b/test/test-ocp.bats
@@ -156,3 +156,16 @@ teardown_file() {
   done
   oc delete namespace virt-capacity-benchmark
 }
+
+@test "virt-clone" {
+  VIRT_CLONE_STORAGE_CLASS=${VIRT_CLONE_STORAGE_CLASS:-oci-bv}
+  run_cmd kube-burner-ocp virt-clone --storage-class $VIRT_CLONE_STORAGE_CLASS --access-mode RWO
+  local jobs=("create-base-vm" "create-clone-vms")
+  for job in "${jobs[@]}"; do
+    check_metric_recorded ./virt-clone-results ${job} dvLatency dvReadyLatency
+    check_metric_recorded ./virt-clone-results ${job} vmiLatency vmReadyLatency
+    check_quantile_recorded ./virt-clone-results ${job} dvLatency Ready
+    check_quantile_recorded ./virt-clone-results ${job} vmiLatency VMReady
+  done
+  run_cmd oc delete ns -l kube-burner.io/test-name=virt-clone
+}
diff --git a/virt-clone.go b/virt-clone.go
new file mode 100644
index 00000000..f2a0d895
--- /dev/null
+++ b/virt-clone.go
@@ -0,0 +1,124 @@
+// Copyright 2024 The Kube-burner Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocp
+
+import (
+	"os"
+
+	k8sstorage "github.com/cloud-bulldozer/go-commons/v2/k8s-storage"
+	"github.com/cloud-bulldozer/go-commons/v2/ssh"
+	"github.com/cloud-bulldozer/go-commons/v2/virtctl"
+	"github.com/kube-burner/kube-burner/pkg/workloads"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	virtCloneSSHKeyFileName = "ssh"
+	virtCloneTmpDirPattern  = "kube-burner-virt-clone-*"
+	virtCloneTestName       = "virt-clone"
+)
+
+// NewVirtClone returns the virt-clone workload
+func NewVirtClone(wh *workloads.WorkloadHelper) *cobra.Command {
+	var storageClassName string
+	var volumeSnapshotClassName string
+	var sshKeyPairPath string
+	var useSnapshot bool
+	var vmCount int
+	var testNamespaceBaseName string
+	var metricsProfiles []string
+	var volumeAccessMode string
+	var rc int
+	cmd := &cobra.Command{
+		Use:          virtCloneTestName,
+		Short:        "Runs virt-clone workload",
+		SilenceUsage: true,
+		PreRun: func(cmd *cobra.Command, args []string) {
+			var err error
+
+			if _, ok := accessModeTranslator[volumeAccessMode]; !ok {
+				log.Fatalf("Unsupported access mode - %s", volumeAccessMode)
+			}
+
+			if !virtctl.IsInstalled() {
+				log.Fatalf("Failed to run virtctl. Check that it is installed, in PATH and working")
+			}
+
+			k8sConnector := getK8SConnector()
+
+			// Verify the provided storage class name or get the cluster default
+			storageClassName, err = k8sstorage.GetStorageClassName(k8sConnector, storageClassName, true)
+			if err != nil {
+				log.Fatal(err)
+			}
+			log.Infof("Running tests with Storage Class [%s]", storageClassName)
+
+			// If the user did not set use-snapshot, get the value from the StorageProfile
+			if !cmd.Flags().Lookup("use-snapshot").Changed {
+				sourceFormat, err := k8sstorage.GetDataImportCronSourceFormatForStorageClass(k8sConnector, storageClassName)
+				if err != nil {
+					log.Fatalf("Failed to get source format for StorageClass [%s] - %v", storageClassName, err)
+				}
+				useSnapshot = sourceFormat == "snapshot"
+				log.Info("The flag use-snapshot was not set. Using the value from the StorageProfile: ", useSnapshot)
+			}
+
+			// If using Snapshot, get the VolumeSnapshotClass with the same provisioner as the StorageClass
+			if useSnapshot {
+				volumeSnapshotClassName, err = k8sstorage.GetVolumeSnapshotClassNameForStorageClass(k8sConnector, storageClassName)
+				if err != nil {
+					log.Fatalf("Failed to get VolumeSnapshotClass for StorageClass %s - %v", storageClassName, err)
+				}
+				if volumeSnapshotClassName == "" {
+					log.Fatalf("Could not find a corresponding VolumeSnapshotClass for StorageClass %s", storageClassName)
+				}
+				log.Infof("Running tests with VolumeSnapshotClass [%s]", volumeSnapshotClassName)
+			}
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			privateKeyPath, publicKeyPath, err := ssh.GenerateSSHKeyPair(sshKeyPairPath, virtCloneTmpDirPattern, virtCloneSSHKeyFileName)
+			if err != nil {
+				log.Fatalf("Failed to generate SSH keys for the test - %v", err)
+			}
+
+			additionalVars := map[string]interface{}{
+				"privateKey":              privateKeyPath,
+				"publicKey":               publicKeyPath,
+				"storageClassName":        storageClassName,
+				"volumeSnapshotClassName": volumeSnapshotClassName,
+				"testNamespaceBaseName":   testNamespaceBaseName,
+				"useSnapshot":             useSnapshot,
+				"cloneVMCount":            vmCount,
+				"accessMode":              accessModeTranslator[volumeAccessMode],
+			}
+
+			setMetrics(cmd, metricsProfiles)
+			rc = wh.RunWithAdditionalVars(cmd.Name(), additionalVars)
+		},
+		PostRun: func(cmd *cobra.Command, args []string) {
+			os.Exit(rc)
+		},
+	}
+	cmd.Flags().StringVar(&storageClassName, "storage-class", "", "Name of the Storage Class to test")
+	cmd.Flags().StringVar(&sshKeyPairPath, "ssh-key-path", "", "Path to save the generated SSH keys")
+	cmd.Flags().BoolVar(&useSnapshot, "use-snapshot", true, "Clone from snapshot")
+	cmd.Flags().IntVar(&vmCount, "vms", 10, "Number of clone VMs to create")
+	cmd.Flags().StringVarP(&testNamespaceBaseName, "namespace", "n", virtCloneTestName, "Base name for the namespace to run the test in")
+	cmd.Flags().StringVar(&volumeAccessMode, "access-mode", "RWX", "Access mode for the created volumes - RO, RWO, RWX")
+	cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
+	return cmd
+}