Upgrade Go packages (#169)
* Update Go version in Dockerfile
* Add csi-common package
* Implement missing CSI methods
* Upgrade kind and containerd version in workflows
* Change private registry config
* Remove dependency on deprecated packages
* Update README.md and Chart.yaml
mugdha-adhav authored Mar 7, 2025
1 parent 03b1c2d commit 30514dd
Showing 32 changed files with 830 additions and 1,672 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/backward-compatibility.yaml
@@ -11,10 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with containerd
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
kubectl_version: "v1.32.2"
config: ./hack/ci/containerd-cluster-conf.yaml
- name: Build image
run: ./hack/ci/build.sh
4 changes: 2 additions & 2 deletions .github/workflows/containerd-async.yaml
@@ -11,10 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with containerd
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
kubectl_version: "v1.32.2"
config: ./hack/ci/containerd-cluster-conf.yaml
- name: Install private registry
run: ./hack/ci/setup_private_registry.sh
5 changes: 3 additions & 2 deletions .github/workflows/containerd.yaml
@@ -11,10 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with containerd
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
kubectl_version: "v1.32.2"
config: ./hack/ci/containerd-cluster-conf.yaml
- name: Install private registry
run: ./hack/ci/setup_private_registry.sh
@@ -35,4 +35,5 @@ jobs:
- name: Run integration Tests
run: ./hack/ci/test.sh
- name: Uninstall the CSI Driver
if: always()
run: helm uninstall -n kube-system ${HELM_NAME} --wait
2 changes: 1 addition & 1 deletion .github/workflows/cri-o.yaml
@@ -11,7 +11,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with crio
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
6 changes: 3 additions & 3 deletions .github/workflows/docker-containerd.yaml
@@ -8,8 +8,8 @@ jobs:
- uses: actions/checkout@v4
- uses: opsgang/ga-setup-minikube@v0.1.2
with:
minikube-version: '1.20.0'
k8s-version: '1.21.0'
minikube-version: 'v1.35.0'
k8s-version: '1.32.2'
- name: Start a Kubernetes cluster with docker
run: ./hack/ci/setup_docker_cluster.sh
- name: Build image
@@ -19,4 +19,4 @@ jobs:
- name: Run integration Tests
run: ./hack/ci/test.sh
- name: Uninstall the CSI Driver
run: ./hack/ci/uninstall_driver_containerd.sh
run: ./hack/ci/uninstall_driver_containerd.sh
4 changes: 2 additions & 2 deletions .github/workflows/metrics.yaml
@@ -11,10 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with containerd
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
kubectl_version: "v1.32.2"
config: ./hack/ci/containerd-cluster-conf.yaml
- name: Build image
run: ./hack/ci/build.sh
4 changes: 2 additions & 2 deletions .github/workflows/restart-ds-containerd.yaml
@@ -11,10 +11,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with containerd
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
kubectl_version: "v1.32.2"
config: ./hack/ci/containerd-cluster-conf.yaml
- name: Build image
run: ./hack/ci/build.sh
2 changes: 1 addition & 1 deletion .github/workflows/restart-ds-crio.yaml
@@ -11,7 +11,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Start a kind cluster with crio
uses: helm/kind-action@v1.9.0
uses: helm/kind-action@v1.12.0
with:
cluster_name: kind-${{ github.run_id }}
kubectl_version: "v1.25.2"
4 changes: 2 additions & 2 deletions Dockerfile
@@ -1,4 +1,4 @@
FROM docker.io/library/golang:1.22.5-alpine3.19 as builder
FROM docker.io/library/golang:1.24.1-alpine3.21 as builder
RUN apk add --no-cache btrfs-progs-dev lvm2-dev make build-base
WORKDIR /go/src/container-image-csi-driver
COPY go.mod go.sum ./
@@ -12,7 +12,7 @@ RUN make install-util
FROM scratch as install-util
COPY --from=builder /go/src/container-image-csi-driver/_output/container-image-csi-driver-install /

FROM alpine:3.19
FROM alpine:3.21.3
RUN apk add --no-cache btrfs-progs-dev lvm2-dev
WORKDIR /
COPY --from=builder /go/src/container-image-csi-driver/_output/container-image-csi-driver /usr/bin/
2 changes: 1 addition & 1 deletion Makefile
@@ -1,4 +1,4 @@
VERSION ?= v2.0.1
VERSION ?= v2.1.0

IMAGE_BUILDER ?= docker
IMAGE_BUILD_CMD ?= buildx
7 changes: 5 additions & 2 deletions README.md
@@ -29,12 +29,12 @@ git remote set-head origin -a
```

### Migration of CSI driver
In release `v2.0.0`, we are updating the CSI driver name from `csi-image.warm-metal.tech` to `container-image.warm-metal.tech`. This change may cause disruptions to your existing workloads if the driver name is not updated.
In release `v2.0.0`, we are updating the CSI driver name from `csi-image.warm-metal.tech` to `container-image.csi.k8s.io`. This change may cause disruptions to your existing workloads if the driver name is not updated.

**To ensure a smooth transition:**
1. **Install Both Versions**: To avoid any breaking changes, you can install both the old and new versions of the CSI driver simultaneously. Both versions are compatible and have been tested to work side-by-side, as verified in our integration tests.

1. **Update Your Workloads**: Migrate your workloads to use the new driver name `container-image.warm-metal.tech`. This process will involve updating your storage class definitions and any other configurations that reference the old driver name.
1. **Update Your Workloads**: Migrate your workloads to use the new driver name `container-image.csi.k8s.io`. This process will involve updating your storage class definitions and any other configurations that reference the old driver name.

1. **Remove the Old Driver**: Once all workloads have been successfully migrated and verified with the new driver, you can safely remove the older version of the driver from your cluster.
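
For reference, a workload consuming the driver under its new name might look like the sketch below: an illustrative ephemeral-volume pod, not taken from this diff. The pod name, container image, and the `image` volume-attribute value are placeholder assumptions; consult the project README for the exact volume schema.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: new-driver-name-example       # hypothetical pod, for illustration only
spec:
  containers:
    - name: app
      image: docker.io/library/busybox:stable
      command: ["sleep", "3600"]
      volumeMounts:
        - name: image-volume
          mountPath: /target          # contents of the pulled image appear here
  volumes:
    - name: image-volume
      csi:
        # Renamed driver; was csi-image.warm-metal.tech before v2.0.0
        driver: container-image.csi.k8s.io
        volumeAttributes:
          # Assumed attribute key; the value is a placeholder image reference
          image: "docker.io/library/alpine:3.21"
```

Any StorageClass or PersistentVolume that still references the old driver name needs the same substitution.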

@@ -66,6 +66,9 @@ Tested changes on below mentioned versions -
| 0.7.x | v1.25 | 1.6.8 | v1.20.9 |
| 0.8.x | v1.25 | 1.6.8 | v1.20.9 |
| 1.0.x | v1.25 | 1.6.8 | v1.25.2 |
| 1.2.x | v1.25 | 1.6.8 | v1.25.2 |
| 2.x.x | v1.25 | 1.6.8 | v1.25.2 |
| 2.1.x | v1.32 | 2.x | v1.25.2 |

#### References:
* containerd [releases](https://containerd.io/releases/#kubernetes-support)
4 changes: 2 additions & 2 deletions charts/warm-metal-csi-driver/Chart.yaml
@@ -15,9 +15,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 2.0.1
version: 2.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: v2.0.1
appVersion: v2.1.0
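
For chart consumers, the bump is typically picked up by pinning the new chart version. A hypothetical dependency stanza is sketched below; the chart name matches this repository's chart directory, but the repository URL is a placeholder rather than the project's published chart repo.

```yaml
# Hypothetical parent Chart.yaml snippet pinning the upgraded chart.
dependencies:
  - name: warm-metal-csi-driver
    version: 2.1.0                                        # chart version from this commit
    repository: https://charts.example.com/warm-metal     # placeholder URL
```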
4 changes: 2 additions & 2 deletions cmd/install/main.go
@@ -108,12 +108,12 @@ func main() {

manifest := &bytes.Buffer{}
if err := t.Execute(manifest, conf); err != nil {
fmt.Fprintf(os.Stderr, err.Error())
fmt.Fprintf(os.Stderr, "%s", err.Error())
}

ds := appsv1.DaemonSet{}
if err := yaml.Unmarshal(manifest.Bytes(), &ds); err != nil {
fmt.Fprintf(os.Stderr, manifest.String())
fmt.Fprintf(os.Stderr, "%s", manifest.String())
panic(err)
}

77 changes: 73 additions & 4 deletions cmd/plugin/controller_server.go
@@ -5,8 +5,8 @@ import (

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/pkg/errors"
csicommon "github.com/warm-metal/container-image-csi-driver/pkg/csi-common"
"github.com/warm-metal/container-image-csi-driver/pkg/watcher"
csicommon "github.com/warm-metal/csi-drivers/pkg/csi-common"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -20,14 +20,15 @@

func NewControllerServer(driver *csicommon.CSIDriver, watcher *watcher.Watcher) *ControllerServer {
return &ControllerServer{
DefaultControllerServer: csicommon.NewDefaultControllerServer(driver),
watcher: watcher,
driver: driver,
watcher: watcher,
}
}

type ControllerServer struct {
*csicommon.DefaultControllerServer
driver *csicommon.CSIDriver
watcher *watcher.Watcher
csi.UnimplementedControllerServer
}

func (c ControllerServer) ControllerExpandVolume(context.Context, *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
@@ -60,3 +61,71 @@ func (c ControllerServer) CreateVolume(_ context.Context, req *csi.CreateVolumeR
},
}, nil
}

// ControllerModifyVolume is not implemented.
func (cs *ControllerServer) ControllerModifyVolume(context.Context, *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// ControllerGetCapabilities returns the capabilities of the controller service.
func (c *ControllerServer) ControllerGetCapabilities(_ context.Context, _ *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: []*csi.ControllerServiceCapability{
{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
},
},
},
},
}, nil
}

// ValidateVolumeCapabilities validates the volume capabilities.
func (c *ControllerServer) ValidateVolumeCapabilities(_ context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
for _, cap := range req.VolumeCapabilities {
if cap.AccessMode.Mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY &&
cap.AccessMode.Mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
return &csi.ValidateVolumeCapabilitiesResponse{
Message: "Only ReadOnlyMany or ReadOnlyOnce access modes are supported",
}, nil
}
}

return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeContext: req.VolumeContext,
VolumeCapabilities: req.VolumeCapabilities,
Parameters: req.Parameters,
},
}, nil
}

// ListVolumes is not implemented.
func (c *ControllerServer) ListVolumes(_ context.Context, _ *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// GetCapacity is not implemented.
func (c *ControllerServer) GetCapacity(_ context.Context, _ *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// CreateSnapshot is not implemented.
func (c *ControllerServer) CreateSnapshot(_ context.Context, _ *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// DeleteSnapshot is not implemented.
func (c *ControllerServer) DeleteSnapshot(_ context.Context, _ *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// ListSnapshots is not implemented.
func (c *ControllerServer) ListSnapshots(_ context.Context, _ *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// mustEmbedUnimplementedControllerServer satisfies the embedding requirement of the generated csi.ControllerServer interface.
func (cs *ControllerServer) mustEmbedUnimplementedControllerServer() {}
16 changes: 10 additions & 6 deletions cmd/plugin/identity_server.go
@@ -2,20 +2,21 @@ package main

import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
"google.golang.org/protobuf/types/known/wrapperspb"
)

type IdentityServer struct {
version string
csi.UnimplementedIdentityServer
}

func NewIdentityServer(version string) *IdentityServer {
return &IdentityServer{
version: version,
}
}

type IdentityServer struct {
version string
}

func (ids *IdentityServer) GetPluginInfo(_ context.Context, _ *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
return &csi.GetPluginInfoResponse{
Name: driverName,
@@ -25,7 +26,7 @@ func (ids *IdentityServer) GetPluginInfo(_ context.Context, _ *csi.GetPluginInfo

func (ids *IdentityServer) Probe(_ context.Context, _ *csi.ProbeRequest) (*csi.ProbeResponse, error) {
return &csi.ProbeResponse{
Ready: &wrappers.BoolValue{Value: true},
Ready: &wrapperspb.BoolValue{Value: true},
}, nil
}

@@ -42,3 +43,6 @@ func (ids *IdentityServer) GetPluginCapabilities(_ context.Context, _ *csi.GetPl
},
}, nil
}

// mustEmbedUnimplementedIdentityServer satisfies the embedding requirement of the generated csi.IdentityServer interface.
func (is *IdentityServer) mustEmbedUnimplementedIdentityServer() {}
18 changes: 10 additions & 8 deletions cmd/plugin/main.go
@@ -13,10 +13,10 @@ import (
"github.com/warm-metal/container-image-csi-driver/pkg/backend/containerd"
"github.com/warm-metal/container-image-csi-driver/pkg/backend/crio"
"github.com/warm-metal/container-image-csi-driver/pkg/cri"
"github.com/warm-metal/container-image-csi-driver/pkg/csi-common"
"github.com/warm-metal/container-image-csi-driver/pkg/metrics"
"github.com/warm-metal/container-image-csi-driver/pkg/secret"
"github.com/warm-metal/container-image-csi-driver/pkg/watcher"
csicommon "github.com/warm-metal/csi-drivers/pkg/csi-common"
"k8s.io/klog/v2"
)

@@ -48,14 +48,16 @@ var (
"The path to the credential provider plugin config file.")
icpBin = flag.String("image-credential-provider-bin-dir", "",
"The path to the directory where credential provider plugin binaries are located.")
nodePluginSA = flag.String("node-plugin-sa", "container-image-csi-driver",
"The name of the ServiceAccount for pulling image.")
enableCache = flag.Bool("enable-daemon-image-credential-cache", true,
"Whether to save contents of imagepullsecrets of the daemon ServiceAccount in memory. "+
"If set to false, secrets will be fetched from the API server on every image pull.")
asyncImagePullTimeout = flag.Duration("async-pull-timeout", 0,
"If positive, specifies duration allotted for async image pulls as measured from pull start time. If zero, negative, less than 30s, or omitted, the caller's timeout (usually kubelet: 2m) is used instead of this value. (additional time helps prevent timeout for larger images or slower image pull conditions)")
watcherResyncPeriod = flag.Duration("watcher-resync-period", 30*time.Minute, "The resync period of the pvc watcher.")
mode = flag.String("mode", "", "The mode of the driver. Valid values are: node, controller")
nodePluginSA = flag.String("node-plugin-sa", "container-image-csi-driver", "The name of the ServiceAccount used by the node plugin.")
"Cache image pull secret from the daemon ServiceAccount.")
asyncImagePullTimeout = flag.Duration("async-pull-timeout", 10*time.Minute,
"Timeout for asynchronous image pulling. Only valid if --async-pull is enabled.")
mode = flag.String("mode", nodeMode,
fmt.Sprintf("Mode determines the role this instance plays. One of %q or %q.", nodeMode, controllerMode))
watcherResyncPeriod = flag.Duration("watcher-resync-period", 10*time.Minute,
"Resync period for the PVC watcher. Only valid in controller mode.")
)

func main() {
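
Both versions of the flag block above expose a `--mode` flag selecting between node and controller roles. A minimal sketch of how a controller instance might be launched is shown below; only the flag names come from this diff, while the container name, image reference, and values are assumptions.

```yaml
# Illustrative container fragment for running the plugin in controller mode.
# The image tag and resync period are assumed values, not taken from this commit's manifests.
containers:
  - name: container-image-csi-driver
    image: docker.io/warmmetal/container-image-csi-driver:v2.1.0   # assumed image name
    args:
      - --mode=controller
      - --watcher-resync-period=30m
```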
(Diffs for the remaining changed files are not rendered in this view.)
