fix for synced pvs and retain SC and add e2e
facchettos committed May 6, 2024
1 parent f0d5638 commit ece23b9
Showing 8 changed files with 380 additions and 33 deletions.
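Before the file-by-file diff, a brief orientation: the core of the change is a new guard in the persistentvolumes syncer that keeps a released Retain volume (the host PV still carries a ClaimRef, but the host PVC is gone) from having its spec copied back over a virtual PV that has not bound yet. The standalone sketch below only illustrates that condition; the function name and setup are illustrative and are not part of the commit.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// skipBackwardSync illustrates the new condition added in Sync below: real PVs
// are synced to the host (no fake PVs), the virtual PV is not bound yet (nil
// ClaimRef), and the host PV still references a claim that no longer exists.
// In that case the syncer leaves the virtual spec alone so the retained volume
// stays reusable.
func skipBackwardSync(vPV, pPV *corev1.PersistentVolume, hostPVC *corev1.PersistentVolumeClaim, useFakePVs bool) bool {
	return !useFakePVs && vPV.Spec.ClaimRef == nil && pPV.Spec.ClaimRef != nil && hostPVC == nil
}

func main() {
	released := &corev1.PersistentVolume{Spec: corev1.PersistentVolumeSpec{
		ClaimRef:                      &corev1.ObjectReference{Namespace: "demo", Name: "data"},
		PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain,
	}}
	unbound := &corev1.PersistentVolume{}
	fmt.Println(skipBackwardSync(unbound, released, nil, false)) // prints true: keep the virtual spec
}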
3 changes: 3 additions & 0 deletions .github/workflows/e2e.yaml
@@ -277,6 +277,9 @@ jobs:
- distribution: "k3s"
multinamespace-mode: "true"
test-suite-path: "./test/e2e_target_namespace"
- distribution: "k3s"
multinamespace-mode: "true"
test-suite-path: "./test/e2e_pvc_retain"
- distribution: "k3s"
multinamespace-mode: "true"
test-suite-path: "./test/e2e_plugin"
9 changes: 6 additions & 3 deletions pkg/controllers/resources/persistentvolumeclaims/syncer.go
@@ -35,12 +35,14 @@ const (
)

func New(ctx *synccontext.RegisterContext) (syncer.Object, error) {
storageClassesEnabled := ctx.Config.Sync.ToHost.StorageClasses.Enabled
syncSCToHost := ctx.Config.Sync.ToHost.StorageClasses.Enabled
syncSCFromHost := ctx.Config.ControlPlane.Advanced.VirtualScheduler.Enabled || ctx.Config.Sync.FromHost.StorageClasses.Enabled == "true"
excludedAnnotations := []string{bindCompletedAnnotation, boundByControllerAnnotation, storageProvisionerAnnotation}
return &persistentVolumeClaimSyncer{
NamespacedTranslator: translator.NewNamespacedTranslator(ctx, "persistent-volume-claim", &corev1.PersistentVolumeClaim{}, excludedAnnotations...),

storageClassesEnabled: storageClassesEnabled,
syncSCToHost: syncSCToHost,
syncSCFromHost: syncSCFromHost,
schedulerEnabled: ctx.Config.ControlPlane.Advanced.VirtualScheduler.Enabled,
useFakePersistentVolumes: !ctx.Config.Sync.ToHost.PersistentVolumes.Enabled,
}, nil
@@ -49,7 +51,8 @@ func New(ctx *synccontext.RegisterContext) (syncer.Object, error) {
type persistentVolumeClaimSyncer struct {
translator.NamespacedTranslator

storageClassesEnabled bool
syncSCToHost bool
syncSCFromHost bool
schedulerEnabled bool
useFakePersistentVolumes bool
}
7 changes: 2 additions & 5 deletions pkg/controllers/resources/persistentvolumeclaims/translate.go
@@ -49,7 +49,7 @@ func (s *persistentVolumeClaimSyncer) translateSelector(ctx *synccontext.SyncCon
}

// translate storage class if we manage those in vcluster
if s.storageClassesEnabled && storageClassName != "" {
if s.syncSCToHost && storageClassName != "" {
translated := translate.Default.PhysicalNameClusterScoped(storageClassName)
delete(vPvc.Annotations, deprecatedStorageClassAnnotation)
vPvc.Spec.StorageClassName = &translated
@@ -61,11 +61,8 @@ func (s *persistentVolumeClaimSyncer) translateSelector(ctx *synccontext.SyncCon
if vPvc.Spec.Selector != nil {
vPvc.Spec.Selector = translate.Default.TranslateLabelSelectorCluster(vPvc.Spec.Selector)
}
if vPvc.Spec.VolumeName != "" {
vPvc.Spec.VolumeName = translate.Default.PhysicalNameClusterScoped(vPvc.Spec.VolumeName)
}
// check if the storage class exists in the physical cluster
if !s.storageClassesEnabled && storageClassName != "" {
if !(s.syncSCToHost || s.syncSCFromHost) && storageClassName != "" {
// Should the PVC be dynamically provisioned or not?
if vPvc.Spec.Selector == nil && vPvc.Spec.VolumeName == "" {
err := ctx.PhysicalClient.Get(ctx.Context, types.NamespacedName{Name: storageClassName}, &storagev1.StorageClass{})
68 changes: 43 additions & 25 deletions pkg/controllers/resources/persistentvolumes/syncer.go
@@ -34,7 +34,9 @@ func NewSyncer(ctx *synccontext.RegisterContext) (syncertypes.Object, error) {
return &persistentVolumeSyncer{
Translator: translator.NewClusterTranslator(ctx, "persistentvolume", &corev1.PersistentVolume{}, NewPersistentVolumeTranslator(), HostClusterPersistentVolumeAnnotation),

virtualClient: ctx.VirtualManager.GetClient(),
virtualClient: ctx.VirtualManager.GetClient(),
physicalClient: ctx.PhysicalManager.GetClient(),
useFakePersistentVolumes: !ctx.Config.Sync.ToHost.PersistentVolumes.Enabled,
}, nil
}

@@ -66,7 +68,9 @@ func NewPersistentVolumeTranslator() translate.PhysicalNameTranslator {
type persistentVolumeSyncer struct {
translator.Translator

virtualClient client.Client
virtualClient client.Client
physicalClient client.Client
useFakePersistentVolumes bool
}

var _ syncertypes.IndicesRegisterer = &persistentVolumeSyncer{}
@@ -142,26 +146,32 @@ func (s *persistentVolumeSyncer) Sync(ctx *synccontext.SyncContext, pObj client.
}

// check if the persistent volume should get synced
sync, vPvc, err := s.shouldSync(ctx.Context, pPersistentVolume)
sync, vPvc, pPvc, err := s.shouldSync(ctx.Context, pPersistentVolume)
if err != nil {
return ctrl.Result{}, err
} else if !sync {
ctx.Log.Infof("delete virtual persistent volume %s, because there is no virtual persistent volume claim with that volume", vPersistentVolume.Name)
return ctrl.Result{}, ctx.VirtualClient.Delete(ctx.Context, vObj)
}

syncBack := true
if !s.useFakePersistentVolumes && vPersistentVolume.Spec.ClaimRef == nil && pPersistentVolume.Spec.ClaimRef != nil && pPvc == nil {
syncBack = false
}
// check if there is a corresponding virtual pvc
updatedObj := s.translateUpdateBackwards(vPersistentVolume, pPersistentVolume, vPvc)
if updatedObj != nil {
ctx.Log.Infof("update virtual persistent volume %s, because spec has changed", vPersistentVolume.Name)
translator.PrintChanges(vPersistentVolume, updatedObj, ctx.Log)
err = ctx.VirtualClient.Update(ctx.Context, updatedObj)
if err != nil {
return ctrl.Result{}, err
}
if syncBack {
updatedObj := s.translateUpdateBackwards(vPersistentVolume, pPersistentVolume, vPvc)
if updatedObj != nil {
ctx.Log.Infof("update virtual persistent volume %s, because spec has changed", vPersistentVolume.Name)
translator.PrintChanges(vPersistentVolume, updatedObj, ctx.Log)
err = ctx.VirtualClient.Update(ctx.Context, updatedObj)
if err != nil {
return ctrl.Result{}, err
}

// we will reconcile anyways
return ctrl.Result{}, nil
// we will reconcile anyways
return ctrl.Result{}, nil
}
}

// check status
@@ -180,7 +190,7 @@ func (s *persistentVolumeSyncer) Sync(ctx *synccontext.SyncContext, pObj client.
}

// update the physical persistent volume if the virtual has changed
if vPersistentVolume.Annotations == nil || vPersistentVolume.Annotations[HostClusterPersistentVolumeAnnotation] == "" {
if vPersistentVolume.Annotations == nil || vPersistentVolume.Annotations[HostClusterPersistentVolumeAnnotation] == "" || vPersistentVolume.Spec.ClaimRef == nil {
if vPersistentVolume.DeletionTimestamp != nil {
if pPersistentVolume.DeletionTimestamp != nil {
return ctrl.Result{}, nil
@@ -221,7 +231,7 @@ var _ syncertypes.ToVirtualSyncer = &persistentVolumeSyncer{}

func (s *persistentVolumeSyncer) SyncToVirtual(ctx *synccontext.SyncContext, pObj client.Object) (ctrl.Result, error) {
pPersistentVolume := pObj.(*corev1.PersistentVolume)
sync, vPvc, err := s.shouldSync(ctx.Context, pPersistentVolume)
sync, vPvc, _, err := s.shouldSync(ctx.Context, pPersistentVolume)
if err != nil {
return ctrl.Result{}, err
} else if translate.Default.IsManagedCluster(pObj) {
@@ -240,33 +250,41 @@ func (s *persistentVolumeSyncer) SyncToVirtual(ctx *synccontext.SyncContext, pOb
return ctrl.Result{}, nil
}

func (s *persistentVolumeSyncer) shouldSync(ctx context.Context, pObj *corev1.PersistentVolume) (bool, *corev1.PersistentVolumeClaim, error) {
func (s *persistentVolumeSyncer) shouldSync(ctx context.Context, pObj *corev1.PersistentVolume) (shouldSync bool, vpvc *corev1.PersistentVolumeClaim, ppvc *corev1.PersistentVolumeClaim, err error) {
// is there an assigned PVC?
if pObj.Spec.ClaimRef == nil {
if translate.Default.IsManagedCluster(pObj) {
return true, nil, nil
return true, nil, nil, nil
}

return false, nil, nil
return false, nil, nil, nil
}

vPvc := &corev1.PersistentVolumeClaim{}
err := clienthelper.GetByIndex(ctx, s.virtualClient, vPvc, constants.IndexByPhysicalName, pObj.Spec.ClaimRef.Namespace+"/"+pObj.Spec.ClaimRef.Name)
err = clienthelper.GetByIndex(ctx, s.virtualClient, vPvc, constants.IndexByPhysicalName, pObj.Spec.ClaimRef.Namespace+"/"+pObj.Spec.ClaimRef.Name)
if err != nil {
if !kerrors.IsNotFound(err) {
return false, nil, err
return false, nil, nil, err
} else if translate.Default.IsManagedCluster(pObj) {
return true, nil, nil
return true, nil, nil, nil
}

namespace, err := translate.Default.LegacyGetTargetNamespace()
if err != nil {
return false, nil, nil
return false, nil, nil, nil
}
return pObj.Spec.ClaimRef.Namespace == namespace && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil
return pObj.Spec.ClaimRef.Namespace == namespace && pObj.Spec.PersistentVolumeReclaimPolicy == corev1.PersistentVolumeReclaimRetain, nil, nil, nil
}

pPvc := &corev1.PersistentVolumeClaim{}
err = s.physicalClient.Get(ctx, types.NamespacedName{Namespace: pObj.Spec.ClaimRef.Namespace, Name: pObj.Spec.ClaimRef.Name}, pPvc)
if kerrors.IsNotFound(err) {
return true, vPvc, nil, nil
} else if err != nil {
return true, vPvc, nil, err
}

return true, vPvc, nil
return true, vPvc, pPvc, nil
}

func (s *persistentVolumeSyncer) IsManaged(ctx context.Context, pObj client.Object) (bool, error) {
@@ -275,7 +293,7 @@ func (s *persistentVolumeSyncer) IsManaged(ctx context.Context, pObj client.Obje
return false, nil
}

sync, _, err := s.shouldSync(ctx, pPv)
sync, _, _, err := s.shouldSync(ctx, pPv)
if err != nil {
return false, nil
}
9 changes: 9 additions & 0 deletions pkg/controllers/resources/persistentvolumes/translate.go
@@ -113,6 +113,11 @@ func (s *persistentVolumeSyncer) translateUpdate(ctx context.Context, vPv *corev
updated.Spec.Capacity = vPv.Spec.Capacity
}

if !equality.Semantic.DeepEqual(pPv.Spec.ClaimRef, vPv.Spec.ClaimRef) {
updated = translator.NewIfNil(updated, pPv)
updated.Spec.ClaimRef = vPv.Spec.ClaimRef
}

if !equality.Semantic.DeepEqual(pPv.Spec.AccessModes, vPv.Spec.AccessModes) {
updated = translator.NewIfNil(updated, pPv)
updated.Spec.AccessModes = vPv.Spec.AccessModes
@@ -124,6 +129,10 @@ func (s *persistentVolumeSyncer) translateUpdate(ctx context.Context, vPv *corev
}

translatedStorageClassName := translateStorageClass(vPv.Spec.StorageClassName)
if !s.useFakePersistentVolumes {
translatedStorageClassName = vPv.Spec.StorageClassName
}

if !equality.Semantic.DeepEqual(pPv.Spec.StorageClassName, translatedStorageClassName) {
updated = translator.NewIfNil(updated, pPv)
updated.Spec.StorageClassName = translatedStorageClassName
58 changes: 58 additions & 0 deletions test/e2e_pvc_retain/e2e_suite_test.go
@@ -0,0 +1,58 @@
package e2e

import (
"context"
"testing"

"github.com/loft-sh/log"
"github.com/loft-sh/vcluster/test/framework"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"

// Enable cloud provider auth
_ "k8s.io/client-go/plugin/pkg/client/auth"

// Register tests
_ "github.com/loft-sh/vcluster/test/e2e_pvc_retain/reusevolume"
)

var (
scheme = runtime.NewScheme()
)

func init() {
_ = clientgoscheme.AddToScheme(scheme)
// API extensions are not in the above scheme set,
// and must thus be added separately.
_ = apiextensionsv1beta1.AddToScheme(scheme)
_ = apiextensionsv1.AddToScheme(scheme)
_ = apiregistrationv1.AddToScheme(scheme)
}

// TestRunE2ETests checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func TestRunE2ETests(t *testing.T) {
gomega.RegisterFailHandler(ginkgo.Fail)
err := framework.CreateFramework(context.Background(), scheme)
if err != nil {
log.GetInstance().Fatalf("Error setting up framework: %v", err)
}

var _ = ginkgo.AfterSuite(func() {
err = framework.DefaultFramework.Cleanup()
if err != nil {
log.GetInstance().Warnf("Error executing testsuite cleanup: %v", err)
}
})

ginkgo.RunSpecs(t, "pvc syncing")
}
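The suite file above only wires up the Ginkgo runner; the actual specs come in through the blank import of github.com/loft-sh/vcluster/test/e2e_pvc_retain/reusevolume, whose files are among the changed files not shown in this view. For readers unfamiliar with that pattern, here is a hypothetical skeleton (names, spec text, and assertion are placeholders, not the real tests) showing how such a package registers specs at import time:

package reusevolume

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// A package-level Describe is what makes the blank import in e2e_suite_test.go
// sufficient: importing the package runs this initializer, and ginkgo.RunSpecs
// later executes everything that was registered.
var _ = ginkgo.Describe("Retain PV reuse", func() {
	ginkgo.It("keeps a released Retain volume claimable by a new PVC", func() {
		// Placeholder assertion; a real spec would create a PVC, delete it, and
		// re-claim the retained volume through the test framework.
		gomega.Expect(true).To(gomega.BeTrue())
	})
})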
