k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/volume/persistentvolume/pv_controller_base.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	coreinformers "k8s.io/client-go/informers/core/v1"
	storageinformers "k8s.io/client-go/informers/storage/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	storagehelpers "k8s.io/component-helpers/storage/volume"
	csitrans "k8s.io/csi-translation-lib"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/volume/common"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/goroutinemap"
	"k8s.io/kubernetes/pkg/util/slice"
	vol "k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/csimigration"

	"k8s.io/klog/v2"
)
// This file contains the controller's base functionality, i.e. the framework
// that processes PV/PVC add/update/delete events. The actual binding,
// provisioning, recycling and deletion are implemented in pv_controller.go.
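//
// The controller keeps its own copies of PVs and PVCs (ctrl.volumes and
// ctrl.claims) in addition to the informer caches; storeObjectUpdate compares
// ResourceVersions so that stale informer events can be detected and dropped.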

// ControllerParameters contains arguments for creation of a new
// PersistentVolume controller.
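//
// A minimal wiring sketch (illustrative only; "client" is assumed to be a
// kubernetes.Interface and "factory" a client-go SharedInformerFactory;
// VolumePlugins and EnableDynamicProvisioning are omitted for brevity):
//
//	factory := informers.NewSharedInformerFactory(client, 0)
//	params := persistentvolume.ControllerParameters{
//		KubeClient:     client,
//		SyncPeriod:     15 * time.Second,
//		VolumeInformer: factory.Core().V1().PersistentVolumes(),
//		ClaimInformer:  factory.Core().V1().PersistentVolumeClaims(),
//		ClassInformer:  factory.Storage().V1().StorageClasses(),
//		PodInformer:    factory.Core().V1().Pods(),
//		NodeInformer:   factory.Core().V1().Nodes(),
//	}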
type ControllerParameters struct {
	KubeClient                clientset.Interface
	SyncPeriod                time.Duration
	VolumePlugins             []vol.VolumePlugin
	VolumeInformer            coreinformers.PersistentVolumeInformer
	ClaimInformer             coreinformers.PersistentVolumeClaimInformer
	ClassInformer             storageinformers.StorageClassInformer
	PodInformer               coreinformers.PodInformer
	NodeInformer              coreinformers.NodeInformer
	EnableDynamicProvisioning bool
}

// NewController creates a new PersistentVolume controller
func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) {
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})

	controller := &PersistentVolumeController{
		volumes:                       newPersistentVolumeOrderedIndex(),
		claims:                        cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
		kubeClient:                    p.KubeClient,
		eventBroadcaster:              eventBroadcaster,
		eventRecorder:                 eventRecorder,
		runningOperations:             goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
		enableDynamicProvisioning:     p.EnableDynamicProvisioning,
		createProvisionedPVRetryCount: createProvisionedPVRetryCount,
		createProvisionedPVInterval:   createProvisionedPVInterval,
		claimQueue:                    workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "claims"}),
		volumeQueue:                   workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "volumes"}),
		resyncPeriod:                  p.SyncPeriod,
		operationTimestamps:           metrics.NewOperationStartTimeCache(),
	}

	// Prober is nil because PV is not aware of Flexvolume.
	if err := controller.volumePluginMgr.InitPlugins(p.VolumePlugins, nil /* prober */, controller); err != nil {
		return nil, fmt.Errorf("could not initialize volume plugins for PersistentVolume Controller: %w", err)
	}

	p.VolumeInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, newObj) },
			DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
		},
	)
	controller.volumeLister = p.VolumeInformer.Lister()
	controller.volumeListerSynced = p.VolumeInformer.Informer().HasSynced

	p.ClaimInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, newObj) },
			DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
		},
	)
	controller.claimLister = p.ClaimInformer.Lister()
	controller.claimListerSynced = p.ClaimInformer.Informer().HasSynced

	controller.classLister = p.ClassInformer.Lister()
	controller.classListerSynced = p.ClassInformer.Informer().HasSynced
	controller.podLister = p.PodInformer.Lister()
	controller.podIndexer = p.PodInformer.Informer().GetIndexer()
	controller.podListerSynced = p.PodInformer.Informer().HasSynced
	controller.NodeLister = p.NodeInformer.Lister()
	controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced

	// This custom indexer indexes pods by their PVC keys, so we don't have to
	// iterate over all pods every time to find the pods referencing a given PVC.
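	// (The index key is assumed to be the claim's "namespace/name", matching
	// the keys that enqueueWork produces for claims.)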
	if err := common.AddPodPVCIndexerIfNotPresent(controller.podIndexer); err != nil {
		return nil, fmt.Errorf("could not initialize PersistentVolume controller: %w", err)
	}

	csiTranslator := csitrans.New()
	controller.translator = csiTranslator
	controller.csiMigratedPluginManager = csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate)

	return controller, nil
}

// initializeCaches fills all controller caches with initial data from the
// listers so that the caches are already populated when the first
// addClaim/addVolume events perform the initial synchronization of the
// controller.
func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
	volumeList, err := volumeLister.List(labels.Everything())
	if err != nil {
		logger.Error(err, "PersistentVolumeController can't initialize caches")
		return
	}
	for _, volume := range volumeList {
		volumeClone := volume.DeepCopy()
		if _, err = ctrl.storeVolumeUpdate(logger, volumeClone); err != nil {
			logger.Error(err, "Error updating volume cache")
		}
	}

	claimList, err := claimLister.List(labels.Everything())
	if err != nil {
		logger.Error(err, "PersistentVolumeController can't initialize caches")
		return
	}
	for _, claim := range claimList {
		if _, err = ctrl.storeClaimUpdate(logger, claim.DeepCopy()); err != nil {
			logger.Error(err, "Error updating claim cache")
		}
	}
	logger.V(4).Info("Controller initialized")
}

// enqueueWork adds a volume or claim to the given work queue.
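// Keys come from controller.KeyFunc (cache.DeletionHandlingMetaNamespaceKeyFunc):
// namespaced claims are enqueued as "namespace/name", cluster-scoped volumes
// simply as "name".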
func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.TypedInterface[string], obj interface{}) {
	// Beware of "xxx deleted" events
	logger := klog.FromContext(ctx)
	if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
		obj = unknown.Obj
	}
	objName, err := controller.KeyFunc(obj)
	if err != nil {
		logger.Error(err, "Failed to get key from object")
		return
	}
	logger.V(5).Info("Enqueued for sync", "objName", objName)
	queue.Add(objName)
}

func (ctrl *PersistentVolumeController) storeVolumeUpdate(logger klog.Logger, volume interface{}) (bool, error) {
	return storeObjectUpdate(logger, ctrl.volumes.store, volume, "volume")
}

func (ctrl *PersistentVolumeController) storeClaimUpdate(logger klog.Logger, claim interface{}) (bool, error) {
	return storeObjectUpdate(logger, ctrl.claims, claim, "claim")
}

// updateVolume runs in worker thread and handles "volume added",
// "volume updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) {
	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	logger := klog.FromContext(ctx)
	isNew, err := ctrl.storeVolumeUpdate(logger, volume)
	if err != nil {
		logger.Error(err, "Error storing volume update in cache", "volumeName", volume.Name)
	}
	if !isNew {
		return
	}

	err = ctrl.syncVolume(ctx, volume)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			logger.V(3).Info("Could not sync volume", "volumeName", volume.Name, "err", err)
		} else {
			logger.Error(err, "Could not sync volume", "volumeName", volume.Name)
		}
	}
}

// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(ctx context.Context, volume *v1.PersistentVolume) {
	logger := klog.FromContext(ctx)
	if err := ctrl.volumes.store.Delete(volume); err != nil {
		logger.Error(err, "Failed to delete volume from cache", "volumeName", volume.Name)
	} else {
		logger.V(4).Info("volume deleted", "volumeName", volume.Name)
	}
	// Record the deletion metric if a deletion start timestamp is in the cache.
	// The following call is a no-op if there is nothing for this volume in the
	// cache; it marks the end of the timestamp cache entry's lifecycle, and
	// RecordMetric does the cleanup.
	metrics.RecordMetric(volume.Name, &ctrl.operationTimestamps, nil)

	if volume.Spec.ClaimRef == nil {
		return
	}
	// Sync the claim when its volume is deleted. Explicitly syncing the
	// claim here in response to volume deletion prevents the claim from
	// waiting until the next sync period for its Lost status.
	claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
	logger.V(5).Info("deleteVolume: scheduling sync of claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name)
	ctrl.claimQueue.Add(claimKey)
}

// updateClaim runs in worker thread and handles "claim added",
// "claim updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
	// Store the new claim version in the cache and do not process it if this is
	// an old version.
	logger := klog.FromContext(ctx)
	isNew, err := ctrl.storeClaimUpdate(logger, claim)
	if err != nil {
		logger.Error(err, "Error storing claim update in cache", "PVC", klog.KObj(claim))
	}
	if !isNew {
		return
	}
	err = ctrl.syncClaim(ctx, claim)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			logger.V(3).Info("Could not sync claim", "PVC", klog.KObj(claim), "err", err)
		} else {
			logger.Error(err, "Could not sync claim", "PVC", klog.KObj(claim))
		}
	}
}

// Unit test [5-5] [5-6] [5-7]
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
	logger := klog.FromContext(ctx)
	if err := ctrl.claims.Delete(claim); err != nil {
		logger.Error(err, "Failed to delete claim from cache", "PVC", klog.KObj(claim))
	}
	claimKey := claimToClaimKey(claim)
	logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
	// Clean any possible unfinished provision start timestamp from the cache.
	// Unit test [5-8] [5-9]
	ctrl.operationTimestamps.Delete(claimKey)

	volumeName := claim.Spec.VolumeName
	if volumeName == "" {
		logger.V(5).Info("deleteClaim: volume not bound", "PVC", klog.KObj(claim))
		return
	}

	// Sync the volume when its claim is deleted. Explicitly syncing the
	// volume here in response to claim deletion prevents the volume from
	// waiting until the next sync period for its release.
	logger.V(5).Info("deleteClaim: scheduling sync of volume", "PVC", klog.KObj(claim), "volumeName", volumeName)
	ctrl.volumeQueue.Add(volumeName)
}

// Run starts all of this controller's control loops.
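// The shared informers must be started by the caller; Run waits for their
// caches to sync, primes the controller's local caches, starts the resync
// loop plus one volume worker and one claim worker, and blocks until ctx is
// cancelled.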
func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()
	defer ctrl.claimQueue.ShutDown()
	defer ctrl.volumeQueue.ShutDown()

	// Start events processing pipeline.
	ctrl.eventBroadcaster.StartStructuredLogging(3)
	ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")})
	defer ctrl.eventBroadcaster.Shutdown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting persistent volume controller")
	defer logger.Info("Shutting down persistent volume controller")

	if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
		return
	}

	ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister)

	go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done())
	go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
	go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)

	metrics.Register(ctrl.volumes.store, ctrl.claims, &ctrl.volumePluginMgr)

	<-ctx.Done()
}

func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx context.Context,
	claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	// TODO: update[Claim|Volume]MigrationAnnotations can be optimized to not
	// copy the claim/volume if no modifications are required. Though this
	// requires some refactoring as well as an interesting change in the
	// semantics of the function which may be undesirable. If no copy is made
	// when no modifications are required, this function could sometimes return
	// a copy of the volume and sometimes return a ref to the original.
	claimClone := claim.DeepCopy()
	logger := klog.FromContext(ctx)
	modified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
	if !modified {
		return claimClone, nil
	}
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update migration annotations: %w", err)
	}
	_, err = ctrl.storeClaimUpdate(logger, newClaim)
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update migration annotations: %w", err)
	}
	return newClaim, nil
}

func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context,
	volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	volumeClone := volume.DeepCopy()
	logger := klog.FromContext(ctx)
	annModified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
	modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(logger, ctrl.csiMigratedPluginManager, volumeClone)
	if !annModified && !finalizersModified {
		return volumeClone, nil
	}
	if finalizersModified {
		volumeClone.ObjectMeta.SetFinalizers(modifiedFinalizers)
	}
	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(ctx, volumeClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update migration annotations or finalizer: %w", err)
	}
	_, err = ctrl.storeVolumeUpdate(logger, newVol)
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update migration annotations or finalizer: %w", err)
	}
	return newVol, nil
}

// modifyDeletionFinalizers updates the finalizers based on the reclaim policy
// and on whether the volume is an in-tree volume. The in-tree PV deletion
// protection finalizer is only added if the reclaim policy associated with the
// PV is `Delete`; it is removed if the reclaim policy is `Retain` or
// `Recycle`, which is necessary to reflect reclaim policy updates on the PV.
// The method also removes any external PV deletion protection finalizers added
// on the PV, which covers CSI migration rollback/disable scenarios.
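// For example (a sketch of the expected behavior): a dynamically provisioned
// in-tree PV with reclaim policy Delete gains the in-tree protection
// finalizer, and flipping its reclaim policy to Retain removes that finalizer
// again on the next sync.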
func modifyDeletionFinalizers(logger klog.Logger, cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) {
	modified := false
	var outFinalizers []string
	if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) {
		return volume.Finalizers, false
	}
	if !metav1.HasAnnotation(volume.ObjectMeta, storagehelpers.AnnDynamicallyProvisioned) {
		// PV deletion protection finalizer is currently supported only for
		// dynamically provisioned volumes.
		return volume.Finalizers, false
	}
	if volume.Finalizers != nil {
		outFinalizers = append(outFinalizers, volume.Finalizers...)
	}
	provisioner := volume.Annotations[storagehelpers.AnnDynamicallyProvisioned]
	if cmpm.IsMigrationEnabledForPlugin(provisioner) {
		// Remove the in-tree delete finalizer on the PV as migration is enabled.
		if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
			outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
			modified = true
		}
		return outFinalizers, modified
	}
	// Check whether it is an in-tree volume.
	if !strings.HasPrefix(provisioner, "kubernetes.io/") {
		// The provisioner name does not have the known in-tree plugin prefix.
		return volume.Finalizers, false
	}
	reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy
	// Add back the in-tree PV deletion protection finalizer if it does not already exist.
	if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
		logger.V(4).Info("Adding in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer)
		modified = true
	} else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
		// Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle'.
		logger.V(4).Info("Removing in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
		modified = true
	}
	// Remove the external PV deletion protection finalizer.
	if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) {
		logger.V(4).Info("Removing external pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil)
		modified = true
	}
	return outFinalizers, modified
}

// updateMigrationAnnotations takes an annotations map and checks for a
// provisioner name using the provisionerKey. It adds a
// "pv.kubernetes.io/migrated-to" annotation if migration to the CSI driver
// for that provisioner is "on" based on feature flags, and removes the
// annotation if migration is "off" for that provisioner (rollback scenarios).
// Returns true if the annotations map was modified, false otherwise.
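// For example, when migration is enabled for the in-tree AWS EBS plugin, a
// claim annotated "volume.kubernetes.io/storage-provisioner: kubernetes.io/aws-ebs"
// gets "pv.kubernetes.io/migrated-to: ebs.csi.aws.com".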
func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool {
	var csiDriverName string
	var err error

	if ann == nil {
		// No annotations, so we can't get the provisioner and don't know
		// whether this is migrated - no change.
		return false
	}
	var provisionerKey string
	if claim {
		provisionerKey = storagehelpers.AnnStorageProvisioner
	} else {
		provisionerKey = storagehelpers.AnnDynamicallyProvisioned
	}
	provisioner, ok := ann[provisionerKey]
	if !ok {
		if claim {
			// Also check the beta AnnBetaStorageProvisioner annotation as a
			// fallback.
			provisioner, ok = ann[storagehelpers.AnnBetaStorageProvisioner]
			if !ok {
				return false
			}
		} else {
			// The volume was statically provisioned.
			return false
		}
	}

	migratedToDriver := ann[storagehelpers.AnnMigratedTo]
	if cmpm.IsMigrationEnabledForPlugin(provisioner) {
		csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner)
		if err != nil {
			logger.Error(err, "Could not update volume migration annotations. Migration enabled for plugin but could not find corresponding driver name", "plugin", provisioner)
			return false
		}
		if migratedToDriver != csiDriverName {
			ann[storagehelpers.AnnMigratedTo] = csiDriverName
			return true
		}
	} else {
		if migratedToDriver != "" {
			// The migration annotation exists but the driver isn't currently
			// migrated.
			delete(ann, storagehelpers.AnnMigratedTo)
			return true
		}
	}
	return false
}

// volumeWorker processes items from volumeQueue. It must run only once;
// syncVolume is not assured to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
	logger := klog.FromContext(ctx)
	workFunc := func(ctx context.Context) bool {
		key, quit := ctrl.volumeQueue.Get()
		if quit {
			return true
		}
		defer ctrl.volumeQueue.Done(key)
		logger.V(5).Info("volumeWorker", "volumeKey", key)

		_, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			logger.V(4).Info("Error getting name of volume to get volume from informer", "volumeKey", key, "err", err)
			return false
		}
		volume, err := ctrl.volumeLister.Get(name)
		if err == nil {
			// The volume still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateVolume(ctx, volume)
			return false
		}
		if !errors.IsNotFound(err) {
			logger.V(2).Info("Error getting volume from informer", "volumeKey", key, "err", err)
			return false
		}

		// The volume is not in informer cache, the event must have been
		// "delete"
		volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
		if err != nil {
			logger.V(2).Info("Error getting volume from cache", "volumeKey", key, "err", err)
			return false
		}
		if !found {
			// The controller has already processed the delete event and
			// deleted the volume from its cache
			logger.V(2).Info("Deletion of volume was already processed", "volumeKey", key)
			return false
		}
		volume, ok := volumeObj.(*v1.PersistentVolume)
		if !ok {
			logger.Error(nil, "Expected volume, got", "obj", volumeObj)
			return false
		}
		ctrl.deleteVolume(ctx, volume)
		return false
	}
	for {
		if quit := workFunc(ctx); quit {
			logger.Info("Volume worker queue shutting down")
			return
		}
	}
}

// claimWorker processes items from claimQueue. It must run only once;
// syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
	logger := klog.FromContext(ctx)
	workFunc := func() bool {
		key, quit := ctrl.claimQueue.Get()
		if quit {
			return true
		}
		defer ctrl.claimQueue.Done(key)
		logger.V(5).Info("claimWorker", "claimKey", key)

		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			logger.V(4).Info("Error getting namespace & name of claim to get claim from informer", "claimKey", key, "err", err)
			return false
		}
		claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
		if err == nil {
			// The claim still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateClaim(ctx, claim)
			return false
		}
		if !errors.IsNotFound(err) {
			logger.V(2).Info("Error getting claim from informer", "claimKey", key, "err", err)
			return false
		}

		// The claim is not in informer cache, the event must have been "delete"
		claimObj, found, err := ctrl.claims.GetByKey(key)
		if err != nil {
			logger.V(2).Info("Error getting claim from cache", "claimKey", key, "err", err)
			return false
		}
		if !found {
			// The controller has already processed the delete event and
			// deleted the claim from its cache
			logger.V(2).Info("Deletion of claim was already processed", "claimKey", key)
			return false
		}
		claim, ok := claimObj.(*v1.PersistentVolumeClaim)
		if !ok {
			logger.Error(nil, "Expected claim, got", "obj", claimObj)
			return false
		}
		ctrl.deleteClaim(ctx, claim)
		return false
	}
	for {
		if quit := workFunc(); quit {
			logger.Info("Claim worker queue shutting down")
			return
		}
	}
}

// resync supplements the short resync period of the shared informers - we
// don't want all consumers of the PV/PVC shared informers to have a short
// resync period, therefore we do our own.
func (ctrl *PersistentVolumeController) resync(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Resyncing PV controller")

	pvcs, err := ctrl.claimLister.List(labels.NewSelector())
	if err != nil {
		logger.Info("Cannot list claims", "err", err)
		return
	}
	for _, pvc := range pvcs {
		ctrl.enqueueWork(ctx, ctrl.claimQueue, pvc)
	}

	pvs, err := ctrl.volumeLister.List(labels.NewSelector())
	if err != nil {
		logger.Info("Cannot list persistent volumes", "err", err)
		return
	}
	for _, pv := range pvs {
		ctrl.enqueueWork(ctx, ctrl.volumeQueue, pv)
	}
}

// setClaimProvisioner saves
// claim.Annotations["volume.kubernetes.io/storage-provisioner"] = class.Provisioner
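// It also mirrors the value into the deprecated beta annotation and refreshes
// the migration annotations, issuing a single Update call for all changes.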
func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context, claim *v1.PersistentVolumeClaim, provisionerName string) (*v1.PersistentVolumeClaim, error) {
	if val, ok := claim.Annotations[storagehelpers.AnnStorageProvisioner]; ok && val == provisionerName {
		// annotation is already set, nothing to do
		return claim, nil
	}

	// The claim from the method args may point into the watcher cache. We must
	// not modify it, therefore create a copy.
	claimClone := claim.DeepCopy()
	// TODO: remove the beta storage provisioner anno after the deprecation period
	logger := klog.FromContext(ctx)
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName)
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName)
	updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return newClaim, err
	}
	_, err = ctrl.storeClaimUpdate(logger, newClaim)
	if err != nil {
		return newClaim, err
	}
	return newClaim, nil
}

// Stateless functions

func getClaimStatusForLogging(claim *v1.PersistentVolumeClaim) string {
	bound := metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBindCompleted)
	boundByController := metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBoundByController)

	return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
}

func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
	boundByController := metav1.HasAnnotation(volume.ObjectMeta, storagehelpers.AnnBoundByController)
	claimName := ""
	if volume.Spec.ClaimRef != nil {
		claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
	}
	return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
}

// storeObjectUpdate updates the given cache with a new object version from an
// Informer callback (i.e. with events from etcd) or with an object modified by
// the controller itself. Returns true if the cache was updated, false if the
// object is an old version and should be ignored.
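// For example: an incoming object with ResourceVersion "240" replaces a cached
// "239"; a second "240" is stored again (periodic syncs must pass through);
// a stale "238" is ignored and the function returns false.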
func storeObjectUpdate(logger klog.Logger, store cache.Store, obj interface{}, objType string) (bool, error) {
	objName, err := controller.KeyFunc(obj)
	if err != nil {
		return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err)
	}
	oldObj, found, err := store.Get(obj)
	if err != nil {
		return false, fmt.Errorf("error finding %s %q in controller cache: %w", objType, objName, err)
	}

	objAccessor, err := meta.Accessor(obj)
	if err != nil {
		return false, err
	}
	if !found {
		// This is a new object
		logger.V(4).Info("storeObjectUpdate, adding obj", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
		if err = store.Add(obj); err != nil {
			return false, fmt.Errorf("error adding %s %q to controller cache: %w", objType, objName, err)
		}
		return true, nil
	}

	oldObjAccessor, err := meta.Accessor(oldObj)
	if err != nil {
		return false, err
	}

	objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing ResourceVersion %q of %s %q: %w", objAccessor.GetResourceVersion(), objType, objName, err)
	}
	oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing old ResourceVersion %q of %s %q: %w", oldObjAccessor.GetResourceVersion(), objType, objName, err)
	}

	// Throw away only the older version; let the same version pass - we do
	// want to get periodic sync events.
	if oldObjResourceVersion > objResourceVersion {
		logger.V(4).Info("storeObjectUpdate: ignoring obj", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
		return false, nil
	}

	logger.V(4).Info("storeObjectUpdate updating obj with version", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
	if err = store.Update(obj); err != nil {
		return false, fmt.Errorf("error updating %s %q in controller cache: %w", objType, objName, err)
	}
	return true, nil
}