k8s.io/kubernetes@v1.29.3/pkg/controller/volume/persistentvolume/pv_controller_base.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	coreinformers "k8s.io/client-go/informers/core/v1"
	storageinformers "k8s.io/client-go/informers/storage/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	cloudprovider "k8s.io/cloud-provider"
	storagehelpers "k8s.io/component-helpers/storage/volume"
	csitrans "k8s.io/csi-translation-lib"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/volume/common"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/goroutinemap"
	"k8s.io/kubernetes/pkg/util/slice"
	vol "k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/csimigration"

	"k8s.io/klog/v2"
)
// This file contains the controller base functionality, i.e. the framework to
// process PV/PVC added/updated/deleted events. The real binding, provisioning,
// recycling and deleting is done in pv_controller.go.

// ControllerParameters contains arguments for creation of a new
// PersistentVolume controller.
type ControllerParameters struct {
	KubeClient                clientset.Interface
	SyncPeriod                time.Duration
	VolumePlugins             []vol.VolumePlugin
	Cloud                     cloudprovider.Interface
	ClusterName               string
	VolumeInformer            coreinformers.PersistentVolumeInformer
	ClaimInformer             coreinformers.PersistentVolumeClaimInformer
	ClassInformer             storageinformers.StorageClassInformer
	PodInformer               coreinformers.PodInformer
	NodeInformer              coreinformers.NodeInformer
	EventRecorder             record.EventRecorder
	EnableDynamicProvisioning bool
}

// NewController creates a new PersistentVolume controller
func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) {
	eventRecorder := p.EventRecorder
	var eventBroadcaster record.EventBroadcaster
	if eventRecorder == nil {
		eventBroadcaster = record.NewBroadcaster()
		eventRecorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
	}

	controller := &PersistentVolumeController{
		volumes:                       newPersistentVolumeOrderedIndex(),
		claims:                        cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
		kubeClient:                    p.KubeClient,
		eventBroadcaster:              eventBroadcaster,
		eventRecorder:                 eventRecorder,
		runningOperations:             goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
		cloud:                         p.Cloud,
		enableDynamicProvisioning:     p.EnableDynamicProvisioning,
		clusterName:                   p.ClusterName,
		createProvisionedPVRetryCount: createProvisionedPVRetryCount,
		createProvisionedPVInterval:   createProvisionedPVInterval,
		claimQueue:                    workqueue.NewNamed("claims"),
		volumeQueue:                   workqueue.NewNamed("volumes"),
		resyncPeriod:                  p.SyncPeriod,
		operationTimestamps:           metrics.NewOperationStartTimeCache(),
	}

	// Prober is nil because the PV controller is not aware of Flexvolume.
	if err := controller.volumePluginMgr.InitPlugins(p.VolumePlugins, nil /* prober */, controller); err != nil {
		return nil, fmt.Errorf("could not initialize volume plugins for PersistentVolume Controller: %w", err)
	}

	p.VolumeInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, newObj) },
			DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
		},
	)
	controller.volumeLister = p.VolumeInformer.Lister()
	controller.volumeListerSynced = p.VolumeInformer.Informer().HasSynced

	p.ClaimInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, newObj) },
			DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
		},
	)
	controller.claimLister = p.ClaimInformer.Lister()
	controller.claimListerSynced = p.ClaimInformer.Informer().HasSynced

	controller.classLister = p.ClassInformer.Lister()
	controller.classListerSynced = p.ClassInformer.Informer().HasSynced
	controller.podLister = p.PodInformer.Lister()
	controller.podIndexer = p.PodInformer.Informer().GetIndexer()
	controller.podListerSynced = p.PodInformer.Informer().HasSynced
	controller.NodeLister = p.NodeInformer.Lister()
	controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced

	// This custom indexer indexes pods by their PVC keys, so we don't have to
	// iterate over all pods every time to find pods which reference a given PVC.
	if err := common.AddPodPVCIndexerIfNotPresent(controller.podIndexer); err != nil {
		return nil, fmt.Errorf("could not initialize PersistentVolume Controller pod indexer: %w", err)
	}

	csiTranslator := csitrans.New()
	controller.translator = csiTranslator
	controller.csiMigratedPluginManager = csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate)

	return controller, nil
}
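
// The sketch below illustrates how a caller might wire the controller into a
// process. It is not part of the upstream file; names such as kubeClient and
// the 15-second sync period are assumptions for illustration only.
//
//	factory := informers.NewSharedInformerFactory(kubeClient, 0)
//	ctrl, err := persistentvolume.NewController(ctx, persistentvolume.ControllerParameters{
//		KubeClient:     kubeClient,
//		SyncPeriod:     15 * time.Second,
//		VolumeInformer: factory.Core().V1().PersistentVolumes(),
//		ClaimInformer:  factory.Core().V1().PersistentVolumeClaims(),
//		ClassInformer:  factory.Storage().V1().StorageClasses(),
//		PodInformer:    factory.Core().V1().Pods(),
//		NodeInformer:   factory.Core().V1().Nodes(),
//	})
//	if err != nil {
//		klog.Fatalf("failed to construct PV controller: %v", err)
//	}
//	factory.Start(ctx.Done())
//	go ctrl.Run(ctx)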

// initializeCaches fills all controller caches with initial data from etcd so
// that the caches are already populated when the first addClaim/addVolume
// calls perform the initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
	volumeList, err := volumeLister.List(labels.Everything())
	if err != nil {
		logger.Error(err, "PersistentVolumeController can't initialize caches")
		return
	}
	for _, volume := range volumeList {
		volumeClone := volume.DeepCopy()
		if _, err = ctrl.storeVolumeUpdate(logger, volumeClone); err != nil {
			logger.Error(err, "Error updating volume cache")
		}
	}

	claimList, err := claimLister.List(labels.Everything())
	if err != nil {
		logger.Error(err, "PersistentVolumeController can't initialize caches")
		return
	}
	for _, claim := range claimList {
		if _, err = ctrl.storeClaimUpdate(logger, claim.DeepCopy()); err != nil {
			logger.Error(err, "Error updating claim cache")
		}
	}
	logger.V(4).Info("Controller initialized")
}

// enqueueWork adds a volume or claim to the given work queue.
func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) {
	// Beware of "xxx deleted" events
	logger := klog.FromContext(ctx)
	if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
		obj = unknown.Obj
	}
	objName, err := controller.KeyFunc(obj)
	if err != nil {
		logger.Error(err, "Failed to get key from object")
		return
	}
	logger.V(5).Info("Enqueued for sync", "objName", objName)
	queue.Add(objName)
}
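
// For reference, controller.KeyFunc is the standard deletion-handling
// MetaNamespace key function, so queue keys are "namespace/name" for claims
// and plain "name" for the non-namespaced volumes. A minimal sketch, assuming
// a claim "myclaim" in namespace "default" and a volume "pv-1":
//
//	key, _ := cache.MetaNamespaceKeyFunc(claim) // "default/myclaim"
//	key, _ = cache.MetaNamespaceKeyFunc(pv)     // "pv-1"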

func (ctrl *PersistentVolumeController) storeVolumeUpdate(logger klog.Logger, volume interface{}) (bool, error) {
	return storeObjectUpdate(logger, ctrl.volumes.store, volume, "volume")
}

func (ctrl *PersistentVolumeController) storeClaimUpdate(logger klog.Logger, claim interface{}) (bool, error) {
	return storeObjectUpdate(logger, ctrl.claims, claim, "claim")
}

// updateVolume runs in worker thread and handles "volume added",
// "volume updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) {
	// Store the new volume version in the cache and do not process it if this
	// is an old version.
	logger := klog.FromContext(ctx)
	new, err := ctrl.storeVolumeUpdate(logger, volume)
	if err != nil {
		logger.Error(err, "Failed to store volume update in cache", "volumeName", volume.Name)
	}
	if !new {
		return
	}

	err = ctrl.syncVolume(ctx, volume)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			logger.V(3).Info("Could not sync volume", "volumeName", volume.Name, "err", err)
		} else {
			logger.Error(err, "Could not sync volume", "volumeName", volume.Name)
		}
	}
}

// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(ctx context.Context, volume *v1.PersistentVolume) {
	logger := klog.FromContext(ctx)
	if err := ctrl.volumes.store.Delete(volume); err != nil {
		logger.Error(err, "Failed to delete volume from cache", "volumeName", volume.Name)
	} else {
		logger.V(4).Info("volume deleted", "volumeName", volume.Name)
	}
	// Record the deletion metric if a deletion start timestamp is in the cache.
	// The following call is a no-op if there is nothing for this volume in the
	// cache; it ends the lifecycle of the timestamp cache entry, and
	// RecordMetric does the cleanup.
	metrics.RecordMetric(volume.Name, &ctrl.operationTimestamps, nil)

	if volume.Spec.ClaimRef == nil {
		return
	}
	// sync the claim when its volume is deleted. Explicitly syncing the
	// claim here in response to volume deletion prevents the claim from
	// waiting until the next sync period for its Lost status.
	claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
	logger.V(5).Info("deleteVolume: scheduling sync of claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name)
	ctrl.claimQueue.Add(claimKey)
}

// updateClaim runs in worker thread and handles "claim added",
// "claim updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
	// Store the new claim version in the cache and do not process it if this is
	// an old version.
	logger := klog.FromContext(ctx)
	new, err := ctrl.storeClaimUpdate(logger, claim)
	if err != nil {
		logger.Error(err, "Failed to store claim update in cache", "PVC", klog.KObj(claim))
	}
	if !new {
		return
	}
	err = ctrl.syncClaim(ctx, claim)
	if err != nil {
		if errors.IsConflict(err) {
			// Version conflict error happens quite often and the controller
			// recovers from it easily.
			logger.V(3).Info("Could not sync claim", "PVC", klog.KObj(claim), "err", err)
		} else {
			logger.Error(err, "Could not sync claim", "PVC", klog.KObj(claim))
		}
	}
}

// Unit test [5-5] [5-6] [5-7]
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
	logger := klog.FromContext(ctx)
	if err := ctrl.claims.Delete(claim); err != nil {
		logger.Error(err, "Failed to delete claim from cache", "PVC", klog.KObj(claim))
	}
	claimKey := claimToClaimKey(claim)
	logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
	// clean any possible unfinished provision start timestamp from cache
	// Unit test [5-8] [5-9]
	ctrl.operationTimestamps.Delete(claimKey)

	volumeName := claim.Spec.VolumeName
	if volumeName == "" {
		logger.V(5).Info("deleteClaim: volume not bound", "PVC", klog.KObj(claim))
		return
	}

	// sync the volume when its claim is deleted. Explicitly syncing the
	// volume here in response to claim deletion prevents the volume from
	// waiting until the next sync period for its Released status.
	logger.V(5).Info("deleteClaim: scheduling sync of volume", "PVC", klog.KObj(claim), "volumeName", volumeName)
	ctrl.volumeQueue.Add(volumeName)
}

// Run starts all of this controller's control loops
func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()
	defer ctrl.claimQueue.ShutDown()
	defer ctrl.volumeQueue.ShutDown()

	// Start events processing pipeline.
	if ctrl.eventBroadcaster != nil {
		ctrl.eventBroadcaster.StartStructuredLogging(0)
		ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")})
		defer ctrl.eventBroadcaster.Shutdown()
	}
	logger := klog.FromContext(ctx)
	logger.Info("Starting persistent volume controller")
	defer logger.Info("Shutting down persistent volume controller")

	if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
		return
	}

	ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister)

	go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done())
	go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
	go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)

	metrics.Register(ctrl.volumes.store, ctrl.claims, &ctrl.volumePluginMgr)

	<-ctx.Done()
}
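
// Run blocks until the context is cancelled; cancelling the context is the
// only supported way to stop the controller. A minimal sketch, assuming a
// constructed ctrl:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	go ctrl.Run(ctx)
//	// ... later, on shutdown:
//	cancel() // the deferred ShutDown calls stop the queues and the workers exit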

func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx context.Context,
	claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	// TODO: update[Claim|Volume]MigrationAnnotations can be optimized to not
	// copy the claim/volume if no modifications are required. Though this
	// requires some refactoring as well as an interesting change in the
	// semantics of the function which may be undesirable. If no copy is made
	// when no modifications are required, this function could sometimes return
	// a copy of the volume and sometimes return a ref to the original.
	claimClone := claim.DeepCopy()
	logger := klog.FromContext(ctx)
	modified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
	if !modified {
		return claimClone, nil
	}
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update claim migration annotations: %w", err)
	}
	_, err = ctrl.storeClaimUpdate(logger, newClaim)
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update claim migration annotations: %w", err)
	}
	return newClaim, nil
}

func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context,
	volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	volumeClone := volume.DeepCopy()
	logger := klog.FromContext(ctx)
	annModified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
	modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(logger, ctrl.csiMigratedPluginManager, volumeClone)
	if !annModified && !finalizersModified {
		return volumeClone, nil
	}
	if finalizersModified {
		volumeClone.ObjectMeta.SetFinalizers(modifiedFinalizers)
	}
	newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(ctx, volumeClone, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update volume migration annotations or finalizers: %w", err)
	}
	_, err = ctrl.storeVolumeUpdate(logger, newVol)
	if err != nil {
		return nil, fmt.Errorf("persistent volume controller can't update volume migration annotations or finalizers: %w", err)
	}
	return newVol, nil
}

// modifyDeletionFinalizers updates the finalizers based on the reclaim policy and on whether the volume is in-tree.
// The in-tree PV deletion protection finalizer is only added if the reclaimPolicy associated with the PV is `Delete`.
// The in-tree PV deletion protection finalizer is removed if the reclaimPolicy associated with the PV is `Retain` or
// `Recycle`; removing the finalizer is necessary to reflect reclaimPolicy updates on the PV.
// The method also removes any external PV deletion protection finalizers added on the PV; this covers CSI migration
// rollback/disable scenarios.
func modifyDeletionFinalizers(logger klog.Logger, cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) {
	modified := false
	var outFinalizers []string
	if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) {
		return volume.Finalizers, false
	}
	if !metav1.HasAnnotation(volume.ObjectMeta, storagehelpers.AnnDynamicallyProvisioned) {
		// PV deletion protection finalizer is currently supported only for dynamically
		// provisioned volumes.
		return volume.Finalizers, false
	}
	if volume.Finalizers != nil {
		outFinalizers = append(outFinalizers, volume.Finalizers...)
	}
	provisioner := volume.Annotations[storagehelpers.AnnDynamicallyProvisioned]
	if cmpm.IsMigrationEnabledForPlugin(provisioner) {
		// Remove the in-tree delete finalizer on the PV as migration is enabled.
		if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
			outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
			modified = true
		}
		return outFinalizers, modified
	}
	// Check if it is an in-tree volume.
	if !strings.HasPrefix(provisioner, "kubernetes.io/") {
		// The provisioner name does not begin with the known in-tree plugin prefix.
		return volume.Finalizers, false
	}
	reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy
	// Add back the in-tree PV deletion protection finalizer if it does not already exist
	if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
		logger.V(4).Info("Adding in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer)
		modified = true
	} else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
		// Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle'
		logger.V(4).Info("Removing in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
		modified = true
	}
	// Remove the external PV deletion protection finalizer
	if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) {
		logger.V(4).Info("Removing external pv deletion protection finalizer on volume", "volumeName", volume.Name)
		outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil)
		modified = true
	}
	return outFinalizers, modified
}
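
// A worked example of the finalizer logic above, as a sketch: given a
// dynamically provisioned in-tree PV with reclaimPolicy=Delete and migration
// disabled for its plugin, the in-tree protection finalizer is added; flipping
// the policy to Retain removes it again on the next call.
//
//	pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
//	finalizers, modified := modifyDeletionFinalizers(logger, cmpm, pv)
//	// modified == true; finalizers now include
//	// storagehelpers.PVDeletionInTreeProtectionFinalizer
//
//	pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain
//	pv.SetFinalizers(finalizers)
//	finalizers, modified = modifyDeletionFinalizers(logger, cmpm, pv)
//	// modified == true; the in-tree protection finalizer is removed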

// updateMigrationAnnotations takes an Annotations map and checks for a
// provisioner name using the provisionerKey. It will then add a
// "pv.kubernetes.io/migrated-to" annotation if migration with the CSI
// driver name for that provisioner is "on" based on feature flags; it will
// also remove the annotation if migration is "off" for that provisioner, in
// rollback scenarios. Returns true if the annotations map was modified and
// false otherwise.
func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool {
	var csiDriverName string
	var err error

	if ann == nil {
		// No annotations, so we can't get the provisioner and don't know
		// whether this is migrated - no change
		return false
	}
	var provisionerKey string
	if claim {
		provisionerKey = storagehelpers.AnnStorageProvisioner
	} else {
		provisionerKey = storagehelpers.AnnDynamicallyProvisioned
	}
	provisioner, ok := ann[provisionerKey]
	if !ok {
		if claim {
			// Also check the beta AnnBetaStorageProvisioner annotation to make
			// sure claims using the deprecated key are not missed.
			provisioner, ok = ann[storagehelpers.AnnBetaStorageProvisioner]
			if !ok {
				return false
			}
		} else {
			// Volume was statically provisioned.
			return false
		}
	}

	migratedToDriver := ann[storagehelpers.AnnMigratedTo]
	if cmpm.IsMigrationEnabledForPlugin(provisioner) {
		csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner)
		if err != nil {
			logger.Error(err, "Could not update volume migration annotations. Migration enabled for plugin but could not find corresponding driver name", "plugin", provisioner)
			return false
		}
		if migratedToDriver != csiDriverName {
			ann[storagehelpers.AnnMigratedTo] = csiDriverName
			return true
		}
	} else {
		if migratedToDriver != "" {
			// Migration annotation exists but the driver isn't migrated currently
			delete(ann, storagehelpers.AnnMigratedTo)
			return true
		}
	}
	return false
}
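
// A minimal sketch of the annotation rewrite, assuming migration is enabled
// for the in-tree GCE PD plugin (the driver name comes from the CSI
// translation library):
//
//	ann := map[string]string{
//		storagehelpers.AnnDynamicallyProvisioned: "kubernetes.io/gce-pd",
//	}
//	modified := updateMigrationAnnotations(logger, cmpm, translator, ann, false)
//	// modified == true and ann now also carries
//	// "pv.kubernetes.io/migrated-to": "pd.csi.storage.gke.io";
//	// with migration disabled again, the next call deletes that key.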

// volumeWorker processes items from volumeQueue. Only one instance may run at
// a time, because syncVolume is not guaranteed to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
	logger := klog.FromContext(ctx)
	workFunc := func(ctx context.Context) bool {
		keyObj, quit := ctrl.volumeQueue.Get()
		if quit {
			return true
		}
		defer ctrl.volumeQueue.Done(keyObj)
		key := keyObj.(string)
		logger.V(5).Info("volumeWorker", "volumeKey", key)

		_, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			logger.V(4).Info("Error getting name of volume to get volume from informer", "volumeKey", key, "err", err)
			return false
		}
		volume, err := ctrl.volumeLister.Get(name)
		if err == nil {
			// The volume still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateVolume(ctx, volume)
			return false
		}
		if !errors.IsNotFound(err) {
			logger.V(2).Info("Error getting volume from informer", "volumeKey", key, "err", err)
			return false
		}

		// The volume is not in informer cache, the event must have been
		// "delete"
		volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
		if err != nil {
			logger.V(2).Info("Error getting volume from cache", "volumeKey", key, "err", err)
			return false
		}
		if !found {
			// The controller has already processed the delete event and
			// deleted the volume from its cache
			logger.V(2).Info("Deletion of volume was already processed", "volumeKey", key)
			return false
		}
		volume, ok := volumeObj.(*v1.PersistentVolume)
		if !ok {
			logger.Error(nil, "Expected volume, got unexpected object", "obj", volumeObj)
			return false
		}
		ctrl.deleteVolume(ctx, volume)
		return false
	}
	for {
		if quit := workFunc(ctx); quit {
			logger.Info("Volume worker queue shutting down")
			return
		}
	}
}

// claimWorker processes items from claimQueue. Only one instance may run at a
// time, because syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
	logger := klog.FromContext(ctx)
	workFunc := func() bool {
		keyObj, quit := ctrl.claimQueue.Get()
		if quit {
			return true
		}
		defer ctrl.claimQueue.Done(keyObj)
		key := keyObj.(string)
		logger.V(5).Info("claimWorker", "claimKey", key)

		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			logger.V(4).Info("Error getting namespace & name of claim to get claim from informer", "claimKey", key, "err", err)
			return false
		}
		claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
		if err == nil {
			// The claim still exists in informer cache, the event must have
			// been add/update/sync
			ctrl.updateClaim(ctx, claim)
			return false
		}
		if !errors.IsNotFound(err) {
			logger.V(2).Info("Error getting claim from informer", "claimKey", key, "err", err)
			return false
		}

		// The claim is not in informer cache, the event must have been "delete"
		claimObj, found, err := ctrl.claims.GetByKey(key)
		if err != nil {
			logger.V(2).Info("Error getting claim from cache", "claimKey", key, "err", err)
			return false
		}
		if !found {
			// The controller has already processed the delete event and
			// deleted the claim from its cache
			logger.V(2).Info("Deletion of claim was already processed", "claimKey", key)
			return false
		}
		claim, ok := claimObj.(*v1.PersistentVolumeClaim)
		if !ok {
			logger.Error(nil, "Expected claim, got unexpected object", "obj", claimObj)
			return false
		}
		ctrl.deleteClaim(ctx, claim)
		return false
	}
	for {
		if quit := workFunc(); quit {
			logger.Info("Claim worker queue shutting down")
			return
		}
	}
}

// resync supplements the short resync period of the shared informers; we don't
// want all consumers of the PV/PVC shared informers to have a short resync
// period, therefore we do our own.
func (ctrl *PersistentVolumeController) resync(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Resyncing PV controller")

	pvcs, err := ctrl.claimLister.List(labels.NewSelector())
	if err != nil {
		logger.Info("Cannot list claims", "err", err)
		return
	}
	for _, pvc := range pvcs {
		ctrl.enqueueWork(ctx, ctrl.claimQueue, pvc)
	}

	pvs, err := ctrl.volumeLister.List(labels.NewSelector())
	if err != nil {
		logger.Info("Cannot list persistent volumes", "err", err)
		return
	}
	for _, pv := range pvs {
		ctrl.enqueueWork(ctx, ctrl.volumeQueue, pv)
	}
}

// setClaimProvisioner saves
// claim.Annotations["volume.kubernetes.io/storage-provisioner"] = class.Provisioner
func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context, claim *v1.PersistentVolumeClaim, provisionerName string) (*v1.PersistentVolumeClaim, error) {
	if val, ok := claim.Annotations[storagehelpers.AnnStorageProvisioner]; ok && val == provisionerName {
		// annotation is already set, nothing to do
		return claim, nil
	}

	// The claim from the method args can be pointing to the watcher cache. We
	// must not modify it, therefore create a copy.
	claimClone := claim.DeepCopy()
	// TODO: remove the beta storage provisioner anno after the deprecation period
	logger := klog.FromContext(ctx)
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName)
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName)
	updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
	newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
	if err != nil {
		return newClaim, err
	}
	_, err = ctrl.storeClaimUpdate(logger, newClaim)
	if err != nil {
		return newClaim, err
	}
	return newClaim, nil
}
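
// After a successful call, the claim carries both provisioner annotations; a
// sketch of the resulting metadata (the beta key is kept only for backward
// compatibility during the deprecation period):
//
//	claim.Annotations["volume.beta.kubernetes.io/storage-provisioner"] == provisionerName
//	claim.Annotations["volume.kubernetes.io/storage-provisioner"] == provisionerName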

// Stateless functions

func getClaimStatusForLogging(claim *v1.PersistentVolumeClaim) string {
	bound := metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBindCompleted)
	boundByController := metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBoundByController)

	return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
}

func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
	boundByController := metav1.HasAnnotation(volume.ObjectMeta, storagehelpers.AnnBoundByController)
	claimName := ""
	if volume.Spec.ClaimRef != nil {
		claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
	}
	return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
}

// storeObjectUpdate updates the given cache with a new object version from an
// informer callback (i.e. with events from etcd) or with an object modified by
// the controller itself. Returns true if the cache was updated, and false if
// the object is an old version and should be ignored.
// Note: objType is the kind of object being cached ("volume" or "claim"), not
// a storage class name; earlier revisions logged it misleadingly as
// "storageClassName".
func storeObjectUpdate(logger klog.Logger, store cache.Store, obj interface{}, objType string) (bool, error) {
	objName, err := controller.KeyFunc(obj)
	if err != nil {
		return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err)
	}
	oldObj, found, err := store.Get(obj)
	if err != nil {
		return false, fmt.Errorf("error finding %s %q in controller cache: %w", objType, objName, err)
	}

	objAccessor, err := meta.Accessor(obj)
	if err != nil {
		return false, err
	}
	if !found {
		// This is a new object
		logger.V(4).Info("storeObjectUpdate, adding obj", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
		if err = store.Add(obj); err != nil {
			return false, fmt.Errorf("error adding %s %q to controller cache: %w", objType, objName, err)
		}
		return true, nil
	}

	oldObjAccessor, err := meta.Accessor(oldObj)
	if err != nil {
		return false, err
	}

	objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing ResourceVersion %q of %s %q: %w", objAccessor.GetResourceVersion(), objType, objName, err)
	}
	oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing old ResourceVersion %q of %s %q: %w", oldObjAccessor.GetResourceVersion(), objType, objName, err)
	}

	// Throw away only older versions; let the same version pass, because we do
	// want to get periodic sync events.
	if oldObjResourceVersion > objResourceVersion {
		logger.V(4).Info("storeObjectUpdate: ignoring obj", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
		return false, nil
	}

	logger.V(4).Info("storeObjectUpdate updating obj with version", "objType", objType, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
	if err = store.Update(obj); err != nil {
		return false, fmt.Errorf("error updating %s %q in controller cache: %w", objType, objName, err)
	}
	return true, nil
}
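
// A behavioural sketch of the version check above, assuming a hypothetical
// helper pvAtRV(rv string) that returns a *v1.PersistentVolume named "pv-1"
// with the given ResourceVersion:
//
//	updated, _ := storeObjectUpdate(logger, store, pvAtRV("7"), "volume") // true: new object, added
//	updated, _ = storeObjectUpdate(logger, store, pvAtRV("9"), "volume")  // true: newer version, updated
//	updated, _ = storeObjectUpdate(logger, store, pvAtRV("9"), "volume")  // true: same version passes (periodic sync)
//	updated, _ = storeObjectUpdate(logger, store, pvAtRV("5"), "volume")  // false: older version ignored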