k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/storage/testsuites/volume_expand.go (about)

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"time"
    23  
    24  	"github.com/onsi/ginkgo/v2"
    25  	"github.com/onsi/gomega"
    26  
    27  	v1 "k8s.io/api/core/v1"
    28  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    29  	"k8s.io/apimachinery/pkg/api/resource"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/util/errors"
    32  	"k8s.io/apimachinery/pkg/util/wait"
    33  	clientset "k8s.io/client-go/kubernetes"
    34  	"k8s.io/kubernetes/test/e2e/framework"
    35  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    36  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    37  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    38  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    39  	admissionapi "k8s.io/pod-security-admission/api"
    40  )
    41  
const (
	// interval between polls while waiting for resize-related state changes
	resizePollInterval = 2 * time.Second
	// total time to wait for cloudprovider or file system resize to finish
	totalResizeWaitPeriod = 10 * time.Minute

	// resizedPodStartupTimeout defines time we should wait for pod that uses offline
	// resized volume to startup. This time is higher than default PodStartTimeout because
	// typically time to detach and then attach a volume is amortized in this time duration.
	resizedPodStartupTimeout = 10 * time.Minute

	// time to wait for PVC conditions to sync
	pvcConditionSyncPeriod = 2 * time.Minute
)
    55  
// volumeExpandTestSuite implements the storageframework.TestSuite interface
// for volume expansion tests.
type volumeExpandTestSuite struct {
	tsInfo storageframework.TestSuiteInfo // suite name, test patterns, and supported size range
}
    59  
    60  // InitCustomVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
    61  // using custom test patterns
    62  func InitCustomVolumeExpandTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
    63  	return &volumeExpandTestSuite{
    64  		tsInfo: storageframework.TestSuiteInfo{
    65  			Name:         "volume-expand",
    66  			TestPatterns: patterns,
    67  			SupportedSizeRange: e2evolume.SizeRange{
    68  				Min: "1Gi",
    69  			},
    70  		},
    71  	}
    72  }
    73  
    74  // InitVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
    75  // using testsuite default patterns
    76  func InitVolumeExpandTestSuite() storageframework.TestSuite {
    77  	patterns := []storageframework.TestPattern{
    78  		storageframework.DefaultFsDynamicPV,
    79  		storageframework.BlockVolModeDynamicPV,
    80  		storageframework.DefaultFsDynamicPVAllowExpansion,
    81  		storageframework.BlockVolModeDynamicPVAllowExpansion,
    82  		storageframework.NtfsDynamicPV,
    83  		storageframework.NtfsDynamicPVAllowExpansion,
    84  	}
    85  	return InitCustomVolumeExpandTestSuite(patterns)
    86  }
    87  
    88  func (v *volumeExpandTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
    89  	return v.tsInfo
    90  }
    91  
    92  func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
    93  	// Check preconditions.
    94  	if !driver.GetDriverInfo().Capabilities[storageframework.CapControllerExpansion] {
    95  		e2eskipper.Skipf("Driver %q does not support volume expansion - skipping", driver.GetDriverInfo().Name)
    96  	}
    97  	// Check preconditions.
    98  	if !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
    99  		e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", driver.GetDriverInfo().Name)
   100  	}
   101  }
   102  
// DefineTests registers the volume-expand test cases for the given driver and
// pattern. Patterns without AllowVolumeExpansion get a negative test (the PVC
// update must be rejected); patterns with it get an offline-expansion test
// (expand while no pod uses the volume) and an online-expansion test (expand
// while a pod is using it), each gated on the matching driver capability.
func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	// local holds per-test state; init() recreates it for every It() so
	// individual test cases cannot leak state into each other.
	type local struct {
		config *storageframework.PerTestConfig

		resource *storageframework.VolumeResource
		pod      *v1.Pod
		pod2     *v1.Pod

		migrationCheck *migrationOpCheck
	}
	var l local

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	// init prepares the driver, the in-tree-migration metrics check, and the
	// volume resource (StorageClass/PVC/PV) for one test case.
	init := func(ctx context.Context) {
		l = local{}

		// Now do the more expensive test initialization.
		l.config = driver.PrepareTest(ctx, f)
		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName)
		testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange
		l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange)
	}

	// cleanup deletes any pods the test created, releases the volume resource,
	// and validates migration metrics. Errors are aggregated so every step runs
	// even if an earlier one fails.
	cleanup := func(ctx context.Context) {
		var errs []error
		if l.pod != nil {
			ginkgo.By("Deleting pod")
			err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod)
			errs = append(errs, err)
			l.pod = nil
		}

		if l.pod2 != nil {
			ginkgo.By("Deleting pod2")
			err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod2)
			errs = append(errs, err)
			l.pod2 = nil
		}

		if l.resource != nil {
			errs = append(errs, l.resource.CleanupResource(ctx))
			l.resource = nil
		}

		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
		l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
	}

	if !pattern.AllowExpansion {
		// Negative case: with AllowVolumeExpansion unset/false on the
		// StorageClass, the apiserver must reject a PVC size increase.
		ginkgo.It("should not allow expansion of pvcs without AllowVolumeExpansion property", func(ctx context.Context) {
			init(ctx)
			ginkgo.DeferCleanup(cleanup)

			var err error
			// create Pod with pvc
			ginkgo.By("Creating a pod with PVC")
			podConfig := e2epod.Config{
				NS:            f.Namespace.Name,
				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
				SeLinuxLabel:  e2epod.GetLinuxLabel(),
				NodeSelection: l.config.ClientNodeSelection,
				ImageID:       e2epod.GetDefaultTestImageID(),
			}
			l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart)
			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
			framework.ExpectNoError(err, "While creating pods for expanding")

			// Waiting for pod to run
			ginkgo.By("Waiting for pod to run")
			err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
			framework.ExpectNoError(err)

			// Sanity-check that the StorageClass really forbids expansion
			// before asserting that the PVC update is rejected.
			gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
			allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion
			gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())
			ginkgo.By("Expanding non-expandable pvc")
			currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("1Gi"))
			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
			_, err = ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet)
			// The apiserver rejects the update with a Forbidden error.
			gomega.Expect(err).To(gomega.MatchError(apierrors.IsForbidden, "While updating non-expandable PVC"))
		})
	} else {
		// Offline expansion: the PVC is expanded while no pod is attached,
		// and the filesystem resize happens when a new pod mounts the volume.
		ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) {
			init(ctx)
			ginkgo.DeferCleanup(cleanup)

			if !driver.GetDriverInfo().Capabilities[storageframework.CapOfflineExpansion] {
				e2eskipper.Skipf("Driver %q does not support offline volume expansion - skipping", driver.GetDriverInfo().Name)
			}

			var err error
			ginkgo.By("Creating a pod with dynamically provisioned volume")
			podConfig := e2epod.Config{
				NS:            f.Namespace.Name,
				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
				SeLinuxLabel:  e2epod.GetLinuxLabel(),
				NodeSelection: l.config.ClientNodeSelection,
				ImageID:       e2epod.GetDefaultTestImageID(),
			}
			l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart)
			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
			framework.ExpectNoError(err, "While creating pods for resizing")

			// The pod is deleted so that the volume is detached before the
			// expansion request — that is what makes this "offline".
			ginkgo.By("Deleting the previously created pod")
			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod)
			framework.ExpectNoError(err, "while deleting pod for resizing")

			// We expand the PVC while no pod is using it to ensure offline expansion
			ginkgo.By("Expanding current pvc")
			currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("1Gi"))
			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
			newPVC, err := ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet)
			framework.ExpectNoError(err, "While updating pvc for more size")
			l.resource.Pvc = newPVC
			gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil())

			pvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
			if pvcSize.Cmp(newSize) != 0 {
				framework.Failf("error updating pvc size %q", l.resource.Pvc.Name)
			}

			ginkgo.By("Waiting for cloudprovider resize to finish")
			err = WaitForControllerVolumeResize(ctx, l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod)
			framework.ExpectNoError(err, "While waiting for pvc resize to finish")

			ginkgo.By("Checking for conditions on pvc")
			npvc, err := WaitForPendingFSResizeCondition(ctx, l.resource.Pvc, f.ClientSet)
			framework.ExpectNoError(err, "While waiting for pvc to have fs resizing condition")
			l.resource.Pvc = npvc

			// Mounting the volume in a new pod triggers the node-side (file
			// system) part of the resize.
			ginkgo.By("Creating a new pod with same volume")
			podConfig = e2epod.Config{
				NS:            f.Namespace.Name,
				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
				SeLinuxLabel:  e2epod.GetLinuxLabel(),
				NodeSelection: l.config.ClientNodeSelection,
				ImageID:       e2epod.GetDefaultTestImageID(),
			}
			// Use the longer resizedPodStartupTimeout: detach + attach of the
			// resized volume is amortized into pod startup here.
			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, resizedPodStartupTimeout)
			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod2)
			framework.ExpectNoError(err, "while recreating pod for resizing")

			ginkgo.By("Waiting for file system resize to finish")
			l.resource.Pvc, err = WaitForFSResize(ctx, l.resource.Pvc, f.ClientSet)
			framework.ExpectNoError(err, "while waiting for fs resize to finish")

			// Once resize completes, all resize-related conditions must be cleared.
			pvcConditions := l.resource.Pvc.Status.Conditions
			gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
		})

		// Online expansion: the PVC is expanded while a pod is actively using
		// the volume; no pod restart should be required.
		ginkgo.It("should resize volume when PVC is edited while pod is using it", func(ctx context.Context) {
			init(ctx)
			ginkgo.DeferCleanup(cleanup)

			if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
				e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
			}

			var err error
			ginkgo.By("Creating a pod with dynamically provisioned volume")
			podConfig := e2epod.Config{
				NS:            f.Namespace.Name,
				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
				SeLinuxLabel:  e2epod.GetLinuxLabel(),
				NodeSelection: l.config.ClientNodeSelection,
				ImageID:       e2epod.GetDefaultTestImageID(),
			}
			l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart)
			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
			framework.ExpectNoError(err, "While creating pods for resizing")

			// We expand the PVC while l.pod is using it for online expansion.
			ginkgo.By("Expanding current pvc")
			currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("1Gi"))
			framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
			newPVC, err := ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet)
			framework.ExpectNoError(err, "While updating pvc for more size")
			l.resource.Pvc = newPVC
			gomega.Expect(l.resource.Pvc).NotTo(gomega.BeNil())

			pvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
			if pvcSize.Cmp(newSize) != 0 {
				framework.Failf("error updating pvc size %q", l.resource.Pvc.Name)
			}

			ginkgo.By("Waiting for cloudprovider resize to finish")
			err = WaitForControllerVolumeResize(ctx, l.resource.Pvc, f.ClientSet, totalResizeWaitPeriod)
			framework.ExpectNoError(err, "While waiting for pvc resize to finish")

			ginkgo.By("Waiting for file system resize to finish")
			l.resource.Pvc, err = WaitForFSResize(ctx, l.resource.Pvc, f.ClientSet)
			framework.ExpectNoError(err, "while waiting for fs resize to finish")

			// Once resize completes, all resize-related conditions must be cleared.
			pvcConditions := l.resource.Pvc.Status.Conditions
			gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
		})

	}
}
   312  
   313  // ExpandPVCSize expands PVC size
   314  func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
   315  	pvcName := origPVC.Name
   316  	updatedPVC := origPVC.DeepCopy()
   317  
   318  	// Retry the update on error, until we hit a timeout.
   319  	// TODO: Determine whether "retry with timeout" is appropriate here. Maybe we should only retry on version conflict.
   320  	var lastUpdateError error
   321  	waitErr := wait.PollImmediate(resizePollInterval, 30*time.Second, func() (bool, error) {
   322  		var err error
   323  		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
   324  		if err != nil {
   325  			return false, fmt.Errorf("error fetching pvc %q for resizing: %w", pvcName, err)
   326  		}
   327  
   328  		updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
   329  		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Update(ctx, updatedPVC, metav1.UpdateOptions{})
   330  		if err != nil {
   331  			framework.Logf("Error updating pvc %s: %v", pvcName, err)
   332  			lastUpdateError = err
   333  			return false, nil
   334  		}
   335  		return true, nil
   336  	})
   337  	if wait.Interrupted(waitErr) {
   338  		return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %w", lastUpdateError)
   339  	}
   340  	if waitErr != nil {
   341  		return nil, fmt.Errorf("failed to expand PVC size (check logs for error): %v", waitErr)
   342  	}
   343  	return updatedPVC, nil
   344  }
   345  
   346  // WaitForResizingCondition waits for the pvc condition to be PersistentVolumeClaimResizing
   347  func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error {
   348  	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, duration, true, func(ctx context.Context) (bool, error) {
   349  		var err error
   350  		updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
   351  
   352  		if err != nil {
   353  			return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
   354  		}
   355  
   356  		pvcConditions := updatedPVC.Status.Conditions
   357  		if len(pvcConditions) > 0 {
   358  			if pvcConditions[0].Type == v1.PersistentVolumeClaimResizing {
   359  				return true, nil
   360  			}
   361  		}
   362  		return false, nil
   363  	})
   364  	if waitErr != nil {
   365  		return fmt.Errorf("error waiting for pvc %q to have resize status: %v", pvc.Name, waitErr)
   366  	}
   367  	return nil
   368  }
   369  
   370  // WaitForControllerVolumeResize waits for the controller resize to be finished
   371  func WaitForControllerVolumeResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface, timeout time.Duration) error {
   372  	pvName := pvc.Spec.VolumeName
   373  	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, timeout, true, func(ctx context.Context) (bool, error) {
   374  		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
   375  
   376  		pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
   377  		if err != nil {
   378  			return false, fmt.Errorf("error fetching pv %q for resizing %v", pvName, err)
   379  		}
   380  
   381  		pvSize := pv.Spec.Capacity[v1.ResourceStorage]
   382  
   383  		// If pv size is greater or equal to requested size that means controller resize is finished.
   384  		if pvSize.Cmp(pvcSize) >= 0 {
   385  			return true, nil
   386  		}
   387  		return false, nil
   388  	})
   389  	if waitErr != nil {
   390  		return fmt.Errorf("error while waiting for controller resize to finish: %v", waitErr)
   391  	}
   392  	return nil
   393  }
   394  
   395  // WaitForPendingFSResizeCondition waits for pvc to have resize condition
   396  func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
   397  	var updatedPVC *v1.PersistentVolumeClaim
   398  	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, pvcConditionSyncPeriod, true, func(ctx context.Context) (bool, error) {
   399  		var err error
   400  		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
   401  
   402  		if err != nil {
   403  			return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
   404  		}
   405  
   406  		inProgressConditions := updatedPVC.Status.Conditions
   407  		// if there are no PVC conditions that means no node expansion is necessary
   408  		if len(inProgressConditions) == 0 {
   409  			return true, nil
   410  		}
   411  		conditionType := inProgressConditions[0].Type
   412  		if conditionType == v1.PersistentVolumeClaimFileSystemResizePending {
   413  			return true, nil
   414  		}
   415  		return false, nil
   416  	})
   417  	if waitErr != nil {
   418  		return nil, fmt.Errorf("error waiting for pvc %q to have filesystem resize status: %v", pvc.Name, waitErr)
   419  	}
   420  	return updatedPVC, nil
   421  }
   422  
   423  // WaitForFSResize waits for the filesystem in the pv to be resized
   424  func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clientset.Interface) (*v1.PersistentVolumeClaim, error) {
   425  	var updatedPVC *v1.PersistentVolumeClaim
   426  	waitErr := wait.PollUntilContextTimeout(ctx, resizePollInterval, totalResizeWaitPeriod, true, func(ctx context.Context) (bool, error) {
   427  		var err error
   428  		updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
   429  
   430  		if err != nil {
   431  			return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
   432  		}
   433  
   434  		pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]
   435  		pvcStatusSize := updatedPVC.Status.Capacity[v1.ResourceStorage]
   436  
   437  		//If pvc's status field size is greater than or equal to pvc's size then done
   438  		if pvcStatusSize.Cmp(pvcSize) >= 0 {
   439  			return true, nil
   440  		}
   441  		return false, nil
   442  	})
   443  	if waitErr != nil {
   444  		return nil, fmt.Errorf("error waiting for pvc %q filesystem resize to finish: %v", pvc.Name, waitErr)
   445  	}
   446  	return updatedPVC, nil
   447  }