k8s.io/kubernetes@v1.29.3/pkg/volume/vsphere_volume/attacher.go

//go:build !providerless
// +build !providerless

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere_volume

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"k8s.io/klog/v2"
	"k8s.io/mount-utils"
	"k8s.io/utils/keymutex"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/legacy-cloud-providers/vsphere"
)

type vsphereVMDKAttacher struct {
	host           volume.VolumeHost
	vsphereVolumes vsphere.Volumes
}

var _ volume.Attacher = &vsphereVMDKAttacher{}

var _ volume.DeviceMounter = &vsphereVMDKAttacher{}

var _ volume.AttachableVolumePlugin = &vsphereVolumePlugin{}

var _ volume.DeviceMountableVolumePlugin = &vsphereVolumePlugin{}

// Singleton key mutex for keeping attach operations for the same host atomic
var attachdetachMutex = keymutex.NewHashed(0)

func (plugin *vsphereVolumePlugin) NewAttacher() (volume.Attacher, error) {
	vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}

	return &vsphereVMDKAttacher{
		host:           plugin.host,
		vsphereVolumes: vsphereCloud,
	}, nil
}

func (plugin *vsphereVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
	return plugin.NewAttacher()
}

// Attach attaches the volume specified by the given spec to the given host.
// On success, returns the device path where the device was attached on the
// node.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	klog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName)

	// Keeps concurrent attach operations to same host atomic
	attachdetachMutex.LockKey(string(nodeName))
	defer attachdetachMutex.UnlockKey(string(nodeName))

	// vsphereCloud.AttachDisk checks if disk is already attached to host and
	// succeeds in that case, so no need to do that separately.
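	// AttachDisk returns the SCSI UUID of the attached disk; the device path
	// handed back to the caller below is diskByIDPath + diskSCSIPrefix + diskUUID.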
	diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName)
	if err != nil {
		klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err)
		return "", err
	}

	return filepath.Join(diskByIDPath, diskSCSIPrefix+diskUUID), nil
}

func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName)
	volumeNodeMap := map[types.NodeName][]*volume.Spec{
		nodeName: specs,
	}
	nodeVolumesResult := make(map[*volume.Spec]bool)
	nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
	if err != nil {
		klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
		return nodeVolumesResult, err
	}
	if result, ok := nodesVerificationMap[nodeName]; ok {
		return result, nil
	}
	return nodeVolumesResult, nil
}

func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool)
	volumePathsByNode := make(map[types.NodeName][]string)
	volumeSpecMap := make(map[string]*volume.Spec)

	for nodeName, volumeSpecs := range volumesByNode {
		for _, volumeSpec := range volumeSpecs {
			volumeSource, _, err := getVolumeSource(volumeSpec)
			if err != nil {
				klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
				continue
			}
			volPath := volumeSource.VolumePath
			volumePathsByNode[nodeName] = append(volumePathsByNode[nodeName], volPath)
			nodeVolume, nodeVolumeExists := volumesAttachedCheck[nodeName]
			if !nodeVolumeExists {
				nodeVolume = make(map[*volume.Spec]bool)
			}
			nodeVolume[volumeSpec] = true
			volumeSpecMap[volPath] = volumeSpec
			volumesAttachedCheck[nodeName] = nodeVolume
		}
	}
	attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode)
	if err != nil {
		klog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err)
		return volumesAttachedCheck, err
	}

	for nodeName, nodeVolumes := range attachedResult {
		for volumePath, attached := range nodeVolumes {
			if !attached {
				spec := volumeSpecMap[volumePath]
				setNodeVolume(volumesAttachedCheck, spec, nodeName, false)
			}
		}
	}
	return volumesAttachedCheck, nil
}

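// WaitForAttach polls the node until a device path shows up for the attached
// VMDK, checking every checkSleepDuration and giving up when the supplied
// timeout expires.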
func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	if devicePath == "" {
		return "", fmt.Errorf("WaitForAttach failed for VMDK %q: devicePath is empty", volumeSource.VolumePath)
	}

	ticker := time.NewTicker(checkSleepDuration)
	defer ticker.Stop()

	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			klog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath)
			path, err := verifyDevicePath(devicePath)
			if err != nil {
				// Log error, if any, and continue checking periodically. See issue #11321
				klog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err)
			} else if path != "" {
				// A device path has successfully been created for the VMDK
				klog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath)
				return path, nil
			}
		case <-timer.C:
			return "", fmt.Errorf("could not find attached VMDK %q. Timeout waiting for mount paths to be created", volumeSource.VolumePath)
		}
	}
}

// GetDeviceMountPath returns the path where the device should be mounted;
// individual volumes are later bind mounted from this path.
func (attacher *vsphereVMDKAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	return makeGlobalPDPath(attacher.host, volumeSource.VolumePath), nil
}

// GetDeviceMountRefs finds all other references to the device referenced
// by deviceMountPath; returns a list of paths.
func (plugin *vsphereVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	return mounter.GetMountRefs(deviceMountPath)
}

// MountDevice mounts device to global mount point.
func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
	klog.Infof("vsphere MountDevice mount %s to %s", devicePath, deviceMountPath)
	mounter := attacher.host.GetMounter(vsphereVolumePluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			dir := deviceMountPath
			if runtime.GOOS == "windows" {
				dir = filepath.Dir(deviceMountPath)
			}
			if err := os.MkdirAll(dir, 0750); err != nil {
				klog.Errorf("Failed to create directory at %#v. err: %s", dir, err)
				return err
			}
			notMnt = true
		} else {
			return err
		}
	}

	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}

	if notMnt {
		diskMounter := volumeutil.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
		mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
		if err != nil {
			os.Remove(deviceMountPath)
			return err
		}
		klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
	}
	return nil
}

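// vsphereVMDKDetacher is the detach-side counterpart of vsphereVMDKAttacher:
// it detaches VMDKs from nodes and unmounts the global mount point.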
type vsphereVMDKDetacher struct {
	mounter        mount.Interface
	vsphereVolumes vsphere.Volumes
}

var _ volume.Detacher = &vsphereVMDKDetacher{}

var _ volume.DeviceUnmounter = &vsphereVMDKDetacher{}

func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
	vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
	if err != nil {
		return nil, err
	}

	return &vsphereVMDKDetacher{
		mounter:        plugin.host.GetMounter(plugin.GetPluginName()),
		vsphereVolumes: vsphereCloud,
	}, nil
}

func (plugin *vsphereVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
	return plugin.NewDetacher()
}

// Detach detaches the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error {
	volPath := getVolPathfromVolumeName(volumeName)
	attached, newVolumePath, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
	if err != nil {
		// Log error and continue with detach
		klog.Errorf(
			"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
			volPath, nodeName, err)
	}

	if err == nil && !attached {
		// Volume is already detached from node.
		klog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName)
		return nil
	}

	attachdetachMutex.LockKey(string(nodeName))
	defer attachdetachMutex.UnlockKey(string(nodeName))
	if err := detacher.vsphereVolumes.DetachDisk(newVolumePath, nodeName); err != nil {
		klog.Errorf("Error detaching volume %q: %v", volPath, err)
		return err
	}
	return nil
}

func (detacher *vsphereVMDKDetacher) UnmountDevice(deviceMountPath string) error {
	return mount.CleanupMountPoint(deviceMountPath, detacher.mounter, false)
}

func (plugin *vsphereVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
	return true, nil
}

func (plugin *vsphereVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
	return true, nil
}

func setNodeVolume(
	nodeVolumeMap map[types.NodeName]map[*volume.Spec]bool,
	volumeSpec *volume.Spec,
	nodeName types.NodeName,
	check bool) {

	volumeMap := nodeVolumeMap[nodeName]
	if volumeMap == nil {
		volumeMap = make(map[*volume.Spec]bool)
		nodeVolumeMap[nodeName] = volumeMap
	}
	volumeMap[volumeSpec] = check
}
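
// For orientation, the typical in-tree flow that exercises this file looks
// roughly like the sketch below (identifiers such as plugin, spec, nodeName,
// pod, and timeout are illustrative; error handling is omitted):
//
//	attacher, _ := plugin.NewAttacher()
//	devicePath, _ := attacher.Attach(spec, nodeName)                       // attach/detach controller
//	devicePath, _ = attacher.WaitForAttach(spec, devicePath, pod, timeout) // kubelet, node side
//	mounter, _ := plugin.NewDeviceMounter()
//	deviceMountPath, _ := mounter.GetDeviceMountPath(spec)
//	_ = mounter.MountDevice(spec, devicePath, deviceMountPath, volume.DeviceMounterArgs{})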