github.com/IBM-Blockchain/fabric-operator@v1.0.4/integration/nativeresourcepoller.go

/*
 * Copyright contributors to the Hyperledger Fabric Operator project
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package integration

import (
	"context"
	"fmt"
	"strings"

	. "github.com/onsi/ginkgo/v2"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// NativeResourcePoller polls the native Kubernetes resources (PVCs, ingresses,
// services, config maps, deployments, and pods) created for the named component
// in the given namespace. The integration tests use it to wait for resources to
// appear and to reach a running state.
type NativeResourcePoller struct {
	Name      string
	Namespace string
	Client    *kubernetes.Clientset
	retry     int
}
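// Illustrative sketch of how this poller can be driven from a Ginkgo/Gomega
// test: build a client-go Clientset, construct the poller, and poll with
// Gomega's Eventually until the operator has created the expected resources.
// The kubeconfig path, namespace, and component name below are hypothetical,
// and clientcmd refers to k8s.io/client-go/tools/clientcmd.
//
//	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
//	Expect(err).NotTo(HaveOccurred())
//	clientSet, err := kubernetes.NewForConfig(config)
//	Expect(err).NotTo(HaveOccurred())
//
//	poller := &NativeResourcePoller{
//		Name:      "org1peer1",
//		Namespace: "operator-test",
//		Client:    clientSet,
//	}
//
//	// Wait for the deployment to be created, then for its pod to come up.
//	Eventually(poller.DeploymentExists, "5m", "5s").Should(BeTrue())
//	Eventually(poller.PodIsRunning, "5m", "5s").Should(BeTrue())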
func (p *NativeResourcePoller) PVCExists() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	pvcList, err := p.Client.CoreV1().PersistentVolumeClaims(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, pvc := range pvcList.Items {
		if strings.HasPrefix(pvc.Name, p.Name) {
			return true
		}
	}

	return false
}

func (p *NativeResourcePoller) IngressExists() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	ingressList, err := p.Client.NetworkingV1().Ingresses(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, ingress := range ingressList.Items {
		if strings.HasPrefix(ingress.Name, p.Name) {
			return true
		}
	}

	return false
}

func (p *NativeResourcePoller) ServiceExists() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	serviceList, err := p.Client.CoreV1().Services(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, service := range serviceList.Items {
		if strings.HasPrefix(service.Name, p.Name) {
			return true
		}
	}

	return false
}

func (p *NativeResourcePoller) ConfigMapExists() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	cmList, err := p.Client.CoreV1().ConfigMaps(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, cm := range cmList.Items {
		if strings.HasPrefix(cm.Name, p.Name) {
			return true
		}
	}

	return false
}

func (p *NativeResourcePoller) DeploymentExists() bool {
	dep, err := p.Client.AppsV1().Deployments(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
	if err == nil && dep != nil {
		return true
	}

	return false
}

func (p *NativeResourcePoller) Deployment() *appsv1.Deployment {
	deps := p.DeploymentList()
	if len(deps.Items) > 0 {
		return &deps.Items[0]
	}
	return nil
}

func (p *NativeResourcePoller) DeploymentList() *appsv1.DeploymentList {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return &appsv1.DeploymentList{}
	}
	return deps
}

func (p *NativeResourcePoller) NumberOfDeployments() int {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return 0
	}

	return len(deps.Items)
}

func (p *NativeResourcePoller) NumberOfOrdererNodeDeployments() int {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("parent=%s", p.Name),
	}

	deps, err := p.Client.AppsV1().Deployments(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return 0
	}

	return len(deps.Items)
}
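// Sketch: for an orderer, the node deployments are selected by a
// parent=<name> label, so a test can wait until the expected cluster size is
// reached. The three-node count and the intervals below are hypothetical and
// assume Gomega's Eventually is available.
//
//	Eventually(poller.NumberOfOrdererNodeDeployments, "10m", "10s").Should(Equal(3))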
// IsRunning returns true once a pod matching the poller's name is in the
// Running phase with all of its containers ready. A pod stuck in Pending with
// a CreateContainerConfigError is deleted once so its deployment can recreate it.
func (p *NativeResourcePoller) IsRunning() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("name=%s", p.Name),
	}
	podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, pod := range podList.Items {
		if strings.HasPrefix(pod.Name, p.Name) {
			if pod.Status.Phase == corev1.PodRunning {
				containerStatuses := pod.Status.ContainerStatuses
				for _, status := range containerStatuses {
					if status.State.Running == nil {
						return false
					}
					if !status.Ready {
						return false
					}
				}
				return true
			} else if pod.Status.Phase == corev1.PodPending {
				if p.retry == 0 {
					if len(pod.Status.InitContainerStatuses) == 0 {
						return false
					}
					initContainerStatuses := pod.Status.InitContainerStatuses
					for _, status := range initContainerStatuses {
						if status.State.Waiting != nil {
							if status.State.Waiting.Reason == "CreateContainerConfigError" {
								// Handling this error will make no difference
								_ = p.Client.CoreV1().Pods(p.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
								p.retry = 1
							}
						}
					}
				}
			}
		}
	}

	return false
}

// PodCreated returns true if a pod has been created based on the app name
func (p *NativeResourcePoller) PodCreated() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	if len(podList.Items) != 0 {
		return true
	}
	return false
}

// PodIsRunning returns true once a pod selected by the app label is running
// with all of its containers ready, logging progress to the Ginkgo writer.
func (p *NativeResourcePoller) PodIsRunning() bool {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return false
	}
	for _, pod := range podList.Items {
		if strings.HasPrefix(pod.Name, p.Name) {
			switch pod.Status.Phase {
			case corev1.PodRunning:
				containerStatuses := pod.Status.ContainerStatuses
				for _, status := range containerStatuses {
					if status.State.Running == nil {
						fmt.Fprintf(GinkgoWriter, "For pod '%s', container '%s' is not yet running\n", pod.Name, status.Name)
						return false
					}
					if !status.Ready {
						fmt.Fprintf(GinkgoWriter, "For pod '%s', container '%s' is not yet ready\n", pod.Name, status.Name)
						return false
					}
				}
				fmt.Fprintf(GinkgoWriter, "'%s' and its containers are ready and running\n", pod.Name)
				return true
			case corev1.PodPending:
				p.CheckForStuckPod(pod)
			}
		}
	}

	return false
}

// CheckForStuckPod inspects a pending pod's init container statuses and, on a
// CreateContainerConfigError, deletes the pod once so its deployment can
// recreate it. It returns false if the pod could not be inspected or a retry
// was already used.
func (p *NativeResourcePoller) CheckForStuckPod(pod corev1.Pod) bool {
	fmt.Fprintf(GinkgoWriter, "'%s' in pending state, waiting for pod to start running...\n", pod.Name)
	if p.retry > 0 {
		return false // Out of retries, return
	}

	if len(pod.Status.InitContainerStatuses) == 0 {
		return false // No containers found, unable to get status to determine if pod is running
	}

	initContainerStatuses := pod.Status.InitContainerStatuses
	for _, status := range initContainerStatuses {
		if status.State.Waiting != nil {
			fmt.Fprintf(GinkgoWriter, "'%s' is waiting, with reason '%s'\n", pod.Name, status.State.Waiting.Reason)

			// Intermittent issues are seen on pods with shared volume mounts that are deleted and created in
			// quick succession; in such situations the pod sometimes ends up with an error stating that it
			// can't mount the subPath. This can be resolved by deleting the pod and letting it try again to
			// acquire the volume mount. The code below mimics this solution by deleting the pod, which is
			// brought back by the deployment and comes up fine. This is more of a hack to resolve this
			// issue in tests; the root cause might live in Portworx or in the operator.
			if status.State.Waiting.Reason == "CreateContainerConfigError" {
				fmt.Fprintf(GinkgoWriter, "Deleting pod '%s'\n", pod.Name)
				err := p.Client.CoreV1().Pods(p.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
				if err != nil {
					fmt.Fprintf(GinkgoWriter, "Deleting pod '%s' failed: %s\n", pod.Name, err)
				}
				p.retry = 1
			}
		}
	}

	return true
}

// GetPods returns all pods selected by the app label.
func (p *NativeResourcePoller) GetPods() []corev1.Pod {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return nil
	}
	return podList.Items
}

// GetRunningPods returns the pods selected by the app label whose containers
// are all ready and running.
func (p *NativeResourcePoller) GetRunningPods() []corev1.Pod {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", p.Name),
	}
	podList, err := p.Client.CoreV1().Pods(p.Namespace).List(context.TODO(), opts)
	if err != nil {
		return nil
	}
	pods := []corev1.Pod{}
	for _, pod := range podList.Items {
		switch pod.Status.Phase {
		case corev1.PodRunning:
			containerStatuses := pod.Status.ContainerStatuses

			readyContainers := 0
			numOfContainers := len(containerStatuses)

			for _, status := range containerStatuses {
				if status.Ready && status.State.Running != nil {
					readyContainers++
				}
			}
			if readyContainers == numOfContainers {
				pods = append(pods, pod)
			}

		case corev1.PodPending:
			p.CheckForStuckPod(pod)
		}
	}

	return pods
}
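// Sketch: GetRunningPods can be combined with Gomega's Eventually to wait
// until a given number of replicas is fully up. The replica count and the
// intervals below are hypothetical.
//
//	Eventually(func() int {
//		return len(poller.GetRunningPods())
//	}, "5m", "5s").Should(Equal(2))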
// TestAffinityZone returns true if the deployment's required node affinity
// contains a match expression on the topology.kubernetes.io/zone key.
func (p *NativeResourcePoller) TestAffinityZone(dep *appsv1.Deployment) bool {
	zoneExp := "topology.kubernetes.io/zone"

	affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity
	if affinity != nil {
		nodes := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
		for _, node := range nodes {
			for _, expr := range node.MatchExpressions {
				depExp := expr.Key
				if zoneExp == depExp {
					return true
				}
			}
		}
	} else {
		return false
	}

	return false
}

// TestAffinityRegion returns true if the deployment's required node affinity
// contains a match expression on the topology.kubernetes.io/region key.
func (p *NativeResourcePoller) TestAffinityRegion(dep *appsv1.Deployment) bool {
	regionExp := "topology.kubernetes.io/region"

	affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity
	if affinity != nil {
		nodes := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
		for _, node := range nodes {
			for _, expr := range node.MatchExpressions {
				depExp := expr.Key
				if regionExp == depExp {
					return true
				}
			}
		}
	} else {
		return false
	}

	return false
}
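// Sketch: TestAffinityZone and TestAffinityRegion report true for a deployment
// whose pod template carries a required node affinity keyed on the standard
// topology labels, for example one built like the following. The zone and
// region values are hypothetical.
//
//	dep.Spec.Template.Spec.Affinity = &corev1.Affinity{
//		NodeAffinity: &corev1.NodeAffinity{
//			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
//				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
//					MatchExpressions: []corev1.NodeSelectorRequirement{
//						{Key: "topology.kubernetes.io/zone", Operator: corev1.NodeSelectorOpIn, Values: []string{"dal10"}},
//						{Key: "topology.kubernetes.io/region", Operator: corev1.NodeSelectorOpIn, Values: []string{"us-south"}},
//					},
//				}},
//			},
//		},
//	}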