github.com/jenkins-x/jx/v2@v2.1.155/pkg/kube/build_lock_test.go (about) 1 // +build unit 2 3 package kube 4 5 import ( 6 "fmt" 7 "os" 8 "testing" 9 "time" 10 11 appsv1 "k8s.io/api/apps/v1" 12 v1 "k8s.io/api/core/v1" 13 "k8s.io/apimachinery/pkg/api/errors" 14 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 "k8s.io/apimachinery/pkg/types" 16 "k8s.io/apimachinery/pkg/watch" 17 "k8s.io/client-go/kubernetes" 18 "k8s.io/client-go/kubernetes/fake" 19 ktesting "k8s.io/client-go/testing" 20 21 "github.com/stretchr/testify/assert" 22 "github.com/stretchr/testify/require" 23 ) 24 25 func Test_compareBuildLocks(t *testing.T) { 26 time1 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano) 27 time2 := time.Date(2000, 1, 1, 0, 0, 0, 200000000, time.UTC).Format(time.RFC3339Nano) 28 time3 := time.Date(2000, 1, 1, 0, 0, 0, 210000000, time.UTC).Format(time.RFC3339Nano) 29 examples := []struct { 30 name string 31 old map[string]string 32 new map[string]string 33 ret map[string]string 34 err bool 35 }{{ 36 "same build", 37 map[string]string{ 38 "owner": "my-owner", 39 "repository": "my-repository", 40 "branch": "my-branch", 41 "build": "123", 42 "pod": "build-pod-123", 43 "timestamp": time2, 44 }, 45 map[string]string{ 46 "owner": "my-owner", 47 "repository": "my-repository", 48 "branch": "my-branch", 49 "build": "123", 50 "pod": "build-pod-123", 51 "timestamp": time2, 52 }, 53 nil, 54 false, 55 }, { 56 "same build but different pod (for some reason)", 57 map[string]string{ 58 "owner": "my-owner", 59 "repository": "my-repository", 60 "branch": "my-branch", 61 "build": "123", 62 "pod": "build-pod-123", 63 "timestamp": time2, 64 }, 65 map[string]string{ 66 "owner": "my-owner", 67 "repository": "my-repository", 68 "branch": "my-branch", 69 "build": "123", 70 "pod": "other-pod-123", 71 "timestamp": time2, 72 }, 73 nil, 74 true, 75 }, { 76 "lower build", 77 map[string]string{ 78 "owner": "my-owner", 79 "repository": "my-repository", 80 "branch": "my-branch", 81 "build": "101", 82 "pod": "build-pod-101", 83 "timestamp": time2, 84 }, 85 map[string]string{ 86 "owner": "my-owner", 87 "repository": "my-repository", 88 "branch": "my-branch", 89 "build": "99", 90 "pod": "build-pod-99", 91 "timestamp": time1, 92 }, 93 nil, 94 true, 95 }, { 96 "higher build", 97 map[string]string{ 98 "owner": "my-owner", 99 "repository": "my-repository", 100 "branch": "my-branch", 101 "build": "101", 102 "pod": "build-pod-101", 103 "timestamp": time1, 104 }, 105 map[string]string{ 106 "owner": "my-owner", 107 "repository": "my-repository", 108 "branch": "my-branch", 109 "build": "103", 110 "pod": "build-pod-103", 111 "timestamp": time2, 112 }, 113 map[string]string{ 114 "owner": "my-owner", 115 "repository": "my-repository", 116 "branch": "my-branch", 117 "build": "103", 118 "pod": "build-pod-103", 119 "timestamp": time2, 120 }, 121 false, 122 }, { 123 "higher build but lower timestamp", 124 map[string]string{ 125 "owner": "my-owner", 126 "repository": "my-repository", 127 "branch": "my-branch", 128 "build": "101", 129 "pod": "build-pod-101", 130 "timestamp": time3, 131 }, 132 map[string]string{ 133 "owner": "my-owner", 134 "repository": "my-repository", 135 "branch": "my-branch", 136 "build": "103", 137 "pod": "build-pod-103", 138 "timestamp": time2, 139 }, 140 map[string]string{ 141 "owner": "my-owner", 142 "repository": "my-repository", 143 "branch": "my-branch", 144 "build": "103", 145 "pod": "build-pod-103", 146 "timestamp": time3, 147 }, 148 false, 149 }, { 150 "other build, same timestamp", 151 map[string]string{ 152 "owner": "other-owner", 153 "repository": "my-repository", 154 "branch": "my-branch", 155 "build": "111", 156 "pod": "build-pod-111", 157 "timestamp": time2, 158 }, 159 map[string]string{ 160 "owner": "my-owner", 161 "repository": "my-repository", 162 "branch": "my-branch", 163 "build": "123", 164 "pod": "other-pod-123", 165 "timestamp": time2, 166 }, 167 nil, 168 true, 169 }, { 170 "other build, lower timestamp", 171 map[string]string{ 172 "owner": "my-owner", 173 "repository": "other-repository", 174 "branch": "my-branch", 175 "build": "111", 176 "pod": "build-pod-111", 177 "timestamp": time2, 178 }, 179 map[string]string{ 180 "owner": "my-owner", 181 "repository": "my-repository", 182 "branch": "my-branch", 183 "build": "123", 184 "pod": "other-pod-123", 185 "timestamp": time1, 186 }, 187 nil, 188 true, 189 }, { 190 "other build, higher timestamp", 191 map[string]string{ 192 "owner": "my-owner", 193 "repository": "my-repository", 194 "branch": "other-branch", 195 "build": "111", 196 "pod": "build-pod-111", 197 "timestamp": time2, 198 }, 199 map[string]string{ 200 "owner": "my-owner", 201 "repository": "my-repository", 202 "branch": "my-branch", 203 "build": "123", 204 "pod": "other-pod-123", 205 "timestamp": time3, 206 }, 207 map[string]string{ 208 "owner": "my-owner", 209 "repository": "my-repository", 210 "branch": "my-branch", 211 "build": "123", 212 "pod": "other-pod-123", 213 "timestamp": time3, 214 }, 215 false, 216 }} 217 for _, example := range examples { 218 ret, err := compareBuildLocks(example.old, example.new) 219 assert.Equal(t, example.ret, ret, example.name) 220 if example.err { 221 assert.Error(t, err, example.name) 222 } else { 223 assert.NoError(t, err, example.name) 224 } 225 } 226 } 227 228 // buildLock_Client creates a fake client with a fake tekton deployment 229 func buildLock_Client(t *testing.T) *fake.Clientset { 230 client := fake.NewSimpleClientset() 231 _, err := client.AppsV1().Deployments("jx").Create(&appsv1.Deployment{ 232 ObjectMeta: metav1.ObjectMeta{ 233 Name: DeploymentTektonController, 234 Namespace: "jx", 235 }, 236 }) 237 require.NoError(t, err) 238 return client 239 } 240 241 // buildLock_CountWatch count watchers for synchronization reasons 242 func buildLock_CountWatch(client *fake.Clientset) chan int { 243 c := make(chan int, 100) 244 count := 0 245 client.PrependWatchReactor("*", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { 246 count++ 247 c <- count 248 return false, nil, nil 249 }) 250 return c 251 } 252 253 var buildLock_UID int = 1 << 20 // the pid of out fake pods 254 // buildLock_Pod creates a running pod, looking close enough to a pipeline pod 255 func buildLock_Pod(t *testing.T, client kubernetes.Interface, owner, repository, branch, build string) *v1.Pod { 256 buildLock_UID++ 257 pod, err := client.CoreV1().Pods("jx").Create(&v1.Pod{ 258 TypeMeta: metav1.TypeMeta{ 259 Kind: "Pod", 260 APIVersion: "v1", 261 }, 262 ObjectMeta: metav1.ObjectMeta{ 263 Name: fmt.Sprintf("pipeline-%s-%s-%s-%s", owner, repository, branch, build), 264 Namespace: "jx", 265 Labels: map[string]string{ 266 "owner": owner, 267 "repository": repository, 268 "branch": branch, 269 "build": build, 270 "jenkins.io/pipelineType": "build", 271 }, 272 UID: types.UID(fmt.Sprintf("%d", buildLock_UID)), 273 }, 274 Status: v1.PodStatus{ 275 Phase: v1.PodRunning, 276 }, 277 }) 278 require.NoError(t, err) 279 return pod 280 } 281 282 // buildLock_Lock creates a lock 283 func buildLock_Lock(t *testing.T, client kubernetes.Interface, namespace, owner, repository, branch, build string, minutes int, expires time.Duration) *v1.ConfigMap { 284 exp := time.Now().UTC().Add(expires).Format(time.RFC3339Nano) 285 lock, err := client.CoreV1().ConfigMaps("jx").Create(&v1.ConfigMap{ 286 ObjectMeta: metav1.ObjectMeta{ 287 Name: "jx-lock-my-namespace", 288 Namespace: "jx", 289 Labels: map[string]string{ 290 "namespace": namespace, 291 "owner": owner, 292 "repository": repository, 293 "branch": branch, 294 "build": build, 295 "jenkins-x.io/kind": "build-lock", 296 }, 297 Annotations: map[string]string{ 298 "expires": exp, 299 }, 300 }, 301 Data: map[string]string{ 302 "namespace": namespace, 303 "owner": owner, 304 "repository": repository, 305 "branch": branch, 306 "build": build, 307 "timestamp": buildLock_Timestamp(minutes), 308 "expires": exp, 309 }, 310 }) 311 require.NoError(t, err) 312 return lock 313 } 314 315 // buildLock_LockFromPod creates a lock that matches a pod 316 func buildLock_LockFromPod(t *testing.T, client kubernetes.Interface, namespace string, pod *v1.Pod, minutes int) *v1.ConfigMap { 317 lock, err := client.CoreV1().ConfigMaps("jx").Create(&v1.ConfigMap{ 318 ObjectMeta: metav1.ObjectMeta{ 319 Name: "jx-lock-my-namespace", 320 Namespace: "jx", 321 Labels: map[string]string{ 322 "namespace": namespace, 323 "owner": pod.Labels["owner"], 324 "repository": pod.Labels["repository"], 325 "branch": pod.Labels["branch"], 326 "build": pod.Labels["build"], 327 "jenkins-x.io/kind": "build-lock", 328 }, 329 OwnerReferences: []metav1.OwnerReference{{ 330 APIVersion: "v1", 331 Kind: "Pod", 332 Name: pod.Name, 333 UID: pod.UID, 334 }}, 335 }, 336 Data: map[string]string{ 337 "namespace": namespace, 338 "owner": pod.Labels["owner"], 339 "repository": pod.Labels["repository"], 340 "branch": pod.Labels["branch"], 341 "build": pod.Labels["build"], 342 "pod": pod.Name, 343 "timestamp": buildLock_Timestamp(minutes), 344 }, 345 }) 346 require.NoError(t, err) 347 return lock 348 } 349 350 // buildLock_Timestamp create the timestamp for a lock, now plus or minus some minutes 351 func buildLock_Timestamp(minutes int) string { 352 now := time.Now().UTC() 353 now = now.Add(time.Duration(minutes) * time.Minute) 354 return now.Format(time.RFC3339Nano) 355 } 356 357 // buildLock_Env prepares the environment for calling AcquireBuildLock 358 // returns a defer function to restore the environment 359 func buildLock_Env(t *testing.T, owner, repository, branch, build string, interpret bool) func() { 360 v := "" 361 if interpret { 362 v = "true" 363 } 364 env := map[string]string{ 365 "REPO_OWNER": owner, 366 "REPO_NAME": repository, 367 "BRANCH_NAME": branch, 368 "BUILD_NUMBER": build, 369 "JX_INTERPRET_PIPELINE": v, 370 } 371 old := map[string]string{} 372 for k, v := range env { 373 value, ok := os.LookupEnv(k) 374 if ok { 375 old[k] = value 376 } 377 var err error 378 if v == "" { 379 err = os.Unsetenv(k) 380 } else { 381 err = os.Setenv(k, v) 382 } 383 require.NoError(t, err) 384 } 385 return func() { 386 for k := range env { 387 v, ok := old[k] 388 var err error 389 if ok { 390 err = os.Setenv(k, v) 391 } else { 392 err = os.Unsetenv(k) 393 } 394 assert.NoError(t, err) 395 } 396 } 397 } 398 399 // buildLock_Acquire calls AcquireBuildLock with arguments 400 // returns a defer function to restore the environment 401 // returns a chan that is filled once AcquireBuildLock returns 402 // its item will perform some check and call the callback 403 // its item is nil on timeout 404 func buildLock_Acquire(t *testing.T, client kubernetes.Interface, namespace, owner, repository, branch, build string, fails bool) (func(), chan func()) { 405 c := make(chan func(), 2) 406 clean := buildLock_Env(t, owner, repository, branch, build, true) 407 go func() { 408 callback, err := AcquireBuildLock(client, "jx", namespace) 409 c <- func() { 410 if !fails { 411 require.NoError(t, err) 412 assert.NoError(t, callback()) 413 } else { 414 require.Error(t, err) 415 } 416 } 417 }() 418 go func() { 419 time.Sleep(time.Duration(5) * time.Second) 420 c <- nil 421 }() 422 return clean, c 423 } 424 425 // buildLock_AcquireFromPod calls AcquireBuildLock with arguments matching a pod 426 // returns a defer function to restore the environment 427 // returns a chan that is filled once AcquireBuildLock returns 428 // its item will perform some check and call the callback 429 // its item is nil on timeout 430 func buildLock_AcquireFromPod(t *testing.T, client kubernetes.Interface, namespace string, pod *v1.Pod, fails bool) (func(), chan func()) { 431 c := make(chan func(), 2) 432 clean := buildLock_Env(t, pod.Labels["owner"], pod.Labels["repository"], pod.Labels["branch"], pod.Labels["build"], false) 433 go func() { 434 callback, err := AcquireBuildLock(client, "jx", namespace) 435 c <- func() { 436 if !fails { 437 require.NoError(t, err) 438 assert.NoError(t, callback()) 439 } else { 440 require.Error(t, err) 441 } 442 } 443 }() 444 go func() { 445 time.Sleep(time.Duration(5) * time.Second) 446 c <- nil 447 }() 448 return clean, c 449 } 450 451 func buildLock_AssertNoLock(t *testing.T, client kubernetes.Interface, namespace string) { 452 lock, err := client.CoreV1().ConfigMaps("jx").Get("jx-lock-"+namespace, metav1.GetOptions{}) 453 assert.Nil(t, lock) 454 if assert.Error(t, err) { 455 require.IsType(t, &errors.StatusError{}, err) 456 status := err.(*errors.StatusError) 457 require.Equal(t, metav1.StatusReasonNotFound, status.Status().Reason) 458 } 459 } 460 461 // buildLock_AssertLock checks if the lock configmap is correct 462 func buildLock_AssertLock(t *testing.T, client kubernetes.Interface, namespace, owner, repository, branch, build string) { 463 lock, err := client.CoreV1().ConfigMaps("jx").Get("jx-lock-"+namespace, metav1.GetOptions{}) 464 require.NoError(t, err) 465 if assert.NotNil(t, lock) { 466 assert.Equal(t, "build-lock", lock.Labels["jenkins-x.io/kind"]) 467 assert.Equal(t, namespace, lock.Labels["namespace"]) 468 assert.Equal(t, owner, lock.Labels["owner"]) 469 assert.Equal(t, repository, lock.Labels["repository"]) 470 assert.Equal(t, branch, lock.Labels["branch"]) 471 assert.Equal(t, build, lock.Labels["build"]) 472 assert.Empty(t, lock.OwnerReferences) 473 assert.Equal(t, namespace, lock.Data["namespace"]) 474 assert.Equal(t, owner, lock.Data["owner"]) 475 assert.Equal(t, repository, lock.Data["repository"]) 476 assert.Equal(t, branch, lock.Data["branch"]) 477 assert.Equal(t, build, lock.Data["build"]) 478 assert.Equal(t, "", lock.Data["pod"]) 479 ts, err := time.Parse(time.RFC3339Nano, lock.Data["timestamp"]) 480 if assert.NoError(t, err) { 481 assert.True(t, ts.Before(time.Now().Add(time.Minute))) 482 assert.True(t, ts.After(time.Now().Add(time.Duration(-1)*time.Minute))) 483 } 484 ts, err = time.Parse(time.RFC3339Nano, lock.Annotations["expires"]) 485 if assert.NoError(t, err) { 486 // tighter check to be sure that expires is updated 487 assert.True(t, ts.Before(time.Now().Add(buildLockExpires+time.Duration(1500)*time.Millisecond))) 488 assert.True(t, ts.After(time.Now().Add(buildLockExpires+time.Duration(-1500)*time.Millisecond))) 489 assert.Equal(t, lock.Annotations["expires"], lock.Data["expires"]) 490 } 491 } 492 } 493 494 // buildLock_AssertLockFromPod checks if the lock configmap is matching the given pod 495 func buildLock_AssertLockFromPod(t *testing.T, client kubernetes.Interface, namespace string, pod *v1.Pod) { 496 lock, err := client.CoreV1().ConfigMaps("jx").Get("jx-lock-"+namespace, metav1.GetOptions{}) 497 require.NoError(t, err) 498 if assert.NotNil(t, lock) { 499 assert.Equal(t, "build-lock", lock.Labels["jenkins-x.io/kind"]) 500 assert.Equal(t, namespace, lock.Labels["namespace"]) 501 assert.Equal(t, pod.Labels["owner"], lock.Labels["owner"]) 502 assert.Equal(t, pod.Labels["repository"], lock.Labels["repository"]) 503 assert.Equal(t, pod.Labels["branch"], lock.Labels["branch"]) 504 assert.Equal(t, pod.Labels["build"], lock.Labels["build"]) 505 assert.Equal(t, []metav1.OwnerReference{{ 506 APIVersion: pod.APIVersion, 507 Kind: pod.Kind, 508 Name: pod.Name, 509 UID: pod.UID, 510 }}, lock.OwnerReferences) 511 assert.Equal(t, "", lock.Annotations["expires"]) 512 assert.Equal(t, namespace, lock.Data["namespace"]) 513 assert.Equal(t, pod.Labels["owner"], lock.Data["owner"]) 514 assert.Equal(t, pod.Labels["repository"], lock.Data["repository"]) 515 assert.Equal(t, pod.Labels["branch"], lock.Data["branch"]) 516 assert.Equal(t, pod.Labels["build"], lock.Data["build"]) 517 assert.Equal(t, pod.Name, lock.Data["pod"]) 518 ts, err := time.Parse(time.RFC3339Nano, lock.Data["timestamp"]) 519 if assert.NoError(t, err) { 520 assert.True(t, ts.Before(time.Now().Add(time.Minute))) 521 assert.True(t, ts.After(time.Now().Add(time.Duration(-1)*time.Minute))) 522 } 523 assert.Equal(t, "", lock.Data["expires"]) 524 } 525 } 526 527 func TestAcquireBuildLock(t *testing.T) { 528 // just acquire a lock when no lock exists 529 client := buildLock_Client(t) 530 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 531 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 532 defer clean() 533 callback := <-channel 534 require.NotNil(t, callback, "timeout") 535 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 536 callback() 537 buildLock_AssertNoLock(t, client, "my-namespace") 538 } 539 540 func TestAcquireBuildLock_interpret(t *testing.T) { 541 // acquire a lock with an intepreted pipeline 542 client := buildLock_Client(t) 543 clean := buildLock_Env(t, "my-owner", "my-repository", "my-branch", "13", true) 544 defer clean() 545 channel := make(chan func(), 2) 546 go func() { 547 callback, err := AcquireBuildLock(client, "jx", "my-namespace") 548 channel <- func() { 549 require.NoError(t, err) 550 assert.NoError(t, callback()) 551 } 552 }() 553 go func() { 554 time.Sleep(time.Duration(5) * time.Second) 555 channel <- nil 556 }() 557 558 callback := <-channel 559 require.NotNil(t, callback, "timeout") 560 buildLock_AssertLock(t, client, "my-namespace", "my-owner", "my-repository", "my-branch", "13") 561 callback() 562 buildLock_AssertNoLock(t, client, "my-namespace") 563 } 564 565 func TestAcquireBuildLock_invalidLock(t *testing.T) { 566 // acquire a lock when the previous lock is invalid 567 client := buildLock_Client(t) 568 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "42") 569 lock := buildLock_LockFromPod(t, client, "my-namespace", previous, -42) 570 lock.Labels["jenkins-x.io/kind"] = "other-lock" 571 _, err := client.CoreV1().ConfigMaps("jx").Update(lock) 572 require.NoError(t, err) 573 574 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 575 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 576 defer clean() 577 callback := <-channel 578 require.NotNil(t, callback, "timeout") 579 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 580 callback() 581 buildLock_AssertNoLock(t, client, "my-namespace") 582 } 583 584 func TestAcquireBuildLock_previousNotFound(t *testing.T) { 585 // acquire a lock when the locking pod does not exist 586 client := buildLock_Client(t) 587 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "42") 588 buildLock_LockFromPod(t, client, "my-namespace", previous, 42) 589 err := client.CoreV1().Pods("jx").Delete(previous.Name, &metav1.DeleteOptions{}) 590 require.NoError(t, err) 591 592 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 593 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 594 defer clean() 595 callback := <-channel 596 require.NotNil(t, callback, "timeout") 597 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 598 callback() 599 buildLock_AssertNoLock(t, client, "my-namespace") 600 } 601 602 func TestAcquireBuildLock_previousFinished(t *testing.T) { 603 // acquire a lock when the locking pod has finished 604 client := buildLock_Client(t) 605 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "42") 606 buildLock_LockFromPod(t, client, "my-namespace", previous, 42) 607 previous.Status.Phase = v1.PodFailed 608 _, err := client.CoreV1().Pods("jx").Update(previous) 609 require.NoError(t, err) 610 611 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 612 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 613 defer clean() 614 callback := <-channel 615 require.NotNil(t, callback, "timeout") 616 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 617 callback() 618 buildLock_AssertNoLock(t, client, "my-namespace") 619 } 620 621 func TestAcquireBuildLock_expired(t *testing.T) { 622 // acquire a lock when the previous lock has expired 623 client := buildLock_Client(t) 624 buildLock_Lock(t, client, "my-namespace", "my-owner", "my-repository", "my-branch", "42", 42, time.Duration(-1)*time.Minute) 625 626 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 627 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 628 defer clean() 629 callback := <-channel 630 require.NotNil(t, callback, "timeout") 631 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 632 callback() 633 buildLock_AssertNoLock(t, client, "my-namespace") 634 } 635 636 func TestAcquireBuildLock_higherRuns(t *testing.T) { 637 // fails at acquiring the lock because an higher build is running 638 client := buildLock_Client(t) 639 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "42") 640 buildLock_LockFromPod(t, client, "my-namespace", previous, -42) 641 642 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 643 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, true) 644 defer clean() 645 callback := <-channel 646 callback() 647 } 648 649 func TestAcquireBuildLock_laterRuns(t *testing.T) { 650 // fails at acquiring the lock because a later build is running 651 client := buildLock_Client(t) 652 previous := buildLock_Pod(t, client, "other-owner", "other-repository", "other-branch", "42") 653 buildLock_LockFromPod(t, client, "my-namespace", previous, 42) 654 655 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 656 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, true) 657 defer clean() 658 callback := <-channel 659 callback() 660 } 661 662 func TestAcquireBuildLock_waitLowerPodDeleted(t *testing.T) { 663 // wait for a lower build to be deleted 664 client := buildLock_Client(t) 665 counter := buildLock_CountWatch(client) 666 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "11") 667 old := buildLock_LockFromPod(t, client, "my-namespace", previous, 11) 668 // should update the lock 669 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 670 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 671 defer clean() 672 // wait for AcquireBuildLock to be waiting 673 for { 674 count := 0 675 select { 676 case count = <-counter: 677 case callback := <-channel: 678 require.NotNil(t, callback, "timeout") 679 assert.Fail(t, "TestAcquireBuildLock returned") 680 callback() 681 return 682 } 683 if count == 2 { 684 break 685 } 686 } 687 // check the lock 688 lock, err := client.CoreV1().ConfigMaps("jx").Get(old.Name, metav1.GetOptions{}) 689 require.NoError(t, err) 690 assert.Equal(t, old.ObjectMeta, lock.ObjectMeta) 691 assert.Equal(t, "my-namespace", lock.Data["namespace"]) 692 assert.Equal(t, "my-owner", lock.Data["owner"]) 693 assert.Equal(t, "my-repository", lock.Data["repository"]) 694 assert.Equal(t, "my-branch", lock.Data["branch"]) 695 assert.Equal(t, "13", lock.Data["build"]) 696 assert.Equal(t, pod.Name, lock.Data["pod"]) 697 assert.Equal(t, old.Data["timestamp"], lock.Data["timestamp"]) 698 // should acquire the lock 699 err = client.CoreV1().Pods("jx").Delete(previous.Name, &metav1.DeleteOptions{}) 700 require.NoError(t, err) 701 callback := <-channel 702 require.NotNil(t, callback, "timeout") 703 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 704 callback() 705 buildLock_AssertNoLock(t, client, "my-namespace") 706 } 707 708 func TestAcquireBuildLock_waitLowerLockDeleted(t *testing.T) { 709 // wait for a lower build lock to be deleted 710 client := buildLock_Client(t) 711 counter := buildLock_CountWatch(client) 712 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "11") 713 old := buildLock_LockFromPod(t, client, "my-namespace", previous, 11) 714 // should update the lock 715 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 716 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 717 defer clean() 718 // wait for AcquireBuildLock to be waiting 719 for { 720 count := 0 721 select { 722 case count = <-counter: 723 case callback := <-channel: 724 require.NotNil(t, callback, "timeout") 725 assert.Fail(t, "TestAcquireBuildLock returned") 726 callback() 727 return 728 } 729 if count == 2 { 730 break 731 } 732 } 733 // check the lock 734 lock, err := client.CoreV1().ConfigMaps("jx").Get(old.Name, metav1.GetOptions{}) 735 require.NoError(t, err) 736 assert.Equal(t, old.ObjectMeta, lock.ObjectMeta) 737 assert.Equal(t, "my-namespace", lock.Data["namespace"]) 738 assert.Equal(t, "my-owner", lock.Data["owner"]) 739 assert.Equal(t, "my-repository", lock.Data["repository"]) 740 assert.Equal(t, "my-branch", lock.Data["branch"]) 741 assert.Equal(t, "13", lock.Data["build"]) 742 assert.Equal(t, pod.Name, lock.Data["pod"]) 743 assert.Equal(t, old.Data["timestamp"], lock.Data["timestamp"]) 744 // should acquire the lock 745 err = client.CoreV1().ConfigMaps("jx").Delete(old.Name, &metav1.DeleteOptions{}) 746 require.NoError(t, err) 747 callback := <-channel 748 require.NotNil(t, callback, "timeout") 749 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 750 callback() 751 buildLock_AssertNoLock(t, client, "my-namespace") 752 } 753 754 func TestAcquireBuildLock_waitEarlierFinished(t *testing.T) { 755 // wait for a lower build to finish 756 client := buildLock_Client(t) 757 counter := buildLock_CountWatch(client) 758 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "11") 759 old := buildLock_LockFromPod(t, client, "my-namespace", previous, 11) 760 // should update the lock 761 clean, channel := buildLock_Acquire(t, client, "my-namespace", "my-owner", "my-repository", "my-branch", "13", false) 762 defer clean() 763 // wait for AcquireBuildLock to be waiting 764 for { 765 count := 0 766 select { 767 case count = <-counter: 768 case callback := <-channel: 769 require.NotNil(t, callback, "timeout") 770 assert.Fail(t, "TestAcquireBuildLock returned") 771 callback() 772 return 773 } 774 if count == 1 { 775 break 776 } 777 } 778 // check the lock 779 lock, err := client.CoreV1().ConfigMaps("jx").Get(old.Name, metav1.GetOptions{}) 780 require.NoError(t, err) 781 assert.Equal(t, old.ObjectMeta, lock.ObjectMeta) 782 assert.Equal(t, "my-namespace", lock.Data["namespace"]) 783 assert.Equal(t, "my-owner", lock.Data["owner"]) 784 assert.Equal(t, "my-repository", lock.Data["repository"]) 785 assert.Equal(t, "my-branch", lock.Data["branch"]) 786 assert.Equal(t, "13", lock.Data["build"]) 787 assert.Equal(t, "", lock.Data["pod"]) 788 assert.Equal(t, old.Data["timestamp"], lock.Data["timestamp"]) 789 // should acquire the lock 790 previous.Status.Phase = v1.PodSucceeded 791 _, err = client.CoreV1().Pods("jx").Update(previous) 792 require.NoError(t, err) 793 callback := <-channel 794 require.NotNil(t, callback, "timeout") 795 buildLock_AssertLock(t, client, "my-namespace", "my-owner", "my-repository", "my-branch", "13") 796 callback() 797 buildLock_AssertNoLock(t, client, "my-namespace") 798 } 799 800 func TestAcquireBuildLock_waitLowerExpired(t *testing.T) { 801 // wait for a lock to expire 802 client := buildLock_Client(t) 803 counter := buildLock_CountWatch(client) 804 old := buildLock_Lock(t, client, "my-namespace", "my-owner", "my-repository", "my-branch", "11", 11, time.Duration(2)*time.Second) 805 // should update the lock 806 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 807 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, false) 808 defer clean() 809 // wait for AcquireBuildLock to be waiting 810 for { 811 count := 0 812 select { 813 case count = <-counter: 814 case callback := <-channel: 815 require.NotNil(t, callback, "timeout") 816 assert.Fail(t, "TestAcquireBuildLock returned") 817 callback() 818 return 819 } 820 if count == 1 { 821 break 822 } 823 } 824 // check the lock 825 lock, err := client.CoreV1().ConfigMaps("jx").Get(old.Name, metav1.GetOptions{}) 826 require.NoError(t, err) 827 assert.Equal(t, old.ObjectMeta, lock.ObjectMeta) 828 assert.Equal(t, "my-namespace", lock.Data["namespace"]) 829 assert.Equal(t, "my-owner", lock.Data["owner"]) 830 assert.Equal(t, "my-repository", lock.Data["repository"]) 831 assert.Equal(t, "my-branch", lock.Data["branch"]) 832 assert.Equal(t, "13", lock.Data["build"]) 833 assert.Equal(t, pod.Name, lock.Data["pod"]) 834 assert.Equal(t, old.Data["timestamp"], lock.Data["timestamp"]) 835 // should acquire the lock after 2 seconds 836 callback := <-channel 837 require.NotNil(t, callback, "timeout") 838 buildLock_AssertLockFromPod(t, client, "my-namespace", pod) 839 callback() 840 buildLock_AssertNoLock(t, client, "my-namespace") 841 } 842 843 func TestAcquireBuildLock_waitButHigher(t *testing.T) { 844 // wait for a lower run to finish, but an higher run appears 845 client := buildLock_Client(t) 846 counter := buildLock_CountWatch(client) 847 previous := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "11") 848 old := buildLock_LockFromPod(t, client, "my-namespace", previous, -11) 849 // should update the lock 850 pod := buildLock_Pod(t, client, "my-owner", "my-repository", "my-branch", "13") 851 clean, channel := buildLock_AcquireFromPod(t, client, "my-namespace", pod, true) 852 defer clean() 853 // wait for AcquireBuildLock to be waiting 854 for { 855 count := 0 856 select { 857 case count = <-counter: 858 case callback := <-channel: 859 require.NotNil(t, callback, "timeout") 860 assert.Fail(t, "TestAcquireBuildLock returned") 861 callback() 862 return 863 } 864 if count == 2 { 865 break 866 } 867 } 868 // check the lock 869 lock, err := client.CoreV1().ConfigMaps("jx").Get(old.Name, metav1.GetOptions{}) 870 require.NoError(t, err) 871 assert.Equal(t, old.ObjectMeta, lock.ObjectMeta) 872 assert.Equal(t, "my-namespace", lock.Data["namespace"]) 873 assert.Equal(t, "my-owner", lock.Data["owner"]) 874 assert.Equal(t, "my-repository", lock.Data["repository"]) 875 assert.Equal(t, "my-branch", lock.Data["branch"]) 876 assert.Equal(t, "13", lock.Data["build"]) 877 ts, err := time.Parse(time.RFC3339Nano, lock.Data["timestamp"]) 878 if assert.NoError(t, err) { 879 assert.True(t, ts.Before(time.Now().Add(time.Minute))) 880 assert.True(t, ts.After(time.Now().Add(time.Duration(-1)*time.Minute))) 881 } 882 // update the lock and expect failure 883 lock.Data["build"] = "21" 884 _, err = client.CoreV1().ConfigMaps("jx").Update(lock) 885 require.NoError(t, err) 886 callback := <-channel 887 require.NotNil(t, callback, "timeout") 888 callback() 889 }