sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/crier/reporters/criercommonlib/shardedlock.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package criercommonlib contains shared libraries used by reporters.
package criercommonlib

import (
	"context"
	"time"

	"github.com/sirupsen/logrus"
	"golang.org/x/sync/semaphore"
)

// SimplePull contains the information identifying a shard.
type SimplePull struct {
	org, repo string
	number    int
}

// NewSimplePull creates a SimplePull.
func NewSimplePull(org, repo string, number int) *SimplePull {
	return &SimplePull{org: org, repo: repo, number: number}
}

// ShardedLock contains sharding information keyed by PR.
type ShardedLock struct {
	// A semaphore is chosen over a mutex because semaphore's Acquire
	// respects context cancellation and timeouts, while a mutex's Lock
	// does not.
	mapLock *semaphore.Weighted
	locks   map[SimplePull]*semaphore.Weighted
}

// NewShardedLock creates a ShardedLock.
func NewShardedLock() *ShardedLock {
	return &ShardedLock{
		mapLock: semaphore.NewWeighted(1),
		locks:   map[SimplePull]*semaphore.Weighted{},
	}
}

// GetLock acquires the lock for a PR.
func (s *ShardedLock) GetLock(ctx context.Context, key SimplePull) (*semaphore.Weighted, error) {
	if err := s.mapLock.Acquire(ctx, 1); err != nil {
		return nil, err
	}
	defer s.mapLock.Release(1)
	if _, exists := s.locks[key]; !exists {
		s.locks[key] = semaphore.NewWeighted(1)
	}
	return s.locks[key], nil
}

// Cleanup deletes all locks by acquiring first the mapLock and then each
// individual lock before deleting it. The individual lock must be acquired
// first: otherwise it might still be held when we delete it from the map,
// after which it could be recreated and acquired, letting two routines
// report in parallel for the same job.
// Note that while this function is running, no new presubmit reporting can
// happen, as we hold the mapLock.
func (s *ShardedLock) Cleanup() {
	// Acquire never fails with context.Background(), which cannot be
	// canceled, so the returned errors are safe to ignore below.
	ctx := context.Background()
	s.mapLock.Acquire(ctx, 1)
	defer s.mapLock.Release(1)

	for key, lock := range s.locks {
		// There is a very small chance of a race in which two threads end up
		// holding different locks for the same PR, which would result in one
		// duplicated report. Since fixing this is complicated and the impact
		// is low, we keep it as is.
		// For details see: https://github.com/kubernetes/test-infra/pull/20343
		lock.Acquire(ctx, 1)
		delete(s.locks, key)
		lock.Release(1)
	}
}

// RunCleanup asynchronously runs the cleanup once per hour.
func (s *ShardedLock) RunCleanup() {
	go func() {
		for range time.Tick(time.Hour) {
			logrus.Debug("Starting to clean up presubmit locks")
			startTime := time.Now()
			s.Cleanup()
			logrus.WithField("duration", time.Since(startTime).String()).Debug("Finished cleaning up presubmit locks")
		}
	}()
}
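// newReporterLocks is a hypothetical startup sketch, not part of the original
// file: it shows the intended wiring, where a reporter creates a single
// ShardedLock and starts the hourly cleanup loop exactly once. The function
// name is an assumption for illustration only.
func newReporterLocks() *ShardedLock {
	s := NewShardedLock()
	// Start the background goroutine that calls Cleanup once per hour.
	s.RunCleanup()
	return s
}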
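// reportWithLock is a hypothetical usage sketch, not part of the original
// file: it shows how a caller might serialize reports for a single PR by
// acquiring the shard's semaphore under a deadline, which is the reason the
// struct holds semaphores rather than mutexes. The doReport callback and the
// 30-second timeout are assumptions for illustration only.
func reportWithLock(s *ShardedLock, key SimplePull, doReport func(context.Context) error) error {
	// Bound the wait so a stuck reporter cannot block this shard forever;
	// a mutex's Lock would offer no equivalent escape hatch.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	lock, err := s.GetLock(ctx, key)
	if err != nil {
		return err
	}
	// Acquire the per-PR semaphore; this returns ctx.Err() if the deadline
	// expires while another routine still holds the lock.
	if err := lock.Acquire(ctx, 1); err != nil {
		return err
	}
	defer lock.Release(1)

	return doReport(ctx)
}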