github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/les/servingqueue.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"sync"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
)

// servingQueue allows running tasks in a limited number of threads and puts the
// waiting tasks in a priority queue
type servingQueue struct {
	tokenCh                 chan runToken
	queueAddCh, queueBestCh chan *servingTask
	stopThreadCh, quit      chan struct{}
	setThreadsCh            chan int

	wg          sync.WaitGroup
	threadCount int          // number of currently running threads
	queue       *prque.Prque // priority queue for waiting or suspended tasks
	best        *servingTask // the highest priority task (not included in the queue)
	suspendBias int64        // priority bias against suspending an already running task
}

// servingTask represents a request serving task. Tasks can be implemented to
// run in multiple steps, allowing the serving queue to suspend execution between
// steps if higher priority tasks are entered. The creator of the task should
// set the following fields:
//
// - priority: greater value means higher priority; values can wrap around the int64 range
// - run: execute a single step; return true if finished
// - after: executed after run finishes or returns an error, receives the total serving time
type servingTask struct {
	sq          *servingQueue
	servingTime uint64
	priority    int64
	biasAdded   bool
	token       runToken
	tokenCh     chan runToken
}

// runToken received by servingTask.start allows the task to run. Closing the
// channel by servingTask.done signals the thread controller to allow a new task
// to start running.
type runToken chan struct{}

// start blocks until the task can start and returns true if it is allowed to run.
// Returning false means that the task should be cancelled.
func (t *servingTask) start() bool {
	select {
	case t.token = <-t.sq.tokenCh:
	default:
		t.tokenCh = make(chan runToken, 1)
		select {
		case t.sq.queueAddCh <- t:
		case <-t.sq.quit:
			return false
		}
		select {
		case t.token = <-t.tokenCh:
		case <-t.sq.quit:
			return false
		}
	}
	if t.token == nil {
		return false
	}
	t.servingTime -= uint64(mclock.Now())
	return true
}

// done signals the thread controller about the task being finished and returns
// the total serving time of the task in nanoseconds.
func (t *servingTask) done() uint64 {
	t.servingTime += uint64(mclock.Now())
	close(t.token)
	return t.servingTime
}
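// The following helper is an illustrative sketch added for clarity and is not
// part of the original file. It shows how a caller might push a single-step
// request through the queue using newTask (defined below), start and done; the
// serveOnce name and the work callback are hypothetical.
func serveOnce(sq *servingQueue, priority int64, work func()) (uint64, bool) {
	task := sq.newTask(priority)
	if !task.start() {
		// start returned false: the queue is shutting down, cancel the request.
		return 0, false
	}
	work() // perform the actual request serving
	// done releases the thread slot and reports the measured serving time.
	return task.done(), true
}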
// waitOrStop can be called during the execution of the task. It blocks if there
// is a higher priority task waiting (a bias is applied in favor of the currently
// running task). Returning true means that the execution can be resumed. False
// means the task should be cancelled.
func (t *servingTask) waitOrStop() bool {
	t.done()
	if !t.biasAdded {
		t.priority += t.sq.suspendBias
		t.biasAdded = true
	}
	return t.start()
}

// newServingQueue returns a new servingQueue
func newServingQueue(suspendBias int64) *servingQueue {
	sq := &servingQueue{
		queue:        prque.New(nil),
		suspendBias:  suspendBias,
		tokenCh:      make(chan runToken),
		queueAddCh:   make(chan *servingTask, 100),
		queueBestCh:  make(chan *servingTask),
		stopThreadCh: make(chan struct{}),
		quit:         make(chan struct{}),
		setThreadsCh: make(chan int, 10),
	}
	sq.wg.Add(2)
	go sq.queueLoop()
	go sq.threadCountLoop()
	return sq
}

// newTask creates a new task with the given priority
func (sq *servingQueue) newTask(priority int64) *servingTask {
	return &servingTask{
		sq:       sq,
		priority: priority,
	}
}

// threadController is started in multiple goroutines and controls the execution
// of tasks. The number of active thread controllers equals the allowed number of
// concurrently running threads. It tries to fetch the highest priority queued
// task first. If there are no queued tasks waiting then it can directly catch
// run tokens from the token channel and allow the corresponding tasks to run
// without entering the priority queue.
func (sq *servingQueue) threadController() {
	for {
		token := make(runToken)
		select {
		case best := <-sq.queueBestCh:
			best.tokenCh <- token
		default:
			select {
			case best := <-sq.queueBestCh:
				best.tokenCh <- token
			case sq.tokenCh <- token:
			case <-sq.stopThreadCh:
				sq.wg.Done()
				return
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
		<-token
		select {
		case <-sq.stopThreadCh:
			sq.wg.Done()
			return
		case <-sq.quit:
			sq.wg.Done()
			return
		default:
		}
	}
}

// addTask inserts a task into the priority queue
func (sq *servingQueue) addTask(task *servingTask) {
	if sq.best == nil {
		sq.best = task
	} else if task.priority > sq.best.priority {
		sq.queue.Push(sq.best, sq.best.priority)
		sq.best = task
		return
	} else {
		sq.queue.Push(task, task.priority)
	}
}

// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh
// and always tries to send the highest priority task to queueBestCh. Successfully sent
// tasks are removed from the queue.
func (sq *servingQueue) queueLoop() {
	for {
		if sq.best != nil {
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case sq.queueBestCh <- sq.best:
				if sq.queue.Size() == 0 {
					sq.best = nil
				} else {
					sq.best, _ = sq.queue.PopItem().(*servingTask)
				}
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		} else {
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
	}
}
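// Illustrative sketch, not part of the original file: a longer request can be
// served in multiple steps, calling waitOrStop between steps so queueLoop and
// the thread controllers can hand the thread to a higher priority task. The
// serveInSteps name and the step callback (returning true when finished) are
// hypothetical.
func serveInSteps(sq *servingQueue, priority int64, step func() bool) bool {
	task := sq.newTask(priority)
	if !task.start() {
		return false // shutting down, cancel the task
	}
	for !step() {
		// waitOrStop re-queues the task with suspendBias added once, so an
		// already running task is only suspended for significantly higher
		// priority work; false means the task should be cancelled.
		if !task.waitOrStop() {
			return false
		}
	}
	task.done()
	return true
}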
// threadCountLoop is an event loop running in a goroutine. It adjusts the number
// of active thread controller goroutines.
func (sq *servingQueue) threadCountLoop() {
	var threadCountTarget int
	for {
		for threadCountTarget > sq.threadCount {
			sq.wg.Add(1)
			go sq.threadController()
			sq.threadCount++
		}
		if threadCountTarget < sq.threadCount {
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case sq.stopThreadCh <- struct{}{}:
				sq.threadCount--
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		} else {
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
	}
}

// setThreads sets the allowed processing thread count, suspending tasks as soon as
// possible if necessary.
func (sq *servingQueue) setThreads(threadCount int) {
	select {
	case sq.setThreadsCh <- threadCount:
	case <-sq.quit:
		return
	}
}

// stop stops task processing as soon as possible and shuts down the serving queue.
func (sq *servingQueue) stop() {
	close(sq.quit)
	sq.wg.Wait()
}
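// Illustrative sketch, not part of the original file: typical lifecycle of a
// servingQueue. The bias and thread count values are arbitrary examples and the
// function name is hypothetical.
func exampleServingQueueLifecycle() {
	// suspendBias is expressed in the same unit as task priorities and makes
	// the queue reluctant to suspend a task that is already running.
	sq := newServingQueue(1000)
	sq.setThreads(4) // allow four tasks to be served concurrently
	// ... serve requests via sq.newTask(...), start, waitOrStop and done ...
	sq.setThreads(0) // suspend processing as soon as possible
	sq.stop()        // close quit, stop the event loops and wait for them to exit
}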