github.com/theQRL/go-zond@v0.1.1/les/servingqueue.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"sync"
	"sync/atomic"

	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/common/prque"
	"golang.org/x/exp/slices"
)

// servingQueue allows running tasks in a limited number of threads and puts the
// waiting tasks in a priority queue
type servingQueue struct {
	recentTime, queuedTime     uint64
	servingTimeDiff            atomic.Uint64
	burstLimit, burstDropLimit uint64
	burstDecRate               float64
	lastUpdate                 mclock.AbsTime

	queueAddCh, queueBestCh chan *servingTask
	stopThreadCh, quit      chan struct{}
	setThreadsCh            chan int

	wg          sync.WaitGroup
	threadCount int                               // number of currently running threads
	queue       *prque.Prque[int64, *servingTask] // priority queue for waiting or suspended tasks
	best        *servingTask                      // the highest priority task (not included in the queue)
	suspendBias int64                             // priority bias against suspending an already running task
}

// servingTask represents a request serving task. Tasks can be implemented to
// run in multiple steps, allowing the serving queue to suspend execution between
// steps if higher priority tasks are entered. Tasks are created with newTask,
// started with start, optionally suspended between steps with waitOrStop and
// finished with done. A greater priority value means higher priority; priority
// values can wrap around the int64 range.
type servingTask struct {
	sq                                       *servingQueue
	servingTime, timeAdded, maxTime, expTime uint64
	peer                                     *clientPeer
	priority                                 int64
	biasAdded                                bool
	token                                    runToken
	tokenCh                                  chan runToken
}

// runToken received by servingTask.start allows the task to run. Closing the
// channel in servingTask.done signals the thread controller to allow a new task
// to start running.
type runToken chan struct{}

// start blocks until the task can start and returns true if it is allowed to run.
// Returning false means that the task should be cancelled.
func (t *servingTask) start() bool {
	if t.peer.isFrozen() {
		return false
	}
	t.tokenCh = make(chan runToken, 1)
	select {
	case t.sq.queueAddCh <- t:
	case <-t.sq.quit:
		return false
	}
	select {
	case t.token = <-t.tokenCh:
	case <-t.sq.quit:
		return false
	}
	if t.token == nil {
		return false
	}
	t.servingTime -= uint64(mclock.Now())
	return true
}

// done signals the thread controller about the task being finished and returns
// the total serving time of the task in nanoseconds.
func (t *servingTask) done() uint64 {
	t.servingTime += uint64(mclock.Now())
	close(t.token)
	diff := t.servingTime - t.timeAdded
	t.timeAdded = t.servingTime
	if t.expTime > diff {
		t.expTime -= diff
		t.sq.servingTimeDiff.Add(t.expTime)
	} else {
		t.expTime = 0
	}
	return t.servingTime
}

// waitOrStop can be called during the execution of the task. It blocks if there
// is a higher priority task waiting (a bias is applied in favor of the currently
// running task). Returning true means that the execution can be resumed. False
// means the task should be cancelled.
func (t *servingTask) waitOrStop() bool {
	t.done()
	if !t.biasAdded {
		t.priority += t.sq.suspendBias
		t.biasAdded = true
	}
	return t.start()
}
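
// runSketchTask is an illustrative sketch, not part of the original file: it
// shows how a caller is expected to drive a servingTask through its lifecycle
// (newTask, start, waitOrStop between steps, done at the end). The step
// callback is a hypothetical stand-in for one unit of request processing and
// returns false when no more work is left.
func runSketchTask(sq *servingQueue, p *clientPeer, maxTime uint64, priority int64, step func() bool) (served uint64, ok bool) {
	task := sq.newTask(p, maxTime, priority)
	if !task.start() {
		return 0, false // peer is frozen or the queue is shutting down
	}
	for step() {
		// Between steps, yield to higher priority tasks; a false return means
		// the suspended task has been cancelled (e.g. its peer got frozen).
		if !task.waitOrStop() {
			return 0, false
		}
	}
	// done reports the total serving time in nanoseconds.
	return task.done(), true
}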

// newServingQueue returns a new servingQueue
func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {
	sq := &servingQueue{
		queue:          prque.New[int64, *servingTask](nil),
		suspendBias:    suspendBias,
		queueAddCh:     make(chan *servingTask, 100),
		queueBestCh:    make(chan *servingTask),
		stopThreadCh:   make(chan struct{}),
		quit:           make(chan struct{}),
		setThreadsCh:   make(chan int, 10),
		burstLimit:     uint64(utilTarget * bufLimitRatio * 1200000),
		burstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000),
		burstDecRate:   utilTarget,
		lastUpdate:     mclock.Now(),
	}
	sq.wg.Add(2)
	go sq.queueLoop()
	go sq.threadCountLoop()
	return sq
}
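
// startSketchQueue is an illustrative sketch, not part of the original file:
// it wires up a serving queue with example parameters, allows four tasks to be
// served concurrently and returns the queue together with its stop function.
// The 10ms suspend bias and the 3.0 utilisation target are arbitrary example
// values, not the ones used elsewhere in the package.
func startSketchQueue() (*servingQueue, func()) {
	const sketchSuspendBias = 10 * 1000 * 1000 // 10ms expressed in nanoseconds
	sq := newServingQueue(sketchSuspendBias, 3.0)
	sq.setThreads(4) // number of concurrently running thread controllers
	return sq, sq.stop
}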

// newTask creates a new task with the given priority
func (sq *servingQueue) newTask(peer *clientPeer, maxTime uint64, priority int64) *servingTask {
	return &servingTask{
		sq:       sq,
		peer:     peer,
		maxTime:  maxTime,
		expTime:  maxTime,
		priority: priority,
	}
}

// threadController is started in multiple goroutines and controls the execution
// of tasks. The number of active thread controllers equals the allowed number of
// concurrently running threads. Each controller receives the current highest
// priority task from the queue loop, hands it a run token and then waits for the
// token channel to be closed (by done) before accepting the next task.
func (sq *servingQueue) threadController() {
	defer sq.wg.Done()
	for {
		token := make(runToken)
		select {
		case best := <-sq.queueBestCh:
			best.tokenCh <- token
		case <-sq.stopThreadCh:
			return
		case <-sq.quit:
			return
		}
		select {
		case <-sq.stopThreadCh:
			return
		case <-sq.quit:
			return
		case <-token:
		}
	}
}

// peerTasks lists the tasks received from a given peer when selecting peers to freeze
type peerTasks struct {
	peer     *clientPeer
	list     []*servingTask
	sumTime  uint64
	priority float64
}

// freezePeers selects the peers with the lowest priority (smallest relative flow
// control buffer) and freezes their queued tasks until the estimated serving
// burst (recentTime+queuedTime) drops below burstDropLimit or all peers are frozen
func (sq *servingQueue) freezePeers() {
	peerMap := make(map[*clientPeer]*peerTasks)
	var peerList []*peerTasks
	if sq.best != nil {
		sq.queue.Push(sq.best, sq.best.priority)
	}
	sq.best = nil
	for sq.queue.Size() > 0 {
		task := sq.queue.PopItem()
		tasks := peerMap[task.peer]
		if tasks == nil {
			bufValue, bufLimit := task.peer.fcClient.BufferStatus()
			if bufLimit < 1 {
				bufLimit = 1
			}
			tasks = &peerTasks{
				peer:     task.peer,
				priority: float64(bufValue) / float64(bufLimit), // lower value comes first
			}
			peerMap[task.peer] = tasks
			peerList = append(peerList, tasks)
		}
		tasks.list = append(tasks.list, task)
		tasks.sumTime += task.expTime
	}
	slices.SortFunc(peerList, func(a, b *peerTasks) int {
		if a.priority < b.priority {
			return -1
		}
		if a.priority > b.priority {
			return 1
		}
		return 0
	})
	drop := true
	for _, tasks := range peerList {
		if drop {
			tasks.peer.freeze()
			tasks.peer.fcClient.Freeze()
			sq.queuedTime -= tasks.sumTime
			sqQueuedGauge.Update(int64(sq.queuedTime))
			clientFreezeMeter.Mark(1)
			drop = sq.recentTime+sq.queuedTime > sq.burstDropLimit
			for _, task := range tasks.list {
				task.tokenCh <- nil
			}
		} else {
			for _, task := range tasks.list {
				sq.queue.Push(task, task.priority)
			}
		}
	}
	if sq.queue.Size() > 0 {
		sq.best = sq.queue.PopItem()
	}
}
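
// Note that freezing stops only when recentTime+queuedTime has dropped below
// burstDropLimit, which newServingQueue sets below burstLimit (factor 1000000
// vs 1200000 on the same base). The gap provides hysteresis, so peers are not
// frozen and unfrozen in rapid succession around a single threshold.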

// updateRecentTime recalculates the recent serving time value
func (sq *servingQueue) updateRecentTime() {
	subTime := sq.servingTimeDiff.Swap(0)
	now := mclock.Now()
	dt := now - sq.lastUpdate
	sq.lastUpdate = now
	if dt > 0 {
		subTime += uint64(float64(dt) * sq.burstDecRate)
	}
	if sq.recentTime > subTime {
		sq.recentTime -= subTime
	} else {
		sq.recentTime = 0
	}
}
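
// Each updateRecentTime call effectively applies
//
//	recentTime = max(0, recentTime - servingTimeDiff - dt*burstDecRate)
//
// where servingTimeDiff is the unused expected serving time reported back by
// finished task steps in done and burstDecRate equals the utilisation target,
// so the recent serving time estimate decays linearly with wall-clock time.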

// addTask inserts a task into the priority queue, updates the served/queued
// time statistics and triggers freezePeers if the burst limit is exceeded
func (sq *servingQueue) addTask(task *servingTask) {
	if sq.best == nil {
		sq.best = task
	} else if task.priority-sq.best.priority > 0 {
		sq.queue.Push(sq.best, sq.best.priority)
		sq.best = task
	} else {
		sq.queue.Push(task, task.priority)
	}
	sq.updateRecentTime()
	sq.queuedTime += task.expTime
	sqServedGauge.Update(int64(sq.recentTime))
	sqQueuedGauge.Update(int64(sq.queuedTime))
	if sq.recentTime+sq.queuedTime > sq.burstLimit {
		sq.freezePeers()
	}
}

// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh
// and always tries to send the highest priority task to queueBestCh. Successfully sent
// tasks are removed from the queue.
func (sq *servingQueue) queueLoop() {
	defer sq.wg.Done()
	for {
		if sq.best != nil {
			expTime := sq.best.expTime
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case sq.queueBestCh <- sq.best:
				sq.updateRecentTime()
				sq.queuedTime -= expTime
				sq.recentTime += expTime
				sqServedGauge.Update(int64(sq.recentTime))
				sqQueuedGauge.Update(int64(sq.queuedTime))
				if sq.queue.Size() == 0 {
					sq.best = nil
				} else {
					sq.best = sq.queue.PopItem()
				}
			case <-sq.quit:
				return
			}
		} else {
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case <-sq.quit:
				return
			}
		}
	}
}

// threadCountLoop is an event loop running in a goroutine. It adjusts the number
// of active thread controller goroutines.
func (sq *servingQueue) threadCountLoop() {
	var threadCountTarget int
	defer sq.wg.Done()
	for {
		for threadCountTarget > sq.threadCount {
			sq.wg.Add(1)
			go sq.threadController()
			sq.threadCount++
		}
		if threadCountTarget < sq.threadCount {
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case sq.stopThreadCh <- struct{}{}:
				sq.threadCount--
			case <-sq.quit:
				return
			}
		} else {
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case <-sq.quit:
				return
			}
		}
	}
}

// setThreads sets the allowed processing thread count, suspending tasks as soon as
// possible if necessary.
func (sq *servingQueue) setThreads(threadCount int) {
	select {
	case sq.setThreadsCh <- threadCount:
	case <-sq.quit:
		return
	}
}

// stop stops task processing as soon as possible and shuts down the serving queue.
func (sq *servingQueue) stop() {
	close(sq.quit)
	sq.wg.Wait()
}