github.com/coyove/common@v0.0.0-20240403014525-f70e643f9de8/sched/sched.go

package sched

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	_ "unsafe" // required by go:linkname below
)

// runtimeNano reads the runtime's monotonic clock in nanoseconds without
// going through time.Now.
//
//go:linkname runtimeNano runtime.nanotime
func runtimeNano() int64

// taskCanceled is a sentinel stored in place of a canceled payload.
var taskCanceled = new(int)

// timer is a one-second bucket of payloads sharing a single time.Timer.
type timer struct {
	real  *time.Timer
	lock  int32 // spinlock guarding tasks: 0 = unlocked, 1 = locked
	dead  int32 // set to 1 once the timer has fired
	tasks []any // queued payloads; canceled entries hold taskCanceled
}

// spinlock acquires t.lock, giving up (without the lock) once the timer is
// marked dead; callers must re-check t.dead after spinlock returns.
func (t *timer) spinlock() {
	for !atomic.CompareAndSwapInt32(&t.lock, 0, 1) {
		if atomic.LoadInt32(&t.dead) == 1 {
			return
		}
		runtime.Gosched()
	}
}

func (t *timer) spinunlock() {
	atomic.StoreInt32(&t.lock, 0)
}

// shard owns a set of per-second timer buckets keyed by deadline second.
type shard struct {
	timers sync.Map // map[time.Duration]*timer
	mgr    *Group
}

func (m *shard) start(d time.Duration, data any) Key {
	nano := time.Duration(runtimeNano())
	now := nano / time.Second
	at := (nano + d) / time.Second
	if at == now {
		// The deadline falls within the current second: fire immediately
		// and return the zero Key, which Cancel treats as a no-op.
		go m.mgr.wakeup([]any{data})
		return Key{}
	}

	// Payloads due in the same second share one timer bucket. A new bucket
	// is stored pre-locked (lock: 1) so its creator enters the critical
	// section below without racing other schedulers.
	v, loaded := m.timers.LoadOrStore(at, &timer{lock: 1})
	t := v.(*timer)
	if !loaded {
		// runtime.SetFinalizer(t, func(t *timer) {
		// 	atomic.StoreInt32(&t.dead, 1)
		// })
		t.real = time.AfterFunc(d, func() {
			t.spinlock()
			defer func() {
				atomic.StoreInt32(&t.dead, 1)
				t.spinunlock()
				m.timers.Delete(at)
			}()
			// Compact the task list in place: swap each canceled entry
			// with the last live entry, walking end leftward past canceled
			// ones, so that tasks[:end+1] holds only live payloads.
			end := len(t.tasks) - 1
			for i := 0; i <= end; i++ {
				if t.tasks[i] == taskCanceled {
					for ; end >= i; end-- {
						if t.tasks[end] != taskCanceled {
							t.tasks[end], t.tasks[i] = t.tasks[i], t.tasks[end]
							break
						}
					}
				}
			}
			tt := t.tasks[:end+1]
			if len(tt) > 0 {
				m.mgr.wakeup(tt)
			}
		})
	} else {
		t.spinlock()
	}
	i := len(t.tasks)
	t.tasks = append(t.tasks, data)
	t.spinunlock()
	return Key{tm: t, index: i}
}

// Key identifies a scheduled payload so it can be canceled later. The zero
// Key refers to a payload that was fired immediately and cannot be canceled.
type Key struct {
	tm    *timer
	index int
}

type Group struct {
	shards   []shard
	shardCtr atomic.Int64 // round-robin counter for shard selection
	wakeup   func([]any)
}

// NewGroup creates a schedule group where payloads can be queued and fired at
// a specific time. The wakeup function is called with each batch of fired
// payloads.
func NewGroup(wakeup func([]any)) *Group {
	m := &Group{
		shards: make([]shard, runtime.NumCPU()),
		wakeup: wakeup,
	}
	for i := range m.shards {
		m.shards[i].mgr = m
	}
	return m
}
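
// A minimal usage sketch, assuming the package is imported as "sched" and
// that printing fired payloads is all the handler needs to do:
//
//	g := sched.NewGroup(func(payloads []any) {
//		for _, p := range payloads {
//			fmt.Println("fired:", p)
//		}
//	})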

// Schedule schedules the payload to be fired after d. Deadlines are truncated
// to second granularity: payloads due within the same second share one timer,
// and if the deadline falls within the current second the payload is fired
// immediately and the zero Key is returned.
func (m *Group) Schedule(d time.Duration, payload any) Key {
	mgr := &m.shards[m.shardCtr.Add(1)%int64(len(m.shards))]
	return mgr.start(d, payload)
}

// Cancel cancels the payload associated with the key. The payload may already
// have been fired, or may fire concurrently with this call; callers should
// implement their own coordination if this matters. Canceling the zero Key is
// a no-op.
func (m *Group) Cancel(key Key) {
	if key.tm == nil {
		return
	}
	t := key.tm
	t.spinlock()
	defer t.spinunlock()
	if atomic.LoadInt32(&t.dead) == 1 {
		// The timer has already fired; nothing left to cancel.
		return
	}
	t.tasks[key.index] = taskCanceled
}
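
// A short end-to-end sketch of Schedule and Cancel; cancellation is
// best-effort and only suppresses delivery if it beats the firing timer:
//
//	g := sched.NewGroup(func(payloads []any) { fmt.Println(payloads) })
//	key := g.Schedule(2*time.Second, "hello")
//	g.Cancel(key) // "hello" is dropped unless the timer already fired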