github.com/afumu/libc@v0.0.6/pthread.go

     1  // Copyright 2021 The Libc Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package libc // import "github.com/afumu/libc"
     6  
     7  import (
     8  	"runtime"
     9  	"sync"
    10  	"sync/atomic"
    11  	"time"
    12  	"unsafe"
    13  
    14  	"github.com/afumu/libc/errno"
    15  	"github.com/afumu/libc/pthread"
    16  	"github.com/afumu/libc/sys/types"
    17  	ctime "github.com/afumu/libc/time"
    18  )
    19  
    20  var (
    21  	mutexes   = map[uintptr]*mutex{}
    22  	mutexesMu sync.Mutex
    23  
    24  	threads   = map[int32]*TLS{}
    25  	threadsMu sync.Mutex
    26  
    27  	threadKey            pthread.Pthread_key_t
    28  	threadKeyDestructors = map[pthread.Pthread_key_t][]uintptr{} // key: []destructor
    29  	threadsKeysMu        sync.Mutex
    30  
    31  	conds   = map[uintptr]*cond{}
    32  	condsMu sync.Mutex
    33  )
    34  
    35  // TLS is the thread local storage of a single thread.
    36  type TLS struct {
    37  	errnop uintptr
    38  	pthreadData
    39  	stack stackHeader
    40  
    41  	ID                 int32
    42  	reentryGuard       int32 // memgrind
    43  	stackHeaderBalance int32
    44  }
    45  
    46  var errno0 int32 // Temp errno for NewTLS
    47  
    48  func NewTLS() *TLS {
    49  	return newTLS(false)
    50  }
    51  
    52  func newTLS(detached bool) *TLS {
    53  	id := atomic.AddInt32(&tid, 1)
    54  	t := &TLS{ID: id, errnop: uintptr(unsafe.Pointer(&errno0))}
    55  	t.pthreadData.init(t, detached)
    56  	if memgrind {
    57  		atomic.AddInt32(&tlsBalance, 1)
    58  	}
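        	// errnop temporarily points at the shared errno0 above so that code running
        	// during TLS setup has a valid errno location; replace it now with a private,
        	// zeroed per-thread cell.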
    59  	t.errnop = t.Alloc(int(unsafe.Sizeof(int32(0))))
    60  	*(*int32)(unsafe.Pointer(t.errnop)) = 0
    61  	return t
    62  }
    63  
    64  // Pthread specific part of a TLS.
    65  type pthreadData struct {
    66  	done   chan struct{}
    67  	kv     map[pthread.Pthread_key_t]uintptr
    68  	retVal uintptr
    69  	wait   chan struct{} // cond var interaction
    70  
    71  	detached bool
    72  }
    73  
    74  func (d *pthreadData) init(t *TLS, detached bool) {
    75  	d.detached = detached
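        	// The one-slot buffer lets a signaler post a wakeup without blocking, even if
        	// this thread has not yet reached the channel receive in Xpthread_cond_wait.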
    76  	d.wait = make(chan struct{}, 1)
    77  	if detached {
    78  		return
    79  	}
    80  
    81  	d.done = make(chan struct{})
    82  
    83  	threadsMu.Lock()
    84  
    85  	defer threadsMu.Unlock()
    86  
    87  	threads[t.ID] = t
    88  }
    89  
    90  func (d *pthreadData) close(t *TLS) {
    91  	threadsMu.Lock()
    92  
    93  	defer threadsMu.Unlock()
    94  
    95  	delete(threads, t.ID)
    96  }
    97  
    98  // int pthread_attr_destroy(pthread_attr_t *attr);
    99  func Xpthread_attr_destroy(t *TLS, pAttr uintptr) int32 {
   100  	return 0
   101  }
   102  
   103  // int pthread_attr_setscope(pthread_attr_t *attr, int contentionscope);
   104  func Xpthread_attr_setscope(t *TLS, pAttr uintptr, contentionScope int32) int32 {
   105  	switch contentionScope {
   106  	case pthread.PTHREAD_SCOPE_SYSTEM:
   107  		return 0
   108  	default:
   109  		panic(todo("", contentionScope))
   110  	}
   111  }
   112  
   113  // int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
   114  func Xpthread_attr_setstacksize(t *TLS, attr uintptr, stackSize types.Size_t) int32 {
   115  	panic(todo(""))
   116  }
   117  
   118  // Go side data of pthread_cond_t.
   119  type cond struct {
   120  	sync.Mutex
   121  	waiters map[*TLS]struct{}
   122  }
   123  
   124  func newCond() *cond {
   125  	return &cond{
   126  		waiters: map[*TLS]struct{}{},
   127  	}
   128  }
   129  
   130  func (c *cond) signal(all bool) int32 {
   131  	if c == nil {
   132  		return errno.EINVAL
   133  	}
   134  
   135  	c.Lock()
   136  
   137  	defer c.Unlock()
   138  
   139  	// The pthread_cond_broadcast() and pthread_cond_signal() functions shall have
   140  	// no effect if there are no threads currently blocked on cond.
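        	// Map iteration order is unspecified, so a plain signal wakes one arbitrary
        	// waiter; a broadcast (all == true) drains the whole set.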
   141  	for tls := range c.waiters {
   142  		tls.wait <- struct{}{}
   143  		delete(c.waiters, tls)
   144  		if !all {
   145  			break
   146  		}
   147  	}
   148  	return 0
   149  }
   150  
   151  // The pthread_cond_init() function shall initialize the condition variable
   152  // referenced by cond with attributes referenced by attr. If attr is NULL, the
   153  // default condition variable attributes shall be used; the effect is the same
   154  // as passing the address of a default condition variable attributes object.
   155  // Upon successful initialization, the state of the condition variable shall
   156  // become initialized.
   157  //
   158  // If successful, the pthread_cond_destroy() and pthread_cond_init() functions
   159  // shall return zero; otherwise, an error number shall be returned to indicate
   160  // the error.
   161  //
   162  // int pthread_cond_init(pthread_cond_t *restrict cond, const pthread_condattr_t *restrict attr);
   163  func Xpthread_cond_init(t *TLS, pCond, pAttr uintptr) int32 {
   164  	if pCond == 0 {
   165  		return errno.EINVAL
   166  	}
   167  
   168  	if pAttr != 0 {
   169  		panic(todo("%#x %#x", pCond, pAttr))
   170  	}
   171  
   172  	condsMu.Lock()
   173  
   174  	defer condsMu.Unlock()
   175  
   176  	conds[pCond] = newCond()
   177  	return 0
   178  }
   179  
   180  // int pthread_cond_destroy(pthread_cond_t *cond);
   181  func Xpthread_cond_destroy(t *TLS, pCond uintptr) int32 {
   182  	if pCond == 0 {
   183  		return errno.EINVAL
   184  	}
   185  
   186  	condsMu.Lock()
   187  
   188  	defer condsMu.Unlock()
   189  
   190  	cond := conds[pCond]
   191  	if cond == nil {
   192  		return errno.EINVAL
   193  	}
   194  
   195  	cond.Lock()
   196  
   197  	defer cond.Unlock()
   198  
   199  	if len(cond.waiters) != 0 {
   200  		return errno.EBUSY
   201  	}
   202  
   203  	delete(conds, pCond)
   204  	return 0
   205  }
   206  
   207  // int pthread_cond_signal(pthread_cond_t *cond);
   208  func Xpthread_cond_signal(t *TLS, pCond uintptr) int32 {
   209  	return condSignal(pCond, false)
   210  }
   211  
   212  // int pthread_cond_broadcast(pthread_cond_t *cond);
   213  func Xpthread_cond_broadcast(t *TLS, pCond uintptr) int32 {
   214  	return condSignal(pCond, true)
   215  }
   216  
   217  func condSignal(pCond uintptr, all bool) int32 {
   218  	if pCond == 0 {
   219  		return errno.EINVAL
   220  	}
   221  
   222  	condsMu.Lock()
   223  	cond := conds[pCond]
   224  	condsMu.Unlock()
   225  
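        	// cond is nil when pCond was never initialized or waited on; signal handles
        	// the nil receiver and reports EINVAL in that case.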
   226  	return cond.signal(all)
   227  }
   228  
   229  // int pthread_cond_wait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex);
   230  func Xpthread_cond_wait(t *TLS, pCond, pMutex uintptr) int32 {
   231  	if pCond == 0 {
   232  		return errno.EINVAL
   233  	}
   234  
   235  	condsMu.Lock()
   236  	cond := conds[pCond]
   237  	if cond == nil { // statically initialized condition variables are valid
   238  		cond = newCond()
   239  		conds[pCond] = cond
   240  	}
   241  
   242  	cond.Lock()
   243  	cond.waiters[t] = struct{}{}
   244  	cond.Unlock()
   245  
   246  	condsMu.Unlock()
   247  
   248  	mutexesMu.Lock()
   249  	mu := mutexes[pMutex]
   250  	mutexesMu.Unlock()
   251  
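        	// The waiter is already registered in cond.waiters, so a wakeup posted after
        	// the mutex is released below cannot be lost (t.wait is buffered); block until
        	// signaled, then reacquire the mutex before returning, as POSIX requires.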
   252  	mu.Unlock()
   253  	<-t.wait
   254  	mu.Lock()
   255  	return 0
   256  }
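
        // A minimal, illustrative sketch of how ccgo-translated C code might drive the
        // condition variable emulation above; tls is the caller's *TLS and pCond,
        // pMutex and pReady are hypothetical pointers into C memory, not part of this
        // package:
        //
        //	libc.Xpthread_mutex_lock(tls, pMutex)
        //	for *(*int32)(unsafe.Pointer(pReady)) == 0 {
        //		libc.Xpthread_cond_wait(tls, pCond, pMutex)
        //	}
        //	libc.Xpthread_mutex_unlock(tls, pMutex)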
   257  
   258  // int pthread_cond_timedwait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex, const struct timespec *restrict abstime);
   259  func Xpthread_cond_timedwait(t *TLS, pCond, pMutex, pAbsTime uintptr) int32 {
   260  	if pCond == 0 {
   261  		return errno.EINVAL
   262  	}
   263  
   264  	condsMu.Lock()
   265  	cond := conds[pCond]
   266  	if cond == nil { // statically initialized condition variables are valid
   267  		cond = newCond()
   268  		conds[pCond] = cond
   269  	}
   270  
   271  	cond.Lock()
   272  	cond.waiters[t] = struct{}{}
   273  	cond.Unlock()
   274  
   275  	condsMu.Unlock()
   276  
   277  	mutexesMu.Lock()
   278  	mu := mutexes[pMutex]
   279  	mutexesMu.Unlock()
   280  
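        	// pAbsTime is an absolute struct timespec deadline; convert it to a Go
        	// time.Time and then to the duration remaining from now.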
   281  	deadlineSecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_sec
   282  	deadlineNsecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_nsec
   283  	deadline := time.Unix(int64(deadlineSecs), int64(deadlineNsecs))
   284  	d := time.Until(deadline)
   285  	switch {
   286  	case d <= 0:
   287  		return errno.ETIMEDOUT
   288  	default:
   289  		to := time.After(d)
   290  		mu.Unlock()
   291  
   292  		defer mu.Lock()
   293  
   294  		select {
   295  		case <-t.wait:
   296  			return 0
   297  		case <-to:
   298  			cond.Lock()
   299  
   300  			defer cond.Unlock()
   301  
   302  			delete(cond.waiters, t)
   303  			return errno.ETIMEDOUT
   304  		}
   305  	}
   306  }
   307  
   308  // Go side data of pthread_mutex_t.
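        // For PTHREAD_MUTEX_NORMAL the embedded sync.Mutex is the lock itself; for
        // PTHREAD_MUTEX_RECURSIVE it only guards the fields below and wait is held for
        // as long as some thread owns the pthread mutex.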
   309  type mutex struct {
   310  	sync.Mutex
   311  	typ  int // PTHREAD_MUTEX_NORMAL, ...
   312  	wait sync.Mutex
   313  
   314  	id  int32 // owner's t.ID
   315  	cnt int32
   316  
   317  	robust bool
   318  }
   319  
   320  func newMutex(typ int) *mutex {
   321  	return &mutex{
   322  		typ: typ,
   323  	}
   324  }
   325  
   326  func (m *mutex) lock(id int32) int32 {
   327  	if m.robust {
   328  		panic(todo(""))
   329  	}
   330  
   331  	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
   332  	// shall return zero; otherwise, an error number shall be returned to indicate
   333  	// the error.
   334  	switch m.typ {
   335  	case pthread.PTHREAD_MUTEX_NORMAL:
   336  		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
   337  		// provided. Attempting to relock the mutex causes deadlock. If a thread
   338  		// attempts to unlock a mutex that it has not locked or a mutex which is
   339  		// unlocked, undefined behavior results.
   340  		m.Lock()
   341  		m.id = id
   342  		return 0
   343  	case pthread.PTHREAD_MUTEX_RECURSIVE:
   344  		for {
   345  			m.Lock()
   346  			switch m.id {
   347  			case 0:
   348  				m.cnt = 1
   349  				m.id = id
   350  				m.wait.Lock()
   351  				m.Unlock()
   352  				return 0
   353  			case id:
   354  				m.cnt++
   355  				m.Unlock()
   356  				return 0
   357  			}
   358  
   359  			m.Unlock()
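        			// Owned by another thread: block on wait until unlock releases it, then
        			// retry the acquisition from the top of the loop.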
   360  			m.wait.Lock()
   361  			m.wait.Unlock()
   362  		}
   363  	default:
   364  		panic(todo("", m.typ))
   365  	}
   366  }
   367  
   368  func (m *mutex) tryLock(id int32) int32 {
   369  	if m.robust {
   370  		panic(todo(""))
   371  	}
   372  
   373  	switch m.typ {
   374  	case pthread.PTHREAD_MUTEX_NORMAL:
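        		// This implementation does not attempt to take a normal mutex here; it
        		// conservatively reports it as busy.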
   375  		return errno.EBUSY
   376  	case pthread.PTHREAD_MUTEX_RECURSIVE:
   377  		m.Lock()
   378  		switch m.id {
   379  		case 0:
   380  			m.cnt = 1
   381  			m.id = id
   382  			m.wait.Lock()
   383  			m.Unlock()
   384  			return 0
   385  		case id:
   386  			m.cnt++
   387  			m.Unlock()
   388  			return 0
   389  		}
   390  
   391  		m.Unlock()
   392  		return errno.EBUSY
   393  	default:
   394  		panic(todo("", m.typ))
   395  	}
   396  }
   397  
   398  func (m *mutex) unlock() int32 {
   399  	if m.robust {
   400  		panic(todo(""))
   401  	}
   402  
   403  	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
   404  	// shall return zero; otherwise, an error number shall be returned to indicate
   405  	// the error.
   406  	switch m.typ {
   407  	case pthread.PTHREAD_MUTEX_NORMAL:
   408  		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
   409  		// provided. Attempting to relock the mutex causes deadlock. If a thread
   410  		// attempts to unlock a mutex that it has not locked or a mutex which is
   411  		// unlocked, undefined behavior results.
   412  		m.id = 0
   413  		m.Unlock()
   414  		return 0
   415  	case pthread.PTHREAD_MUTEX_RECURSIVE:
   416  		m.Lock()
   417  		m.cnt--
   418  		if m.cnt == 0 {
   419  			m.id = 0
   420  			m.wait.Unlock()
   421  		}
   422  		m.Unlock()
   423  		return 0
   424  	default:
   425  		panic(todo("", m.typ))
   426  	}
   427  }
   428  
   429  // int pthread_mutex_destroy(pthread_mutex_t *mutex);
   430  func Xpthread_mutex_destroy(t *TLS, pMutex uintptr) int32 {
   431  	mutexesMu.Lock()
   432  
   433  	defer mutexesMu.Unlock()
   434  
   435  	delete(mutexes, pMutex)
   436  	return 0
   437  }
   438  
   439  // int pthread_mutex_lock(pthread_mutex_t *mutex);
   440  func Xpthread_mutex_lock(t *TLS, pMutex uintptr) int32 {
   441  	mutexesMu.Lock()
   442  	mu := mutexes[pMutex]
   443  	if mu == nil { // statically initialized mutexes are valid
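        		// The mutex was set up by a C static initializer rather than
        		// pthread_mutex_init; recover its type from the pthread_mutex_t memory.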
   444  		mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
   445  		mutexes[pMutex] = mu
   446  	}
   447  	mutexesMu.Unlock()
   448  	return mu.lock(t.ID)
   449  }
   450  
   451  // int pthread_mutex_trylock(pthread_mutex_t *mutex);
   452  func Xpthread_mutex_trylock(t *TLS, pMutex uintptr) int32 {
   453  	mutexesMu.Lock()
   454  	mu := mutexes[pMutex]
   455  	if mu == nil { // statically initialized mutexes are valid
   456  		mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
   457  		mutexes[pMutex] = mu
   458  	}
   459  	mutexesMu.Unlock()
   460  	return mu.tryLock(t.ID)
   461  }
   462  
   463  // int pthread_mutex_unlock(pthread_mutex_t *mutex);
   464  func Xpthread_mutex_unlock(t *TLS, pMutex uintptr) int32 {
   465  	mutexesMu.Lock()
   466  
   467  	defer mutexesMu.Unlock()
   468  
   469  	return mutexes[pMutex].unlock()
   470  }
   471  
   472  // int pthread_key_create(pthread_key_t *key, void (*destructor)(void*));
   473  func Xpthread_key_create(t *TLS, pKey, destructor uintptr) int32 {
   474  	threadsKeysMu.Lock()
   475  
   476  	defer threadsKeysMu.Unlock()
   477  
   478  	threadKey++
   479  	r := threadKey
   480  	if destructor != 0 {
   481  		threadKeyDestructors[r] = append(threadKeyDestructors[r], destructor)
   482  	}
   483  	*(*pthread.Pthread_key_t)(unsafe.Pointer(pKey)) = pthread.Pthread_key_t(r)
   484  	return 0
   485  }
   486  
   487  // int pthread_key_delete(pthread_key_t key);
   488  func Xpthread_key_delete(t *TLS, key pthread.Pthread_key_t) int32 {
   489  	if _, ok := t.kv[key]; ok {
   490  		delete(t.kv, key)
   491  		return 0
   492  	}
   493  
   494  	panic(todo(""))
   495  
   496  }
   497  
   498  // void *pthread_getspecific(pthread_key_t key);
   499  func Xpthread_getspecific(t *TLS, key pthread.Pthread_key_t) uintptr {
   500  	return t.kv[key]
   501  }
   502  
   503  // int pthread_setspecific(pthread_key_t key, const void *value);
   504  func Xpthread_setspecific(t *TLS, key pthread.Pthread_key_t, value uintptr) int32 {
   505  	if t.kv == nil {
   506  		t.kv = map[pthread.Pthread_key_t]uintptr{}
   507  	}
   508  	t.kv[key] = value
   509  	return 0
   510  }
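
        // A minimal, illustrative sketch of thread-specific data access as translated
        // code might issue it; tls is the caller's *TLS and pKey, pValue are
        // hypothetical pointers into C memory:
        //
        //	libc.Xpthread_key_create(tls, pKey, 0) // no destructor
        //	key := *(*pthread.Pthread_key_t)(unsafe.Pointer(pKey))
        //	libc.Xpthread_setspecific(tls, key, pValue)
        //	p := libc.Xpthread_getspecific(tls, key)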
   511  
   512  // int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
   513  func Xpthread_create(t *TLS, pThread, pAttr, startRoutine, arg uintptr) int32 {
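        	// startRoutine arrives as a C function pointer value; reinterpret it as a Go
        	// func so that it can be called with the new thread's TLS and arg.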
   514  	fn := (*struct {
   515  		f func(*TLS, uintptr) uintptr
   516  	})(unsafe.Pointer(&struct{ uintptr }{startRoutine})).f
   517  	detached := pAttr != 0 && X__ccgo_pthreadAttrGetDetachState(t, pAttr) == pthread.PTHREAD_CREATE_DETACHED
   518  	tls := newTLS(detached)
   519  	*(*pthread.Pthread_t)(unsafe.Pointer(pThread)) = pthread.Pthread_t(tls.ID)
   520  
   521  	go func() {
   522  		Xpthread_exit(tls, fn(tls, arg))
   523  	}()
   524  
   525  	return 0
   526  }
   527  
   528  // int pthread_detach(pthread_t thread);
   529  func Xpthread_detach(t *TLS, thread pthread.Pthread_t) int32 {
   530  	threadsMu.Lock()
   531  	threads[int32(thread)].detached = true
   532  	threadsMu.Unlock()
   533  	return 0
   534  }
   535  
   536  // int pthread_equal(pthread_t t1, pthread_t t2);
   537  func Xpthread_equal(t *TLS, t1, t2 pthread.Pthread_t) int32 {
   538  	return Bool32(t1 == t2)
   539  }
   540  
   541  // void pthread_exit(void *value_ptr);
   542  func Xpthread_exit(t *TLS, value uintptr) {
   543  	t.retVal = value
   544  
   545  	// At thread exit, if a key value has a non-NULL destructor pointer, and the
   546  	// thread has a non-NULL value associated with that key, the value of the key
   547  	// is set to NULL, and then the function pointed to is called with the
   548  	// previously associated value as its sole argument. The order of destructor
   549  	// calls is unspecified if more than one destructor exists for a thread when it
   550  	// exits.
   551  	for k, v := range t.kv {
   552  		if v == 0 {
   553  			continue
   554  		}
   555  
   556  		threadsKeysMu.Lock()
   557  		destructors := threadKeyDestructors[k]
   558  		threadsKeysMu.Unlock()
   559  
   560  		for _, destructor := range destructors {
   561  			delete(t.kv, k)
   562  			panic(todo("%#x", destructor)) //TODO call destructor(v)
   563  		}
   564  	}
   565  
   566  	switch {
   567  	case t.detached:
   568  		threadsMu.Lock()
   569  		delete(threads, t.ID)
   570  		threadsMu.Unlock()
   571  	default:
   572  		close(t.done)
   573  	}
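        	// Terminate only this goroutine; deferred functions still run. A joiner, if
        	// any, has been released above by closing t.done.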
   574  	runtime.Goexit()
   575  }
   576  
   577  // int pthread_join(pthread_t thread, void **value_ptr);
   578  func Xpthread_join(t *TLS, thread pthread.Pthread_t, pValue uintptr) int32 {
   579  	threadsMu.Lock()
   580  	tls := threads[int32(thread)]
   581  	delete(threads, int32(thread))
   582  	threadsMu.Unlock()
   583  	<-tls.done
   584  	if pValue != 0 {
   585  		*(*uintptr)(unsafe.Pointer(pValue)) = tls.retVal
   586  	}
   587  	return 0
   588  }
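
        // A minimal, illustrative sketch of creating and joining a thread as translated
        // code might issue it; tls is the caller's *TLS and pThread, pStart, pArg and
        // pRet are hypothetical pointers into C memory:
        //
        //	libc.Xpthread_create(tls, pThread, 0, pStart, pArg)
        //	libc.Xpthread_join(tls, *(*pthread.Pthread_t)(unsafe.Pointer(pThread)), pRet)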
   589  
   590  // pthread_t pthread_self(void);
   591  func Xpthread_self(t *TLS) pthread.Pthread_t {
   592  	return pthread.Pthread_t(t.ID)
   593  }