github.com/afumu/libc@v0.0.6/musl/src/thread/pthread_cond_timedwait.c (about)

     1  #include "pthread_impl.h"
     2  
     3  /*
     4   * struct waiter
     5   *
     6   * Waiter objects have automatic storage on the waiting thread, and
     7   * are used in building a linked list representing waiters currently
     8   * waiting on the condition variable or a group of waiters woken
     9   * together by a broadcast or signal; in the case of signal, this is a
    10   * degenerate list of one member.
    11   *
    12   * Waiter lists attached to the condition variable itself are
    13   * protected by the lock on the cv. Detached waiter lists are never
    14   * modified again, but can only be traversed in reverse order, and are
    15   * protected by the "barrier" locks in each node, which are unlocked
    16   * in turn to control wake order.
    17   *
    18   * Since process-shared cond var semantics do not necessarily allow
    19   * one thread to see another's automatic storage (they may be in
    20   * different processes), the waiter list is not used for the
    21   * process-shared case, but the structure is still used to store data
    22   * needed by the cancellation cleanup handler.
    23   */
    24  
struct waiter {
	/* Doubly-linked list links. New nodes are pushed at the head;
	 * prev points toward the head (newer waiters), next toward the
	 * tail (older waiters). */
	struct waiter *prev, *next;
	/* state: WAITING/SIGNALED/LEAVING (see enum below).
	 * barrier: per-node futex lock used to gate wake order; it is
	 * initialized to 2 ("locked with waiters") at enqueue time. */
	volatile int state, barrier;
	/* Set by a signaler that found this node already LEAVING: points
	 * at the signaler's reference counter, which the leaving waiter
	 * decrements (and wakes on zero) after unlinking itself. */
	volatile int *notify;
};
    30  
    31  /* Self-synchronized-destruction-safe lock functions */
    32  
    33  static inline void lock(volatile int *l)
    34  {
    35  	if (a_cas(l, 0, 1)) {
    36  		a_cas(l, 1, 2);
    37  		do __wait(l, 0, 2, 1);
    38  		while (a_cas(l, 0, 2));
    39  	}
    40  }
    41  
    42  static inline void unlock(volatile int *l)
    43  {
    44  	if (a_swap(l, 0)==2)
    45  		__wake(l, 1, 1);
    46  }
    47  
    48  static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
    49  {
    50  	a_store(l, 0);
    51  	if (w) __wake(l, 1, 1);
    52  	else __syscall(SYS_futex, l, FUTEX_REQUEUE|FUTEX_PRIVATE, 0, 1, r) != -ENOSYS
    53  		|| __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
    54  }
    55  
/* Values for struct waiter .state. */
enum {
	WAITING,	/* enqueued; not yet claimed by a signaler */
	SIGNALED,	/* claimed by signal/broadcast; will be woken in turn */
	LEAVING,	/* timed out or cancelled; unlinking itself */
};
    61  
/* Wait on condition variable c with mutex m held, until signaled,
 * the absolute timeout ts (against c's clock) expires, or the thread
 * is cancelled. Returns 0, ETIMEDOUT, ECANCELED (musl extension when
 * cancellation is masked), or a mutex-relock error. */
int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
{
	struct waiter node = { 0 };
	int e, seq, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
	volatile int *fut;

	/* For non-normal (error-checking/recursive) mutex types the
	 * caller must own the mutex; report EPERM otherwise. */
	if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
		return EPERM;

	if (ts && ts->tv_nsec >= 1000000000UL)
		return EINVAL;

	__pthread_testcancel();

	if (c->_c_shared) {
		/* Process-shared: other waiters' automatic storage may not
		 * be visible, so no waiter list is used. Wait directly on
		 * the cv's sequence number, and count waiters so a
		 * destroying thread can be notified (see below). */
		shared = 1;
		fut = &c->_c_seq;
		seq = c->_c_seq;
		a_inc(&c->_c_waiters);
	} else {
		/* Process-private: push this node at the head of the cv's
		 * waiter list and wait on the node's own barrier futex. */
		lock(&c->_c_lock);

		/* 2 is the "locked with waiters" value, so the signaler's
		 * later unlock/requeue of the barrier takes the slow path. */
		seq = node.barrier = 2;
		fut = &node.barrier;
		node.state = WAITING;
		node.next = c->_c_head;
		c->_c_head = &node;
		if (!c->_c_tail) c->_c_tail = &node;
		else node.next->prev = &node;

		unlock(&c->_c_lock);
	}

	__pthread_mutex_unlock(m);

	/* Mask cancellation so it is reported as ECANCELED from the wait
	 * rather than acted on asynchronously; if it was disabled,
	 * restore the disabled state immediately. */
	__pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
	if (cs == PTHREAD_CANCEL_DISABLE) __pthread_setcancelstate(cs, 0);

	/* Block until the futex word changes from seq (signal consumed
	 * or barrier released) or a terminal error (ETIMEDOUT/ECANCELED)
	 * occurs; EINTR wakes are retried and not reported. */
	do e = __timedwait_cp(fut, seq, clock, ts, !shared);
	while (*fut==seq && (!e || e==EINTR));
	if (e == EINTR) e = 0;

	if (shared) {
		/* Suppress cancellation if a signal was potentially
		 * consumed; this is a legitimate form of spurious
		 * wake even if not. */
		if (e == ECANCELED && c->_c_seq != seq) e = 0;
		/* Drop the waiter count; the sentinel value indicates a
		 * thread blocked in destruction waiting for it to drain. */
		if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
			__wake(&c->_c_waiters, 1, 0);
		oldstate = WAITING;
		goto relock;
	}

	/* Race against signalers: either we transition WAITING->LEAVING
	 * and must unlink ourselves, or a signaler already claimed us. */
	oldstate = a_cas(&node.state, WAITING, LEAVING);

	if (oldstate == WAITING) {
		/* Access to cv object is valid because this waiter was not
		 * yet signaled and a new signal/broadcast cannot return
		 * after seeing a LEAVING waiter without getting notified
		 * via the futex notify below. */

		lock(&c->_c_lock);

		/* Unlink this node from the cv's waiter list. */
		if (c->_c_head == &node) c->_c_head = node.next;
		else if (node.prev) node.prev->next = node.next;
		if (c->_c_tail == &node) c->_c_tail = node.prev;
		else if (node.next) node.next->prev = node.prev;

		unlock(&c->_c_lock);

		/* A signaler saw us LEAVING and is blocked until all such
		 * waiters have unlinked; drop its reference count and wake
		 * it when we are the last. */
		if (node.notify) {
			if (a_fetch_add(node.notify, -1)==1)
				__wake(node.notify, 1, 1);
		}
	} else {
		/* Lock barrier first to control wake order. */
		lock(&node.barrier);
	}

relock:
	/* Errors locking the mutex override any existing error or
	 * cancellation, since the caller must see them to know the
	 * state of the mutex. */
	if ((tmp = pthread_mutex_lock(m))) e = tmp;

	if (oldstate == WAITING) goto done;

	/* Signaled path: this node was detached as part of a woken group.
	 * The tail-most (first-woken) member bumps the mutex waiter count
	 * on behalf of the group being requeued onto the mutex. */
	if (!node.next) a_inc(&m->_m_waiters);

	/* Unlock the barrier that's holding back the next waiter, and
	 * either wake it or requeue it to the mutex. */
	if (node.prev)
		unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & 128);
	else
		a_dec(&m->_m_waiters);

	/* Since a signal was consumed, cancellation is not permitted. */
	if (e == ECANCELED) e = 0;

done:
	__pthread_setcancelstate(cs, 0);

	/* If cancellation is being reported, give a pending cancel a
	 * chance to act, then disable cancellation so the unwind of the
	 * ECANCELED return cannot itself be cancelled. */
	if (e == ECANCELED) {
		__pthread_testcancel();
		__pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
	}

	return e;
}
   171  
/* Signal/broadcast implementation for process-private condition
 * variables: claim up to n waiters from the tail (oldest end) of the
 * waiter list, detach them as a group, and release the first one's
 * barrier to start the wake chain (each woken waiter releases the
 * next; see __pthread_cond_timedwait). Always returns 0. */
int __private_cond_signal(pthread_cond_t *c, int n)
{
	struct waiter *p, *first=0;
	volatile int ref = 0;
	int cur;

	lock(&c->_c_lock);
	/* Walk tail -> head, claiming WAITING nodes until n have been
	 * claimed or the list is exhausted. */
	for (p=c->_c_tail; n && p; p=p->prev) {
		if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
			/* Node is LEAVING: request notification (via ref)
			 * once it has unlinked itself from the list. */
			ref++;
			p->notify = &ref;
		} else {
			n--;
			/* Remember the tail-most claimed waiter; its barrier
			 * is released first. */
			if (!first) first=p;
		}
	}
	/* Split the list, leaving any remainder on the cv. */
	if (p) {
		if (p->next) p->next->prev = 0;
		p->next = 0;
	} else {
		c->_c_head = 0;
	}
	c->_c_tail = p;
	unlock(&c->_c_lock);

	/* Wait for any waiters in the LEAVING state to remove
	 * themselves from the list before returning or allowing
	 * signaled threads to proceed. */
	while ((cur = ref)) __wait(&ref, 0, cur, 1);

	/* Allow first signaled waiter, if any, to proceed. */
	if (first) unlock(&first->barrier);

	return 0;
}
   208  
/* Expose the implementation under its public POSIX name. */
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);