github.com/grumpyhome/grumpy@v0.3.1-0.20201208125205-7b775405bdf1/grumpy-runtime-src/third_party/stdlib/test/test_threading.py

     1  # Very rudimentary test of threading module
     2  
     3  import test.test_support
     4  from test.test_support import verbose, cpython_only
     5  #from test.script_helper import assert_python_ok
     6  
     7  import random
     8  import re
     9  import sys
    10  #thread = test.test_support.import_module('thread')
    11  import thread
    12  #threading = test.test_support.import_module('threading')
    13  import threading
    14  import time
    15  import unittest
    16  import weakref
    17  import os
    18  #import subprocess
    19  #try:
    20  #    import _testcapi
    21  #except ImportError:
    22  _testcapi = None
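        # Adapted for grumpy: the imports of subprocess, _testcapi and
        # assert_python_ok above are commented out, _testcapi is forced to None,
        # and tests that depend on them (or on ctypes) carry @unittest.skip('grumpy').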
    23  
    24  from test import lock_tests
    25  
    26  # A trivial mutable counter.
    27  class Counter(object):
    28      def __init__(self):
    29          self.value = 0
    30      def inc(self):
    31          self.value += 1
    32      def dec(self):
    33          self.value -= 1
    34      def get(self):
    35          return self.value
    36  
    37  class TestThread(threading.Thread):
    38      def __init__(self, name, testcase, sema, mutex, nrunning):
    39          threading.Thread.__init__(self, name=name)
    40          self.testcase = testcase
    41          self.sema = sema
    42          self.mutex = mutex
    43          self.nrunning = nrunning
    44  
    45      def run(self):
    46          delay = random.random() / 10000.0
    47          if verbose:
    48              print 'task %s will run for %s usec' % (
    49                  self.name, delay * 1e6)
    50  
    51          with self.sema:
    52              with self.mutex:
    53                  self.nrunning.inc()
    54                  if verbose:
    55                      print self.nrunning.get(), 'tasks are running'
    56                  self.testcase.assertLessEqual(self.nrunning.get(), 3)
    57  
    58              time.sleep(delay)
    59              if verbose:
    60                  print 'task', self.name, 'done'
    61  
    62              with self.mutex:
    63                  self.nrunning.dec()
    64                  self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
    65                  if verbose:
    66                      print '%s is finished. %d tasks are running' % (
    67                          self.name, self.nrunning.get())
    68  
    69  class BaseTestCase(unittest.TestCase):
    70      def setUp(self):
    71          self._threads = test.test_support.threading_setup()
    72  
    73      def tearDown(self):
    74          test.test_support.threading_cleanup(*self._threads)
    75          test.test_support.reap_children()
    76  
    77  
    78  class ThreadTests(BaseTestCase):
    79  
    80      # Create a bunch of threads, let each do some work, wait until all are
    81      # done.
    82      def test_various_ops(self):
    83          # Each task sleeps for under 100 usec and at most 3 tasks run at
    84          # once (bounded by the semaphore below), so this finishes quickly.
    85          NUMTASKS = 10
    86  
    87          # no more than 3 of the 10 can run at once
    88          sema = threading.BoundedSemaphore(value=3)
    89          mutex = threading.RLock()
    90          numrunning = Counter()
    91  
    92          threads = []
    93  
    94          for i in range(NUMTASKS):
    95              t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
    96              threads.append(t)
    97              self.assertIsNone(t.ident)
    98              self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, initial\)>$')
    99              t.start()
   100  
   101          if verbose:
   102              print 'waiting for all tasks to complete'
   103          for t in threads:
   104              t.join(NUMTASKS)
   105              self.assertFalse(t.is_alive())
   106              self.assertNotEqual(t.ident, 0)
   107              self.assertIsNotNone(t.ident)
   108              self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, \w+ -?\d+\)>$')
   109          if verbose:
   110              print 'all tasks done'
   111          self.assertEqual(numrunning.get(), 0)
   112  
   113      def test_ident_of_no_threading_threads(self):
   114          # The ident must still work for the main thread and for dummy threads.
   115          self.assertIsNotNone(threading.currentThread().ident)
   116          def f():
   117              ident.append(threading.currentThread().ident)
   118              done.set()
   119          done = threading.Event()
   120          ident = []
   121          thread.start_new_thread(f, ())
   122          done.wait()
   123          self.assertIsNotNone(ident[0])
   124          # Kill the "immortal" _DummyThread
   125          del threading._active[ident[0]]
   126  
   127      # run with a small(ish) thread stack size (256kB)
   128      def test_various_ops_small_stack(self):
   129          if verbose:
   130              print 'with 256kB thread stack size...'
   131          try:
   132              threading.stack_size(262144)
   133          except thread.error:
   134              self.skipTest('platform does not support changing thread stack size')
   135          self.test_various_ops()
   136          threading.stack_size(0)
   137  
   138      # run with a large thread stack size (1MB)
   139      def test_various_ops_large_stack(self):
   140          if verbose:
   141              print 'with 1MB thread stack size...'
   142          try:
   143              threading.stack_size(0x100000)
   144          except thread.error:
   145              self.skipTest('platform does not support changing thread stack size')
   146          self.test_various_ops()
   147          threading.stack_size(0)
   148  
   149      def test_foreign_thread(self):
   150          # Check that a "foreign" thread can use the threading module.
   151          def f(mutex):
   152              # Calling current_thread() forces an entry for the foreign
   153              # thread to get made in the threading._active map.
   154              threading.current_thread()
   155              mutex.release()
   156  
   157          mutex = threading.Lock()
   158          mutex.acquire()
   159          tid = thread.start_new_thread(f, (mutex,))
   160          # Wait for the thread to finish.
   161          mutex.acquire()
   162          self.assertIn(tid, threading._active)
   163          self.assertIsInstance(threading._active[tid], threading._DummyThread)
   164          del threading._active[tid]
   165  
   166      # PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
   167      # exposed at the Python level.  This test relies on ctypes to get at it.
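            # Skipped under grumpy: the ctypes import inside this test is commented
            # out, and ctypes.pythonapi is needed to reach PyThreadState_SetAsyncExc.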
   168      @unittest.skip('grumpy')
   169      def test_PyThreadState_SetAsyncExc(self):
   170          try:
   171              #import ctypes
   172              pass
   173          except ImportError:
   174              self.skipTest('requires ctypes')
   175  
   176          set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
   177  
   178          class AsyncExc(Exception):
   179              pass
   180  
   181          exception = ctypes.py_object(AsyncExc)
   182  
   183          # First check it works when setting the exception from the same thread.
   184          tid = thread.get_ident()
   185  
   186          try:
   187              result = set_async_exc(ctypes.c_long(tid), exception)
   188              # The exception is async, so we might have to keep the VM busy until
   189              # it notices.
   190              while True:
   191                  pass
   192          except AsyncExc:
   193              pass
   194          else:
   195              # This code is unreachable but it reflects the intent. If we wanted
   196              # to be smarter the above loop wouldn't be infinite.
   197              self.fail("AsyncExc not raised")
   198          try:
   199              self.assertEqual(result, 1) # one thread state modified
   200          except UnboundLocalError:
   201              # The exception was raised too quickly for us to get the result.
   202              pass
   203  
   204          # `worker_started` is set by the thread when it's inside a try/except
   205          # block waiting to catch the asynchronously set AsyncExc exception.
   206          # `worker_saw_exception` is set by the thread upon catching that
   207          # exception.
   208          worker_started = threading.Event()
   209          worker_saw_exception = threading.Event()
   210  
   211          class Worker(threading.Thread):
   212              def run(self):
   213                  self.id = thread.get_ident()
   214                  self.finished = False
   215  
   216                  try:
   217                      while True:
   218                          worker_started.set()
   219                          time.sleep(0.1)
   220                  except AsyncExc:
   221                      self.finished = True
   222                      worker_saw_exception.set()
   223  
   224          t = Worker()
   225          t.daemon = True # so if this fails, we don't hang Python at shutdown
   226          t.start()
   227          if verbose:
   228              print "    started worker thread"
   229  
   230          # Try a thread id that doesn't make sense.
   231          if verbose:
   232              print "    trying nonsensical thread id"
   233          result = set_async_exc(ctypes.c_long(-1), exception)
   234          self.assertEqual(result, 0)  # no thread states modified
   235  
   236          # Now raise an exception in the worker thread.
   237          if verbose:
   238              print "    waiting for worker thread to get started"
   239          ret = worker_started.wait()
   240          self.assertTrue(ret)
   241          if verbose:
   242              print "    verifying worker hasn't exited"
   243          self.assertFalse(t.finished)
   244          if verbose:
   245              print "    attempting to raise asynch exception in worker"
   246          result = set_async_exc(ctypes.c_long(t.id), exception)
   247          self.assertEqual(result, 1) # one thread state modified
   248          if verbose:
   249              print "    waiting for worker to say it caught the exception"
   250          worker_saw_exception.wait(timeout=10)
   251          self.assertTrue(t.finished)
   252          if verbose:
   253              print "    all OK -- joining worker"
   254          if t.finished:
   255              t.join()
   256          # else the thread is still running, and we have no way to kill it
   257  
   258      def test_limbo_cleanup(self):
   259          # Issue 7481: a failure to start a thread should clean up the limbo map.
   260          def fail_new_thread(*args):
   261              raise thread.error()
   262          _start_new_thread = threading._start_new_thread
   263          threading._start_new_thread = fail_new_thread
   264          try:
   265              t = threading.Thread(target=lambda: None)
   266              self.assertRaises(thread.error, t.start)
   267              self.assertFalse(
   268                  t in threading._limbo,
   269                  "Failed to clean up _limbo map on failure of Thread.start().")
   270          finally:
   271              threading._start_new_thread = _start_new_thread
   272  
   273      @unittest.skip('grumpy')
   274      def test_finalize_running_thread(self):
   275          # Issue 1402: the PyGILState_Ensure / _Release functions may be called
   276          # very late on python exit: on deallocation of a running thread for
   277          # example.
   278          try:
   279              #import ctypes
   280              pass
   281          except ImportError:
   282              self.skipTest('requires ctypes')
   283  
   284          rc = subprocess.call([sys.executable, "-c", """if 1:
   285              import ctypes, sys, time, thread
   286  
   287              # This lock is used as a simple event variable.
   288              ready = thread.allocate_lock()
   289              ready.acquire()
   290  
   291              # Module globals are cleared before __del__ is run
   292              # So we save the functions in class dict
   293              class C:
   294                  ensure = ctypes.pythonapi.PyGILState_Ensure
   295                  release = ctypes.pythonapi.PyGILState_Release
   296                  def __del__(self):
   297                      state = self.ensure()
   298                      self.release(state)
   299  
   300              def waitingThread():
   301                  x = C()
   302                  ready.release()
   303                  time.sleep(100)
   304  
   305              thread.start_new_thread(waitingThread, ())
   306              ready.acquire()  # Be sure the other thread is waiting.
   307              sys.exit(42)
   308              """])
   309          self.assertEqual(rc, 42)
   310  
   311      @unittest.skip('grumpy')
   312      def test_finalize_with_trace(self):
   313          # Issue1733757
   314          # Avoid a deadlock when sys.settrace steps into threading._shutdown
   315          p = subprocess.Popen([sys.executable, "-c", """if 1:
   316              import sys, threading
   317  
   318              # A deadlock-killer, to prevent the
   319              # test suite from hanging forever
   320              def killer():
   321                  import os, time
   322                  time.sleep(2)
   323                  print 'program blocked; aborting'
   324                  os._exit(2)
   325              t = threading.Thread(target=killer)
   326              t.daemon = True
   327              t.start()
   328  
   329              # This is the trace function
   330              def func(frame, event, arg):
   331                  threading.current_thread()
   332                  return func
   333  
   334              sys.settrace(func)
   335              """],
   336              stdout=subprocess.PIPE,
   337              stderr=subprocess.PIPE)
   338          self.addCleanup(p.stdout.close)
   339          self.addCleanup(p.stderr.close)
   340          stdout, stderr = p.communicate()
   341          rc = p.returncode
   342          self.assertFalse(rc == 2, "interpreter was blocked")
   343          self.assertTrue(rc == 0,
   344                          "Unexpected error: " + repr(stderr))
   345  
   346      @unittest.skip('grumpy')
   347      def test_join_nondaemon_on_shutdown(self):
   348          # Issue 1722344
   349          # Raising SystemExit skipped threading._shutdown
   350          p = subprocess.Popen([sys.executable, "-c", """if 1:
   351                  import threading
   352                  from time import sleep
   353  
   354                  def child():
   355                      sleep(1)
   356                      # As a non-daemon thread we SHOULD wake up and nothing
   357                      # should be torn down yet
   358                      print "Woke up, sleep function is:", sleep
   359  
   360                  threading.Thread(target=child).start()
   361                  raise SystemExit
   362              """],
   363              stdout=subprocess.PIPE,
   364              stderr=subprocess.PIPE)
   365          self.addCleanup(p.stdout.close)
   366          self.addCleanup(p.stderr.close)
   367          stdout, stderr = p.communicate()
   368          self.assertEqual(stdout.strip(),
   369              "Woke up, sleep function is: <built-in function sleep>")
   370          stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
   371          self.assertEqual(stderr, "")
   372  
   373      @unittest.skip('grumpy')
   374      def test_enumerate_after_join(self):
   375          # Try hard to trigger #1703448: a thread is still returned in
   376          # threading.enumerate() after it has been join()ed.
   377          enum = threading.enumerate
   378          old_interval = sys.getcheckinterval()
   379          try:
   380              for i in xrange(1, 100):
   381                  # Try a couple times at each thread-switching interval
   382                  # to get more interleavings.
   383                  sys.setcheckinterval(i // 5)
   384                  t = threading.Thread(target=lambda: None)
   385                  t.start()
   386                  t.join()
   387                  l = enum()
   388                  self.assertNotIn(t, l,
   389                      "#1703448 triggered after %d trials: %s" % (i, l))
   390          finally:
   391              sys.setcheckinterval(old_interval)
   392  
   393      @unittest.skip('grumpy')
   394      def test_no_refcycle_through_target(self):
   395          class RunSelfFunction(object):
   396              def __init__(self, should_raise):
   397                  # The links in this refcycle from Thread back to self
   398                  # should be cleaned up when the thread completes.
   399                  self.should_raise = should_raise
   400                  self.thread = threading.Thread(target=self._run,
   401                                                 args=(self,),
   402                                                 kwargs={'yet_another':self})
   403                  self.thread.start()
   404  
   405              def _run(self, other_ref, yet_another):
   406                  if self.should_raise:
   407                      raise SystemExit
   408  
   409          cyclic_object = RunSelfFunction(should_raise=False)
   410          weak_cyclic_object = weakref.ref(cyclic_object)
   411          cyclic_object.thread.join()
   412          del cyclic_object
   413          self.assertEqual(None, weak_cyclic_object(),
   414                           msg=('%d references still around' %
   415                                sys.getrefcount(weak_cyclic_object())))
   416  
   417          raising_cyclic_object = RunSelfFunction(should_raise=True)
   418          weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
   419          raising_cyclic_object.thread.join()
   420          del raising_cyclic_object
   421          self.assertEqual(None, weak_raising_cyclic_object(),
   422                           msg=('%d references still around' %
   423                                sys.getrefcount(weak_raising_cyclic_object())))
   424  
   425      @unittest.skip('grumpy')
   426      @unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
   427      def test_dummy_thread_after_fork(self):
   428          # Issue #14308: a dummy thread in the active list doesn't mess up
   429          # the after-fork mechanism.
   430          code = """if 1:
   431              import thread, threading, os, time
   432  
   433              def background_thread(evt):
   434                  # Creates and registers the _DummyThread instance
   435                  threading.current_thread()
   436                  evt.set()
   437                  time.sleep(10)
   438  
   439              evt = threading.Event()
   440              thread.start_new_thread(background_thread, (evt,))
   441              evt.wait()
   442              assert threading.active_count() == 2, threading.active_count()
   443              if os.fork() == 0:
   444                  assert threading.active_count() == 1, threading.active_count()
   445                  os._exit(0)
   446              else:
   447                  os.wait()
   448          """
   449          _, out, err = assert_python_ok("-c", code)
   450          self.assertEqual(out, '')
   451          self.assertEqual(err, '')
   452  
   453      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   454      def test_is_alive_after_fork(self):
   455          # Try hard to trigger #18418: is_alive() could sometimes be True on
   456          # threads that vanished after a fork.
   457          old_interval = sys.getcheckinterval()
   458  
   459          # Make the bug more likely to manifest.
   460          sys.setcheckinterval(10)
   461  
   462          try:
   463              for i in range(20):
   464                  t = threading.Thread(target=lambda: None)
   465                  t.start()
   466                  pid = os.fork()
   467                  if pid == 0:
   468                      os._exit(1 if t.is_alive() else 0)
   469                  else:
   470                      t.join()
   471                      pid, status = os.waitpid(pid, 0)
   472                      self.assertEqual(0, status)
   473          finally:
   474              sys.setcheckinterval(old_interval)
   475  
   476      def test_BoundedSemaphore_limit(self):
   477          # BoundedSemaphore should raise ValueError if released too often.
   478          for limit in range(1, 10):
   479              bs = threading.BoundedSemaphore(limit)
   480              threads = [threading.Thread(target=bs.acquire)
   481                         for _ in range(limit)]
   482              for t in threads:
   483                  t.start()
   484              for t in threads:
   485                  t.join()
   486              threads = [threading.Thread(target=bs.release)
   487                         for _ in range(limit)]
   488              for t in threads:
   489                  t.start()
   490              for t in threads:
   491                  t.join()
   492              self.assertRaises(ValueError, bs.release)
   493  
   494  class ThreadJoinOnShutdown(BaseTestCase):
   495  
   496      # Between fork() and exec(), only async-safe functions are allowed (issues
   497      # #12316 and #11870), and fork() from a worker thread is known to trigger
   498      # problems with some operating systems (issue #3863): skip problematic tests
   499      # on platforms known to behave badly.
   500      platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
   501                           'os2emx')
   502  
   503      def _run_and_join(self, script):
   504          script = """if 1:
   505              import sys, os, time, threading
   506  
   507              # a thread, which waits for the main program to terminate
   508              def joiningfunc(mainthread):
   509                  mainthread.join()
   510                  print 'end of thread'
   511          \n""" + script
   512  
   513          p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
   514          rc = p.wait()
   515          data = p.stdout.read().replace('\r', '')
   516          p.stdout.close()
   517          self.assertEqual(data, "end of main\nend of thread\n")
   518          self.assertFalse(rc == 2, "interpreter was blocked")
   519          self.assertTrue(rc == 0, "Unexpected error")
   520  
   521      @unittest.skip('grumpy')
   522      def test_1_join_on_shutdown(self):
   523          # The usual case: on exit, wait for a non-daemon thread
   524          script = """if 1:
   525              import os
   526              t = threading.Thread(target=joiningfunc,
   527                                   args=(threading.current_thread(),))
   528              t.start()
   529              time.sleep(0.1)
   530              print 'end of main'
   531              """
   532          self._run_and_join(script)
   533  
   534  
   535      @unittest.skip('grumpy')
   536      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   537      @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
   538      def test_2_join_in_forked_process(self):
   539          # Like the test above, but from a forked interpreter
   540          script = """if 1:
   541              childpid = os.fork()
   542              if childpid != 0:
   543                  os.waitpid(childpid, 0)
   544                  sys.exit(0)
   545  
   546              t = threading.Thread(target=joiningfunc,
   547                                   args=(threading.current_thread(),))
   548              t.start()
   549              print 'end of main'
   550              """
   551          self._run_and_join(script)
   552  
   553      @unittest.skip('grumpy')
   554      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   555      @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
   556      def test_3_join_in_forked_from_thread(self):
   557          # Like the test above, but fork() was called from a worker thread
   558          # In the forked process, the main Thread object must be marked as stopped.
   559          script = """if 1:
   560              main_thread = threading.current_thread()
   561              def worker():
   562                  childpid = os.fork()
   563                  if childpid != 0:
   564                      os.waitpid(childpid, 0)
   565                      sys.exit(0)
   566  
   567                  t = threading.Thread(target=joiningfunc,
   568                                       args=(main_thread,))
   569                  print 'end of main'
   570                  t.start()
   571                  t.join() # Should not block: main_thread is already stopped
   572  
   573              w = threading.Thread(target=worker)
   574              w.start()
   575              """
   576          self._run_and_join(script)
   577  
   578      def assertScriptHasOutput(self, script, expected_output):
   579          p = subprocess.Popen([sys.executable, "-c", script],
   580                               stdout=subprocess.PIPE)
   581          rc = p.wait()
   582          data = p.stdout.read().decode().replace('\r', '')
   583          self.assertEqual(rc, 0, "Unexpected error")
   584          self.assertEqual(data, expected_output)
   585  
   586      @unittest.skip('grumpy')
   587      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   588      @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
   589      def test_4_joining_across_fork_in_worker_thread(self):
   590          # There used to be a possible deadlock when forking from a child
   591          # thread.  See http://bugs.python.org/issue6643.
   592  
   593          # The script takes the following steps:
   594          # - The main thread in the parent process starts a new thread and then
   595          #   tries to join it.
   596          # - The join operation acquires the Lock inside the thread's _block
   597          #   Condition.  (See threading.py:Thread.join().)
   598          # - We stub out the acquire method on the condition to force it to wait
   599          #   until the child thread forks.  (See LOCK ACQUIRED HERE)
   600          # - The child thread forks.  (See LOCK HELD and WORKER THREAD FORKS
   601          #   HERE)
   602          # - The main thread of the parent process enters Condition.wait(),
   603          #   which releases the lock on the child thread.
   604          # - The child process returns.  Without the necessary fix, when the
   605          #   main thread of the child process (which used to be the child thread
   606          #   in the parent process) attempts to exit, it will try to acquire the
   607          #   lock in the Thread._block Condition object and hang, because the
   608          #   lock was held across the fork.
   609  
   610          script = """if 1:
   611              import os, time, threading
   612  
   613              finish_join = False
   614              start_fork = False
   615  
   616              def worker():
   617                  # Wait until this thread's lock is acquired before forking to
   618                  # create the deadlock.
   619                  global finish_join
   620                  while not start_fork:
   621                      time.sleep(0.01)
   622                  # LOCK HELD: Main thread holds lock across this call.
   623                  childpid = os.fork()
   624                  finish_join = True
   625                  if childpid != 0:
   626                      # Parent process just waits for child.
   627                      os.waitpid(childpid, 0)
   628                  # Child process should just return.
   629  
   630              w = threading.Thread(target=worker)
   631  
   632              # Stub out the private condition variable's lock acquire method.
   633              # This acquires the lock and then waits until the child has forked
   634              # before returning, which will release the lock soon after.  If
   635              # someone else tries to fix this test case by acquiring this lock
   636              # before forking instead of resetting it, the test case will
   637              # deadlock when it shouldn't.
   638              condition = w._block
   639              orig_acquire = condition.acquire
   640              call_count_lock = threading.Lock()
   641              call_count = 0
   642              def my_acquire():
   643                  global call_count
   644                  global start_fork
   645                  orig_acquire()  # LOCK ACQUIRED HERE
   646                  start_fork = True
   647                  if call_count == 0:
   648                      while not finish_join:
   649                          time.sleep(0.01)  # WORKER THREAD FORKS HERE
   650                  with call_count_lock:
   651                      call_count += 1
   652              condition.acquire = my_acquire
   653  
   654              w.start()
   655              w.join()
   656              print('end of main')
   657              """
   658          self.assertScriptHasOutput(script, "end of main\n")
   659  
   660      @unittest.skip('grumpy')
   661      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   662      @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
   663      def test_5_clear_waiter_locks_to_avoid_crash(self):
   664          # Check that a spawned thread that forks doesn't segfault on certain
   665          # platforms, namely OS X.  This used to happen if there was a waiter
   666          # lock in the thread's condition variable's waiters list.  Even though
   667          # we know the lock will be held across the fork, it is not safe to
   668          # release locks held across forks on all platforms, so releasing the
   669          # waiter lock caused a segfault on OS X.  Furthermore, since locks on
   670          # OS X are (as of this writing) implemented with a mutex + condition
   671          # variable instead of a semaphore, while we know that the Python-level
   672          # lock will be acquired, we can't know if the internal mutex will be
   673          # acquired at the time of the fork.
   674  
   675          script = """if True:
   676              import os, time, threading
   677  
   678              start_fork = False
   679  
   680              def worker():
   681                  # Wait until the main thread has attempted to join this thread
   682                  # before continuing.
   683                  while not start_fork:
   684                      time.sleep(0.01)
   685                  childpid = os.fork()
   686                  if childpid != 0:
   687                      # Parent process just waits for child.
   688                      (cpid, rc) = os.waitpid(childpid, 0)
   689                      assert cpid == childpid
   690                      assert rc == 0
   691                      print('end of worker thread')
   692                  else:
   693                      # Child process should just return.
   694                      pass
   695  
   696              w = threading.Thread(target=worker)
   697  
   698              # Stub out the private condition variable's _release_save method.
   699              # This releases the condition's lock and flips the global that
   700              # causes the worker to fork.  At this point, the problematic waiter
   701              # lock has been acquired once by the waiter and has been put onto
   702              # the waiters list.
   703              condition = w._block
   704              orig_release_save = condition._release_save
   705              def my_release_save():
   706                  global start_fork
   707                  orig_release_save()
   708                  # Waiter lock held here, condition lock released.
   709                  start_fork = True
   710              condition._release_save = my_release_save
   711  
   712              w.start()
   713              w.join()
   714              print('end of main thread')
   715              """
   716          output = "end of worker thread\nend of main thread\n"
   717          self.assertScriptHasOutput(script, output)
   718  
   719      @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
   720      @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
   721      def test_reinit_tls_after_fork(self):
   722          # Issue #13817: fork() would deadlock in a multithreaded program with
   723          # the ad-hoc TLS implementation.
   724  
   725          def do_fork_and_wait():
   726              # just fork a child process and wait for it
   727              pid = os.fork()
   728              if pid > 0:
   729                  os.waitpid(pid, 0)
   730              else:
   731                  os._exit(0)
   732  
   733          # start a bunch of threads that will fork() child processes
   734          threads = []
   735          for i in range(16):
   736              t = threading.Thread(target=do_fork_and_wait)
   737              threads.append(t)
   738              t.start()
   739  
   740          for t in threads:
   741              t.join()
   742  
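            # _testcapi is set to None at the top of this module, so this test is
            # effectively always skipped here.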
   743      @cpython_only
   744      @unittest.skipIf(_testcapi is None, "need _testcapi module")
   745      def test_frame_tstate_tracing(self):
   746          # Issue #14432: Crash when a generator is created in a C thread that is
   747          # destroyed while the generator is still used. The issue was that a
   748          # generator contains a frame, and the frame kept a reference to the
   749          # Python state of the destroyed C thread. The crash occurs when a trace
   750          # function is set up.
   751  
   752          def noop_trace(frame, event, arg):
   753              # no operation
   754              return noop_trace
   755  
   756          def generator():
   757              while 1:
   758                  yield "generator"
   759  
   760          def callback():
   761              if callback.gen is None:
   762                  callback.gen = generator()
   763              return next(callback.gen)
   764          callback.gen = None
   765  
   766          old_trace = sys.gettrace()
   767          sys.settrace(noop_trace)
   768          try:
   769              # Install a trace function
   770              threading.settrace(noop_trace)
   771  
   772              # Create a generator in a C thread which exits after the call
   773              _testcapi.call_in_temporary_c_thread(callback)
   774  
   775              # Call the generator in a different Python thread, check that the
   776              # generator didn't keep a reference to the destroyed thread state
   777              for test in range(3):
   778                  # The trace function is still called here
   779                  callback()
   780          finally:
   781              sys.settrace(old_trace)
   782  
   783  
   784  class ThreadingExceptionTests(BaseTestCase):
   785      # A RuntimeError should be raised if Thread.start() is called
   786      # multiple times.
   787      def test_start_thread_again(self):
   788          thread = threading.Thread()
   789          thread.start()
   790          self.assertRaises(RuntimeError, thread.start)
   791  
   792      def test_joining_current_thread(self):
   793          current_thread = threading.current_thread()
   794          self.assertRaises(RuntimeError, current_thread.join)
   795  
   796      def test_joining_inactive_thread(self):
   797          thread = threading.Thread()
   798          self.assertRaises(RuntimeError, thread.join)
   799  
   800      def test_daemonize_active_thread(self):
   801          thread = threading.Thread()
   802          thread.start()
   803          self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
   804  
   805      @unittest.skip('grumpy')
   806      def test_print_exception(self):
   807          script = r"""if 1:
   808              import threading
   809              import time
   810  
   811              running = False
   812              def run():
   813                  global running
   814                  running = True
   815                  while running:
   816                      time.sleep(0.01)
   817                  1.0/0.0
   818              t = threading.Thread(target=run)
   819              t.start()
   820              while not running:
   821                  time.sleep(0.01)
   822              running = False
   823              t.join()
   824              """
   825          rc, out, err = assert_python_ok("-c", script)
   826          self.assertEqual(out, '')
   827          self.assertIn("Exception in thread", err)
   828          self.assertIn("Traceback (most recent call last):", err)
   829          self.assertIn("ZeroDivisionError", err)
   830          self.assertNotIn("Unhandled exception", err)
   831  
   832      @unittest.skip('grumpy')
   833      def test_print_exception_stderr_is_none_1(self):
   834          script = r"""if 1:
   835              import sys
   836              import threading
   837              import time
   838  
   839              running = False
   840              def run():
   841                  global running
   842                  running = True
   843                  while running:
   844                      time.sleep(0.01)
   845                  1.0/0.0
   846              t = threading.Thread(target=run)
   847              t.start()
   848              while not running:
   849                  time.sleep(0.01)
   850              sys.stderr = None
   851              running = False
   852              t.join()
   853              """
   854          rc, out, err = assert_python_ok("-c", script)
   855          self.assertEqual(out, '')
   856          self.assertIn("Exception in thread", err)
   857          self.assertIn("Traceback (most recent call last):", err)
   858          self.assertIn("ZeroDivisionError", err)
   859          self.assertNotIn("Unhandled exception", err)
   860  
   861      @unittest.skip('grumpy')
   862      def test_print_exception_stderr_is_none_2(self):
   863          script = r"""if 1:
   864              import sys
   865              import threading
   866              import time
   867  
   868              running = False
   869              def run():
   870                  global running
   871                  running = True
   872                  while running:
   873                      time.sleep(0.01)
   874                  1.0/0.0
   875              sys.stderr = None
   876              t = threading.Thread(target=run)
   877              t.start()
   878              while not running:
   879                  time.sleep(0.01)
   880              running = False
   881              t.join()
   882              """
   883          rc, out, err = assert_python_ok("-c", script)
   884          self.assertEqual(out, '')
   885          self.assertNotIn("Unhandled exception", err)
   886  
   887  
   888  class LockTests(lock_tests.LockTests):
   889      locktype = staticmethod(threading.Lock)
   890  
   891  class RLockTests(lock_tests.RLockTests):
   892      locktype = staticmethod(threading.RLock)
   893  
   894  class EventTests(lock_tests.EventTests):
   895      eventtype = staticmethod(threading.Event)
   896  
   897  class ConditionAsRLockTests(lock_tests.RLockTests):
   898      # Condition uses an RLock by default and exports its API.
   899      locktype = staticmethod(threading.Condition)
   900  
   901  class ConditionTests(lock_tests.ConditionTests):
   902      condtype = staticmethod(threading.Condition)
   903  
   904  class SemaphoreTests(lock_tests.SemaphoreTests):
   905      semtype = staticmethod(threading.Semaphore)
   906  
   907  class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
   908      semtype = staticmethod(threading.BoundedSemaphore)
   909  
   910      @unittest.skip('grumpy')
   911      @unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
   912      def test_recursion_limit(self):
   913          # Issue 9670
   914          # test that excessive recursion within a non-main thread causes
   915          # an exception rather than crashing the interpreter on platforms
   916          # like Mac OS X or FreeBSD which have small default stack sizes
   917          # for threads
   918          script = """if True:
   919              import threading
   920  
   921              def recurse():
   922                  return recurse()
   923  
   924              def outer():
   925                  try:
   926                      recurse()
   927                  except RuntimeError:
   928                      pass
   929  
   930              w = threading.Thread(target=outer)
   931              w.start()
   932              w.join()
   933              print('end of main thread')
   934              """
   935          expected_output = "end of main thread\n"
   936          p = subprocess.Popen([sys.executable, "-c", script],
   937                               stdout=subprocess.PIPE)
   938          stdout, stderr = p.communicate()
   939          data = stdout.decode().replace('\r', '')
   940          self.assertEqual(p.returncode, 0, "Unexpected error")
   941          self.assertEqual(data, expected_output)
   942  
   943  def test_main():
   944      test.test_support.run_unittest(LockTests, RLockTests, EventTests,
   945                                     ConditionAsRLockTests, ConditionTests,
   946                                     SemaphoreTests, BoundedSemaphoreTests,
   947                                     ThreadTests,
   948                                     ThreadJoinOnShutdown,
   949                                     ThreadingExceptionTests,
   950                                     )
   951  
   952  if __name__ == "__main__":
   953      test_main()