github.com/varialus/godfly@v0.0.0-20130904042352-1934f9f095ab/src/pkg/runtime/proc.p

// Copyright 2011 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
model for proc.c as of 2011/07/22.

takes 4900 seconds to explore 1189070 states
with G=3, var_gomaxprocs=1
on a Core i7 L640 2.13 GHz Lenovo X201s.

rm -f proc.p.trail pan.* pan
spin -a proc.p
gcc -DSAFETY -DREACH -DMEMLIM'='4000 -o pan pan.c
pan -w28 -n -i -m500000
test -f proc.p.trail && pan -r proc.p.trail
*/

/*
 * scheduling parameters
 */

/*
 * the number of goroutines G doubles as the maximum
 * number of OS threads; the max is reachable when all
 * the goroutines are blocked in system calls.
 */
#define G 3

/*
 * whether to allow gomaxprocs to vary during execution.
 * enabling this checks the scheduler even when code is
 * calling GOMAXPROCS, but it also slows down the verification
 * by about 10x.
 */
#define var_gomaxprocs 1  /* allow gomaxprocs to vary */

/* gomaxprocs */
#if var_gomaxprocs
byte gomaxprocs = 3;
#else
#define gomaxprocs 3
#endif

/* queue of waiting M's: sched_mhead[:mwait] */
byte mwait;
byte sched_mhead[G];

/* garbage collector state */
bit gc_lock, gcwaiting;

/* goroutines sleeping, waiting to run */
byte gsleep, gwait;

/* scheduler state */
bit sched_lock;
bit sched_stopped;
bit atomic_gwaiting, atomic_waitstop;
byte atomic_mcpu, atomic_mcpumax;

/* M struct fields - state for handing off g to m. */
bit m_waitnextg[G];
bit m_havenextg[G];
bit m_nextg[G];

/*
 * opt_atomic/opt_dstep mark atomic/deterministic
 * sequences that are marked only for reasons of
 * optimization, not for correctness of the algorithms.
 *
 * in general any code that runs while holding the
 * schedlock and does not refer to or modify the atomic_*
 * fields can be marked atomic/dstep without affecting
 * the usefulness of the model.  since we trust the lock
 * implementation, what we really want to test is the
 * interleaving of the atomic fast paths with entersyscall
 * and exitsyscall.
 */
#define opt_atomic atomic
#define opt_dstep d_step
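/*
 * note: in Promela, d_step executes its body as a single
 * deterministic transition, while atomic only prevents other
 * processes from interleaving; per the comment above, either
 * marking here is purely a state-space optimization.
 */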

/* locks */
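/*
 * note: a bare expression such as "x == 0" is a Promela statement
 * that blocks until it becomes true, so lock() below waits for the
 * lock to be free and the d_step makes the test-and-set atomic.
 */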
inline lock(x) {
	d_step { x == 0; x = 1 }
}

inline unlock(x) {
	d_step { assert x == 1; x = 0 }
}

/* notes */
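/*
 * note: these model the runtime's one-shot note primitives:
 * notesleep blocks until some other process has run notewakeup
 * on the same bit.
 */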
inline noteclear(x) {
	x = 0
}

inline notesleep(x) {
	x == 1
}

inline notewakeup(x) {
	opt_dstep { assert x == 0; x = 1 }
}

/*
 * scheduler
 */
inline schedlock() {
	lock(sched_lock)
}

inline schedunlock() {
	unlock(sched_lock)
}

/*
 * canaddmcpu is like the C function but takes
 * an extra argument to include in the test, to model
 * "cangget() && canaddmcpu()" as "canaddmcpu(cangget())"
 */
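/*
 * note: the guarded d_step below blocks until a g is available
 * and a cpu slot is free, then claims the slot in the same
 * atomic step.
 */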
inline canaddmcpu(g) {
	d_step {
		g && atomic_mcpu < atomic_mcpumax;
		atomic_mcpu++;
	}
}

/*
 * gput is like the C function.
 * instead of tracking goroutines explicitly we
 * maintain only the count of the number of
 * waiting goroutines.
 */
inline gput() {
	/* omitted: lockedm, idlem concerns */
	opt_dstep {
		gwait++;
		if
		:: gwait == 1 ->
			atomic_gwaiting = 1
		:: else
		fi
	}
}

/*
 * cangget is a macro so it can be passed to
 * canaddmcpu (see above).
 */
#define cangget()  (gwait>0)

/*
 * gget is like the C function.
 */
inline gget() {
	opt_dstep {
		assert gwait > 0;
		gwait--;
		if
		:: gwait == 0 ->
			atomic_gwaiting = 0
		:: else
		fi
	}
}

/*
 * mput is like the C function.
 * here we do keep an explicit list of waiting M's,
 * so that we know which ones can be awakened.
 * we use _pid-1 because the monitor is proc 0.
 */
inline mput() {
	opt_dstep {
		sched_mhead[mwait] = _pid - 1;
		mwait++
	}
}

/*
 * mnextg is like the C function mnextg(m, g).
 * it passes an unspecified goroutine to m to start running.
 */
inline mnextg(m) {
	opt_dstep {
		m_nextg[m] = 1;
		if
		:: m_waitnextg[m] ->
			m_waitnextg[m] = 0;
			notewakeup(m_havenextg[m])
		:: else
		fi
	}
}

/*
 * mgetnextg handles the main m handoff in matchmg.
 * it is like mget() || new M followed by mnextg(m, g),
 * but combined to avoid a local variable.
 * unlike the C code, a new M simply assumes it is
 * running a g instead of using the mnextg coordination
 * to obtain one.
 */
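/*
 * note: "run mstart()" below spawns a new M process; in this
 * model that stands in for starting a new OS thread.
 */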
inline mgetnextg() {
	opt_atomic {
		if
		:: mwait > 0 ->
			mwait--;
			mnextg(sched_mhead[mwait]);
			sched_mhead[mwait] = 0;
		:: else ->
			run mstart();
		fi
	}
}

/*
 * nextgandunlock is like the C function.
 * it pulls a g off the queue or else waits for one.
 */
inline nextgandunlock() {
	assert atomic_mcpu <= G;

	if
	:: m_nextg[_pid-1] ->
		m_nextg[_pid-1] = 0;
		schedunlock();
	:: canaddmcpu(!m_nextg[_pid-1] && cangget()) ->
		gget();
		schedunlock();
	:: else ->
		opt_dstep {
			mput();
			m_nextg[_pid-1] = 0;
			m_waitnextg[_pid-1] = 1;
			noteclear(m_havenextg[_pid-1]);
		}
		if
		:: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
			atomic_waitstop = 0;
			notewakeup(sched_stopped)
		:: else
		fi;
		schedunlock();
		opt_dstep {
			notesleep(m_havenextg[_pid-1]);
			assert m_nextg[_pid-1];
			m_nextg[_pid-1] = 0;
		}
	fi
}

/*
 * stoptheworld is like the C function.
 */
inline stoptheworld() {
	schedlock();
	gcwaiting = 1;
	atomic_mcpumax = 1;
	do
	:: d_step { atomic_mcpu > 1 ->
		noteclear(sched_stopped);
		assert !atomic_waitstop;
		atomic_waitstop = 1 }
		schedunlock();
		notesleep(sched_stopped);
		schedlock();
	:: else ->
		break
	od;
	schedunlock();
}

/*
 * starttheworld is like the C function.
 */
inline starttheworld() {
	schedlock();
	gcwaiting = 0;
	atomic_mcpumax = gomaxprocs;
	matchmg();
	schedunlock();
}

/*
 * matchmg is like the C function.
 */
inline matchmg() {
	do
	:: canaddmcpu(cangget()) ->
		gget();
		mgetnextg();
	:: else -> break
	od
}

/*
 * ready is like the C function.
 * it puts a g on the run queue.
 */
inline ready() {
	schedlock();
	gput();
	matchmg();
	schedunlock()
}

/*
 * schedule simulates the C scheduler.
 * it assumes that there is always a goroutine
 * running already, and the goroutine has entered
 * the scheduler for an unspecified reason,
 * either to yield or to block.
 */
inline schedule() {
	schedlock();

	mustsched = 0;
	atomic_mcpu--;
	assert atomic_mcpu <= G;
	if
	:: skip ->
		// goroutine yields, still runnable
		gput();
	:: gsleep+1 < G ->
		// goroutine goes to sleep (but there is another that can wake it)
		gsleep++
	fi;

	// Find goroutine to run.
	nextgandunlock()
}

/*
 * schedpend is > 0 if a goroutine has committed to
 * entering the scheduler but has not yet done so.
 * Just as we don't test for the undesirable conditions when a
 * goroutine is in the scheduler, we don't test for them when
 * a goroutine will be in the scheduler shortly.
 * Modeling this state lets us replace mcpu cas loops with
 * simpler mcpu atomic adds.
 */
byte schedpend;

/*
 * entersyscall is like the C function.
 */
inline entersyscall() {
	bit willsched;

	/*
	 * Fast path.  Check all the conditions tested during schedlock/schedunlock
	 * below, and if we can get through the whole thing without stopping, run it
	 * in one atomic cas-based step.
	 */
	atomic {
		atomic_mcpu--;
		if
		:: atomic_gwaiting ->
			skip
		:: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
			skip
		:: else ->
			goto Lreturn_entersyscall;
		fi;
		willsched = 1;
		schedpend++;
	}

	/*
	 * Normal path.
	 */
	schedlock();
	opt_dstep {
		if
		:: willsched ->
			schedpend--;
			willsched = 0
		:: else
		fi
	}
	if
	:: atomic_gwaiting ->
		matchmg()
	:: else
	fi;
	if
	:: atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
		atomic_waitstop = 0;
		notewakeup(sched_stopped)
	:: else
	fi;
	schedunlock();
Lreturn_entersyscall:
	skip
}

/*
 * exitsyscall is like the C function.
 */
inline exitsyscall() {
	/*
	 * Fast path.  If there's a cpu available, use it.
	 */
	atomic {
		// omitted profilehz check
		atomic_mcpu++;
		if
		:: atomic_mcpu >= atomic_mcpumax ->
			skip
		:: else ->
			goto Lreturn_exitsyscall
		fi
	}

	/*
	 * Normal path.
	 */
	schedlock();
	d_step {
		if
		:: atomic_mcpu <= atomic_mcpumax ->
			skip
		:: else ->
			mustsched = 1
		fi
	}
	schedunlock();
Lreturn_exitsyscall:
	skip
}

#if var_gomaxprocs
inline gomaxprocsfunc() {
	schedlock();
	opt_atomic {
		if
		:: gomaxprocs != 1 -> gomaxprocs = 1
		:: gomaxprocs != 2 -> gomaxprocs = 2
		:: gomaxprocs != 3 -> gomaxprocs = 3
		fi;
	}
	if
	:: gcwaiting != 0 ->
		assert atomic_mcpumax == 1
	:: else ->
		atomic_mcpumax = gomaxprocs;
		if
		:: atomic_mcpu > gomaxprocs ->
			mustsched = 1
		:: else ->
			matchmg()
		fi
	fi;
	schedunlock();
}
#endif

/*
 * mstart is the entry point for a new M.
 * our model of an M is always running some
 * unspecified goroutine.
 */
proctype mstart() {
	/*
	 * mustsched is true if the goroutine must enter the
	 * scheduler instead of continuing to execute.
	 */
	bit mustsched;

	do
	:: skip ->
		// goroutine reschedules.
		schedule()
	:: !mustsched ->
		// goroutine does something.
		if
		:: skip ->
			// goroutine executes system call
			entersyscall();
			exitsyscall()
		:: atomic { gsleep > 0; gsleep-- } ->
			// goroutine wakes another goroutine
			ready()
		:: lock(gc_lock) ->
			// goroutine runs a garbage collection
			stoptheworld();
			starttheworld();
			unlock(gc_lock)
#if var_gomaxprocs
		:: skip ->
			// goroutine picks a new gomaxprocs
			gomaxprocsfunc()
#endif
		fi
	od;

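	// note: the loop above has no break, so control can never
	// reach this assert; it only documents that an M must not exit.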
	assert 0;
}

/*
 * monitor initializes the scheduler state
 * and then watches for impossible conditions.
 */
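/*
 * note: monitor is the only active proctype, so it runs as _pid 0;
 * that is why the M processes above index per-M state with _pid-1.
 */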
active proctype monitor() {
	opt_dstep {
		byte i = 1;
		do
		:: i < G ->
			gput();
			i++
		:: else -> break
		od;
		atomic_mcpu = 1;
		atomic_mcpumax = 1;
	}
	run mstart();

	do
	// Should never have goroutines waiting with procs available.
	:: !sched_lock && schedpend==0 && gwait > 0 && atomic_mcpu < atomic_mcpumax ->
		assert 0
	// Should never have gc waiting for stop if things have already stopped.
	:: !sched_lock && schedpend==0 && atomic_waitstop && atomic_mcpu <= atomic_mcpumax ->
		assert 0
	od
}