github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/tools/checklocks/analysis.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package checklocks

import (
	"go/token"
	"go/types"
	"strings"

	"golang.org/x/tools/go/ssa"
)

// gcd returns the greatest common divisor of a and b via the Euclidean
// algorithm; typeAlignment uses it below to compute the least common
// multiple of alignment requirements.
func gcd(a, b atomicAlignment) atomicAlignment {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

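// As a worked example (illustrative): gcd(2, 8) == 2, so combining alignment
// requirements of 2 and 8 in typeAlignment below yields 2 * (8 / 2) == 8,
// their least common multiple.
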
// typeAlignment returns the type alignment for the given type.
func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
	requiredOffset := atomicAlignment(1)
	if pc.pass.ImportObjectFact(obj, &requiredOffset) {
		return requiredOffset
	}

	switch x := obj.Type().Underlying().(type) {
	case *types.Struct:
		fields := make([]*types.Var, x.NumFields())
		for i := 0; i < x.NumFields(); i++ {
			fields[i] = x.Field(i)
		}
		offsets := pc.pass.TypesSizes.Offsetsof(fields)
		for i := 0; i < x.NumFields(); i++ {
			// Check the offset of this field, assuming that the
			// broader type itself is suitably aligned.
			fieldRequired := pc.typeAlignment(pkg, fields[i])
			if offsets[i]%int64(fieldRequired) != 0 {
				// The offset of this field is not compatible.
				pc.maybeFail(fields[i].Pos(), "have alignment %d, need %d", offsets[i], fieldRequired)
			}
			// Ensure that requiredOffset is the LCM of all the
			// field alignment requirements.
			requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
		}
	case *types.Array:
		// Export direct alignment requirements.
		if named, ok := x.Elem().(*types.Named); ok {
			requiredOffset = pc.typeAlignment(pkg, named.Obj())
		}
	default:
		// Use the compiler's underlying alignment.
		requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
	}

	if pkg == obj.Pkg() {
		// Cache as an object fact for subsequent calls. Note that we
		// can only export object facts for the package that we are
		// currently analyzing. There may be no exported facts for
		// array types or alias types, for example.
		pc.pass.ExportObjectFact(obj, &requiredOffset)
	}

	return requiredOffset
}

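// For example (an illustrative sketch, not taken from the analyzer itself):
// given
//
//	type stats struct {
//		count uint64 // Requires 8-byte alignment for atomic access.
//		flag  bool
//	}
//
// the struct's required alignment is the LCM of its fields' requirements
// (here 8), and any field whose offset is not a multiple of its own
// requirement is reported via maybeFail.
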
// checkTypeAlignment checks the alignment of the given type.
//
// This calls typeAlignment, which resolves all types recursively. This
// method should be called for all types individually to ensure full
// coverage.
func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) {
	_ = pc.typeAlignment(pkg, typ.Obj())
}

// checkAtomicCall checks for an atomic access.
//
// inst is the instruction analyzed; obj is the field object being accessed.
//
// If mustBeAtomic is true, then we assert that the instruction *is* an atomic
// function call. If it is false, then we assert that it is *not* an atomic
// dispatch.
//
// If readOnly is true, then only atomic read accesses are allowed. Note that
// readOnly is only meaningful if mustBeAtomic is set.
func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, mustBeAtomic, readOnly bool) {
	switch x := inst.(type) {
	case *ssa.Call:
		if x.Common().IsInvoke() {
			if mustBeAtomic {
				// This is an illegal interface dispatch.
				pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field")
			}
			return
		}
		fn, ok := x.Common().Value.(*ssa.Function)
		if !ok {
			if mustBeAtomic {
				// This is an illegal call to a non-static function.
				pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field")
			}
			return
		}
		pkg := fn.Package()
		if pkg == nil {
			if mustBeAtomic {
				// This is a call to some shared wrapper function.
				pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper")
			}
			return
		}
		var lff lockFunctionFacts // Check for exemption.
		if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore {
			return
		}
		if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" {
			if mustBeAtomic {
				// This is an illegal call to a non-atomic package function.
				pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field")
			}
			return
		}
		if !mustBeAtomic {
			// We are *not* expecting an atomic dispatch.
			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
				pc.maybeFail(inst.Pos(), "unexpected call to atomic function")
			}
		}
		if !strings.HasPrefix(fn.Name(), "Load") && readOnly {
			// We are not allowing any writes in this context.
			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
				pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?")
			}
			return
		}
	default:
		if mustBeAtomic {
			// This is something else entirely.
			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
				pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst)
			}
			return
		}
	}
}

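// For example (an illustrative sketch): for a field whose disposition is
// atomicRequired, a call such as
//
//	atomic.AddInt64(&s.counter, 1)
//
// satisfies the mustBeAtomic assertion, while passing &s.counter to a
// function outside the atomic packages is reported as a dispatch to a
// non-atomic function.
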
// resolveStruct resolves typ to its underlying struct type, unwrapping any
// levels of pointer indirection.
func resolveStruct(typ types.Type) (*types.Struct, bool) {
	structType, ok := typ.Underlying().(*types.Struct)
	if ok {
		return structType, true
	}
	ptrType, ok := typ.Underlying().(*types.Pointer)
	if ok {
		return resolveStruct(ptrType.Elem())
	}
	return nil, false
}

// findField returns the object for the given field index, provided that typ
// resolves to a struct type.
func findField(typ types.Type, field int) (types.Object, bool) {
	structType, ok := resolveStruct(typ)
	if !ok {
		return nil, false
	}
	return structType.Field(field), true
}

// instructionWithReferrers is a generalization over ssa.Field and
// ssa.FieldAddr.
type instructionWithReferrers interface {
	ssa.Instruction
	Referrers() *[]ssa.Instruction
}

// checkFieldAccess checks the validity of a field access.
//
// This also enforces atomicity constraints for fields that must be accessed
// atomically. The parameter isWrite indicates whether this field is used
// downstream for a write operation.
func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
	var (
		lff         lockFieldFacts
		lgf         lockGuardFacts
		guardsFound int
		guardsHeld  int
	)

	fieldObj, _ := findField(structObj.Type(), field)
	pc.pass.ImportObjectFact(fieldObj, &lff)
	pc.pass.ImportObjectFact(fieldObj, &lgf)

	for guardName, fl := range lgf.GuardedBy {
		guardsFound++
		r := fl.resolve(structObj)
		if _, ok := ls.isHeld(r); ok {
			guardsHeld++
			continue
		}
		if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok {
			// Mark this as locked, since it has been forced.
			ls.lockField(r)
			guardsHeld++
			continue
		}
		// Note that we may allow this access if the disposition is
		// atomic and only atomic reads are allowed. That case falls
		// into the atomic disposition check below, which asserts that
		// the access is atomic. Further, guardsHeld < guardsFound will
		// be true for this case, so we require it to be read-only.
		if lgf.AtomicDisposition != atomicRequired {
			// There is no force key, no atomic access and no lock held.
			pc.maybeFail(inst.Pos(), "invalid field access, %s must be locked when accessing %s (locks: %s)", guardName, fieldObj.Name(), ls.String())
		}
	}

	// Check the atomic access for this field.
	switch lgf.AtomicDisposition {
	case atomicRequired:
		// Check that this is used safely as an input.
		readOnly := guardsHeld < guardsFound
		if refs := inst.Referrers(); refs != nil {
			for _, otherInst := range *refs {
				pc.checkAtomicCall(otherInst, fieldObj, true, readOnly)
			}
		}
		// Check that this is not otherwise written non-atomically,
		// even if we do hold all the locks.
		if isWrite {
			pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", fieldObj.Name(), ls.String())
		}
	case atomicDisallow:
		// Check that this is *not* used atomically.
		if refs := inst.Referrers(); refs != nil {
			for _, otherInst := range *refs {
				pc.checkAtomicCall(otherInst, fieldObj, false, false)
			}
		}
	}
}

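// For example (an illustrative sketch, assuming the package's guarded-by
// annotation syntax):
//
//	type counter struct {
//		mu sync.Mutex
//		// +checklocks:mu
//		total int
//	}
//
// Reading or writing total without holding mu is reported as an invalid
// field access, unless the position has been explicitly forced.
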
// checkCall checks a single call against the lock state, dispatching on the
// kind of the call target.
func (pc *passContext) checkCall(call callCommon, ls *lockState) {
	// See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
	//
	// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon
	// represents an ordinary function call of the value in Value, which
	// may be a *Builtin, a *Function or any other value of kind 'func'.
	//
	// Value may be one of:
	//
	// (a) a *Function, indicating a statically dispatched call
	//     to a package-level function, an anonymous function, or
	//     a method of a named type.
	//
	// (b) a *MakeClosure, indicating an immediately applied
	//     function literal with free variables.
	//
	// (c) a *Builtin, indicating a statically dispatched call
	//     to a built-in function.
	//
	// (d) any other value, indicating a dynamically dispatched
	//     function call.
	switch fn := call.Common().Value.(type) {
	case *ssa.Function:
		var lff lockFunctionFacts
		if fn.Object() != nil {
			pc.pass.ImportObjectFact(fn.Object(), &lff)
			pc.checkFunctionCall(call, fn, &lff, ls)
		} else {
			// Anonymous functions have no facts, and cannot be
			// annotated. We don't check for violations using the
			// function facts, since they cannot exist. Instead, we
			// do a fresh analysis using the current lock state.
			fnls := ls.fork()
			for i, arg := range call.Common().Args {
				fnls.store(fn.Params[i], arg)
			}
			pc.checkFunction(call, fn, &lff, fnls, true /* force */)
		}
	case *ssa.MakeClosure:
		// Note that creating and then invoking closures locally is
		// allowed, but analysis of passing closures is done when
		// checking individual instructions.
		pc.checkClosure(call, fn, ls)
	default:
		return
	}
}

// postFunctionCallUpdate updates the lock state to reflect the called
// function's annotated postconditions: locks held on entry but not on exit
// are released, and locks held on exit but not on entry are acquired.
func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState) {
	// Release all locks not still held.
	for fieldName, fg := range lff.HeldOnEntry {
		if _, ok := lff.HeldOnExit[fieldName]; ok {
			continue
		}
		r := fg.resolveCall(call.Common().Args, call.Value())
		if s, ok := ls.unlockField(r); !ok {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
				pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
			}
		}
	}

	// Update all held locks if acquired.
	for fieldName, fg := range lff.HeldOnExit {
		if _, ok := lff.HeldOnEntry[fieldName]; ok {
			continue
		}
		// Acquire the lock per the annotation.
		r := fg.resolveCall(call.Common().Args, call.Value())
		if s, ok := ls.lockField(r); !ok {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
				pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
			}
		}
	}
}

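// For example (an illustrative sketch; the annotation names here are
// assumptions based on HeldOnEntry/HeldOnExit above, see the package
// documentation for the exact syntax):
//
//	// +checklocksacquire:c.mu
//	func (c *counter) lock() { c.mu.Lock() } // mu is added to the lock state.
//
//	// +checklocksrelease:c.mu
//	func (c *counter) unlock() { c.mu.Unlock() } // mu is removed from the lock state.
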
// checkFunctionCall checks preconditions for function calls, and tracks the
// lock state by recording relevant calls to sync functions. Note that calls
// to atomic functions are tracked by checkFieldAccess by looking directly at
// the referrers (because ordering doesn't matter there, so we need not scan
// in instruction order).
func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, ls *lockState) {
	// Check that all required guards are held.
	for fieldName, fg := range lff.HeldOnEntry {
		r := fg.resolveCall(call.Common().Args, call.Value())
		if s, ok := ls.isHeld(r); !ok {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
				pc.maybeFail(call.Pos(), "must hold %s (%s) to call %s, but not held (locks: %s)", fieldName, s, fn.Name(), ls.String())
			} else {
				// Force the lock to be acquired.
				ls.lockField(r)
			}
		}
	}

	// Update all lock state accordingly.
	pc.postFunctionCallUpdate(call, lff, ls)

	// Check if it's a method dispatch for something in the sync package.
	// See: https://godoc.org/golang.org/x/tools/go/ssa#Function
	if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
		switch fn.Name() {
		case "Lock", "RLock":
			if s, ok := ls.lockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
					// Double locking a mutex that is already locked.
					pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
				}
			}
		case "Unlock", "RUnlock":
			if s, ok := ls.unlockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
					// Unlocking something that is already unlocked.
					pc.maybeFail(call.Pos(), "%s already unlocked (locks: %s)", s, ls.String())
				}
			}
		}
	}
}

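// For example (illustrative): because Lock and Unlock calls on sync types
// are tracked above, the analyzer reports
//
//	mu.Lock()
//	mu.Lock()   // "mu already locked"
//	mu.Unlock()
//	mu.Unlock() // "mu already unlocked"
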
// checkClosure forks the lock state, and creates a binding for the FreeVars
// of the closure. This allows the analysis to resolve the closure.
func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lockState) {
	clls := ls.fork()
	clfn := fn.Fn.(*ssa.Function)
	for i, fv := range clfn.FreeVars {
		clls.store(fv, fn.Bindings[i])
	}

	// Note that this is *not* a call to checkFunctionCall, which checks
	// against the function preconditions. Instead, this does a fresh
	// analysis of the function from source code with a different state.
	var nolff lockFunctionFacts
	pc.checkFunction(call, clfn, &nolff, clls, true /* force */)
}

// freshAlloc indicates that v has been allocated within the local scope.
// There is no lock checking done on objects that are freshly allocated.
func freshAlloc(v ssa.Value) bool {
	switch x := v.(type) {
	case *ssa.Alloc:
		return true
	case *ssa.FieldAddr:
		return freshAlloc(x.X)
	case *ssa.Field:
		return freshAlloc(x.X)
	case *ssa.IndexAddr:
		return freshAlloc(x.X)
	case *ssa.Index:
		return freshAlloc(x.X)
	case *ssa.Convert:
		return freshAlloc(x.X)
	case *ssa.ChangeType:
		return freshAlloc(x.X)
	default:
		return false
	}
}

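// For example (illustrative): a guarded field of a locally constructed value
// is not checked, since no other goroutine can yet hold a reference to it:
//
//	c := &counter{}
//	c.total = 1 // Allowed: c is freshly allocated, so no lock is required.
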
// isWrite indicates that this value is used as the addr field in a store.
//
// Note that even when this returns false, the value may still be used for a
// write indirectly. The result here is optimistic, but sufficient for basic
// analysis.
func isWrite(v ssa.Value) bool {
	refs := v.Referrers()
	if refs == nil {
		return false
	}
	for _, ref := range *refs {
		if s, ok := ref.(*ssa.Store); ok && s.Addr == v {
			return true
		}
	}
	return false
}

// callCommon is implemented by call-like values; it provides access to the
// position, the underlying CallCommon, and the corresponding *ssa.Call.
type callCommon interface {
	Pos() token.Pos
	Common() *ssa.CallCommon
	Value() *ssa.Call
}

// checkInstruction checks the legality of a single instruction based on the
// current lockState.
func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*ssa.Return, *lockState) {
	switch x := inst.(type) {
	case *ssa.Store:
		// Record that this value is holding this other value. This is
		// because at the beginning of each ssa execution, there is a
		// series of assignments of parameter values to alloc objects.
		// This allows us to trace these back to the original
		// parameters as aliases above.
		//
		// Note that this may overwrite an existing value in the lock
		// state, but this is intentional.
		ls.store(x.Addr, x.Val)
	case *ssa.Field:
		if !freshAlloc(x.X) {
			pc.checkFieldAccess(x, x.X, x.Field, ls, false)
		}
	case *ssa.FieldAddr:
		if !freshAlloc(x.X) {
			pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
		}
	case *ssa.Call:
		pc.checkCall(x, ls)
	case *ssa.Defer:
		ls.pushDefer(x)
	case *ssa.RunDefers:
		for d := ls.popDefer(); d != nil; d = ls.popDefer() {
			pc.checkCall(d, ls)
		}
	case *ssa.MakeClosure:
		refs := x.Referrers()
		if refs == nil {
			// This is strange: it's not used? Ignore this case,
			// since it will probably be optimized away.
			return nil, nil
		}
		hasNonCall := false
		for _, ref := range *refs {
			switch ref.(type) {
			case *ssa.Call, *ssa.Defer:
				// Analysis will be done on the call itself
				// subsequently, including the lock state at
				// the time of the call.
			default:
				// We need to analyze separately. Per below,
				// this means that we'll analyze at closure
				// construction time with zero assumptions
				// about when it will be called.
				hasNonCall = true
			}
		}
		if !hasNonCall {
			return nil, nil
		}
		// Analyze the closure without bindings. This means that we
		// assume no lock facts and no existing lock state. Only
		// trivial closures are acceptable in this case.
		clfn := x.Fn.(*ssa.Function)
		var nolff lockFunctionFacts
		pc.checkFunction(nil, clfn, &nolff, nil, false /* force */)
	case *ssa.Return:
		return x, ls // Valid return state.
	}
	return nil, nil
}

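// For example (illustrative): deferred unlocks are replayed at the RunDefers
// instruction rather than at the defer statement itself, so the following is
// analyzed with mu held for the remainder of the function body:
//
//	mu.Lock()
//	defer mu.Unlock()
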
// checkBasicBlock traverses the control flow graph starting at the given
// block, and checks each instruction for allowed operations.
func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState) *lockState {
	if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
		return nil
	}

	// If the lock state is not compatible, then we need to do the
	// recursive analysis to ensure that it is still sane. For example, the
	// following is guaranteed to generate incompatible locking states:
	//
	//	if foo {
	//		mu.Lock()
	//	}
	//	other stuff ...
	//	if foo {
	//		mu.Unlock()
	//	}

	var (
		rv  *ssa.Return
		rls *lockState
	)

	// Analyze this block.
	seen[block] = parent
	ls := parent.fork()
	for _, inst := range block.Instrs {
		rv, rls = pc.checkInstruction(inst, ls)
		if rls != nil {
			failed := false
			// Validate held locks.
			for fieldName, fg := range lff.HeldOnExit {
				r := fg.resolveStatic(fn, rv)
				if s, ok := rls.isHeld(r); !ok {
					if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok {
						pc.maybeFail(rv.Pos(), "lock %s (%s) not held (locks: %s)", fieldName, s, rls.String())
						failed = true
					} else {
						// Force the lock to be acquired.
						rls.lockField(r)
					}
				}
			}
			// Check for other locks, but only if the above didn't trip.
			if !failed && rls.count() != len(lff.HeldOnExit) {
				pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
			}
		}
	}

	// Analyze all successors.
	for _, succ := range block.Succs {
		// Collect possible return values, and make sure that the lock
		// state aligns with any return value that we may have found
		// above. Note that checkBasicBlock will recursively analyze
		// the lock state to ensure that releases and acquires are
		// respected.
		if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen); pls != nil {
			if rls != nil && !rls.isCompatible(pls) {
				if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok {
					pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %s)", rls.String(), pls.String())
				}
			}
			rls = pls
		}
	}
	return rls
}

// checkFunction checks a function invocation, typically starting with a nil
// lockState.
func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) {
	defer func() {
		// Mark this function as checked. This is used by the top-level
		// loop to ensure that all anonymous functions are scanned, if
		// they are not explicitly invoked here. Note that this can
		// happen if the anonymous functions are e.g. passed only as
		// parameters or used to initialize some structure.
		pc.functions[fn] = struct{}{}
	}()
	if _, ok := pc.functions[fn]; !force && ok {
		// This function has already been analyzed at least once.
		// That's all we permit for each function, although this may
		// cause some anonymous functions to be analyzed in only one
		// context.
		return
	}

	// If no call is provided, then synthesize one. This is used below only
	// to check against the lock preconditions, which may include return
	// values.
	if call == nil {
		call = &ssa.Call{Call: ssa.CallCommon{Value: fn}}
	}

	// Initialize ls with any preconditions that require locks to be held
	// for the method to be invoked. Note that in the overwhelming majority
	// of cases, parent will be nil. However, in the case of closures and
	// anonymous functions, we may start with a non-nil lock state.
	ls := parent.fork()
	for fieldName, fg := range lff.HeldOnEntry {
		// The first argument is the method object itself, so we skip
		// that when looking for receiver/function parameters.
		r := fg.resolveStatic(fn, call.Value())
		if s, ok := ls.lockField(r); !ok {
			// This can only happen if the same value is declared
			// multiple times, and should be caught by the earlier
			// fact scanning. Keep it here as a sanity check.
			pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times (locks: %s)", fieldName, s, ls.String())
		}
	}

	// Scan the blocks.
	seen := make(map[*ssa.BasicBlock]*lockState)
	if len(fn.Blocks) > 0 {
		pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen)
	}

	// Scan the recover block.
	if fn.Recover != nil {
		pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen)
	}

	// Update all lock state accordingly. This will be called only if we
	// are doing inline analysis, e.g. for an anonymous function.
	if call != nil && parent != nil {
		pc.postFunctionCallUpdate(call, lff, parent)
	}
}