github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/tools/checklocks/analysis.go

     1  // Copyright 2020 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package checklocks
    16  
    17  import (
    18  	"go/token"
    19  	"go/types"
    20  	"strings"
    21  
    22  	"golang.org/x/tools/go/ssa"
    23  )
    24  
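         // gcd returns the greatest common divisor of a and b. It is used below to
         // combine field alignment requirements into a least common multiple.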
    25  func gcd(a, b atomicAlignment) atomicAlignment {
    26  	for b != 0 {
    27  		a, b = b, a%b
    28  	}
    29  	return a
    30  }
    31  
    32  // typeAlignment returns the type alignment for the given type.
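         //
         // For a struct, the required alignment is the least common multiple of the
         // alignments of its fields. As an illustrative example, a struct containing an
         // 8-byte-aligned atomic field and a 4-byte-aligned field requires 8-byte
         // alignment overall, since lcm(8, 4) = 8.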
    33  func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
    34  	requiredOffset := atomicAlignment(1)
    35  	if pc.pass.ImportObjectFact(obj, &requiredOffset) {
    36  		return requiredOffset
    37  	}
    38  
    39  	switch x := obj.Type().Underlying().(type) {
    40  	case *types.Struct:
    41  		fields := make([]*types.Var, x.NumFields())
    42  		for i := 0; i < x.NumFields(); i++ {
    43  			fields[i] = x.Field(i)
    44  		}
    45  		offsets := pc.pass.TypesSizes.Offsetsof(fields)
    46  		for i := 0; i < x.NumFields(); i++ {
     47  			// Check the offset, assuming that this offset
     48  			// aligns with the offset for the broader type.
    49  			fieldRequired := pc.typeAlignment(pkg, fields[i])
    50  			if offsets[i]%int64(fieldRequired) != 0 {
    51  				// The offset of this field is not compatible.
     52  				pc.maybeFail(fields[i].Pos(), "have offset %d, need alignment %d", offsets[i], fieldRequired)
    53  			}
     54  			// Ensure that requiredOffset is the LCM of all field alignments.
    55  			requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
    56  		}
    57  	case *types.Array:
    58  		// Export direct alignment requirements.
    59  		if named, ok := x.Elem().(*types.Named); ok && !hasTypeParams(named) {
    60  			requiredOffset = pc.typeAlignment(pkg, named.Obj())
    61  		}
    62  	default:
    63  		// Use the compiler's underlying alignment.
    64  		requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
    65  	}
    66  
    67  	if pkg == obj.Pkg() {
     68  		// Cache as an object fact for subsequent calls. Note that we
    69  		// can only export object facts for the package that we are
    70  		// currently analyzing. There may be no exported facts for
    71  		// array types or alias types, for example.
    72  		pc.pass.ExportObjectFact(obj, &requiredOffset)
    73  	}
    74  
    75  	return requiredOffset
    76  }
    77  
    78  // hasTypeParams returns true iff the named type has type parameters.
    79  func hasTypeParams(typ *types.Named) bool {
    80  	return typ.TypeParams() != nil && typ.TypeParams().Len() > 0
    81  }
    82  
    83  // checkTypeAlignment checks the alignment of the given type.
    84  //
    85  // This calls typeAlignment, which resolves all types recursively. This method
     86  // should be called for all types individually to ensure full coverage.
    87  func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) {
    88  	if !hasTypeParams(typ) {
    89  		_ = pc.typeAlignment(pkg, typ.Obj())
    90  	}
    91  }
    92  
     93  // atomicRules specifies the constraints on atomic access to a field.
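         //
         // nonAtomic means calls into the atomic packages are unexpected;
         // readWriteAtomic means every access must go through the atomic packages;
         // readOnlyAtomic means only atomic loads are permitted (used below when a
         // guarding lock is not held); mixedAtomic means plain reads are also
         // tolerated because all guards are held.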
    94  type atomicRules int
    95  
    96  const (
    97  	nonAtomic atomicRules = iota
    98  	readWriteAtomic
    99  	readOnlyAtomic
   100  	mixedAtomic
   101  )
   102  
   103  // checkAtomicCall checks for an atomic access.
   104  //
   105  // inst is the instruction analyzed, obj is used only for maybeFail.
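         //
         // As an illustrative example (s.count is a hypothetical atomic-only field):
         //
         //	v := atomic.LoadInt32(&s.count) // allowed whenever atomic access is expected
         //	atomic.AddInt32(&s.count, 1)    // flagged when ar == readOnlyAtomic
         //	s.count = v                     // flagged when ar != nonAtomic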
   106  func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, ar atomicRules) {
   107  	switch x := inst.(type) {
   108  	case *ssa.Call:
   109  		if x.Common().IsInvoke() {
   110  			if ar != nonAtomic {
   111  				// This is an illegal interface dispatch.
   112  				pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field")
   113  			}
   114  			return
   115  		}
   116  		fn, ok := x.Common().Value.(*ssa.Function)
   117  		if !ok {
   118  			if ar != nonAtomic {
   119  				// This is an illegal call to a non-static function.
   120  				pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field")
   121  			}
   122  			return
   123  		}
   124  		pkg := fn.Package()
   125  		if pkg == nil {
   126  			if ar != nonAtomic {
   127  				// This is a call to some shared wrapper function.
   128  				pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper")
   129  			}
   130  			return
   131  		}
   132  		var lff lockFunctionFacts // Check for exemption.
   133  		if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore {
   134  			return
   135  		}
   136  		if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" {
   137  			if ar != nonAtomic {
   138  				// This is an illegal call to a non-atomic package function.
   139  				pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field")
   140  			}
   141  			return
   142  		}
   143  		if fn.Signature.Recv() != nil {
    144  			// Always allow calls to methods of atomic wrappers, such as atomic.Int32 (introduced in Go 1.19).
   145  			return
   146  		}
   147  		if ar == nonAtomic {
   148  			// We are *not* expecting an atomic dispatch.
   149  			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
   150  				pc.maybeFail(inst.Pos(), "unexpected call to atomic function")
   151  			}
   152  		}
   153  		if !strings.HasPrefix(fn.Name(), "Load") && ar == readOnlyAtomic {
   154  			// We are not allowing any reads in this context.
   155  			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
   156  				pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?")
   157  			}
   158  			return
   159  		}
   160  		return // Don't hit common case.
   161  	case *ssa.ChangeType:
   162  		// Allow casts for atomic values, but nothing else.
   163  		if refs := x.Referrers(); refs != nil && len(*refs) == 1 {
   164  			pc.checkAtomicCall((*refs)[0], obj, ar)
   165  			return
   166  		}
   167  	case *ssa.UnOp:
   168  		if x.Op == token.MUL && ar == mixedAtomic {
   169  			// This is allowed; this is a strict reading.
   170  			return
   171  		}
   172  	}
   173  	if ar != nonAtomic {
   174  		// This is something else entirely.
   175  		if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
   176  			pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst)
   177  		}
   178  		return
   179  	}
   180  }
   181  
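         // resolveStruct resolves typ to its underlying struct type, unwrapping any
         // pointer indirection. It returns false if typ does not resolve to a struct.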
   182  func resolveStruct(typ types.Type) (*types.Struct, bool) {
   183  	structType, ok := typ.Underlying().(*types.Struct)
   184  	if ok {
   185  		return structType, true
   186  	}
   187  	ptrType, ok := typ.Underlying().(*types.Pointer)
   188  	if ok {
   189  		return resolveStruct(ptrType.Elem())
   190  	}
   191  	return nil, false
   192  }
   193  
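         // findField returns the object for the field with the given index in typ,
         // which may be a struct or a pointer to a struct. It returns false if the
         // field cannot be resolved.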
   194  func findField(typ types.Type, field int) (types.Object, bool) {
   195  	structType, ok := resolveStruct(typ)
   196  	if !ok || field >= structType.NumFields() {
   197  		return nil, false
   198  	}
   199  	return structType.Field(field), true
   200  }
   201  
   202  // almostInst is a generalization over ssa.Field, ssa.FieldAddr, ssa.Global.
   203  type almostInst interface {
   204  	Pos() token.Pos
   205  	Referrers() *[]ssa.Instruction
   206  }
   207  
   208  // checkGuards checks the guards held.
   209  //
   210  // This also enforces atomicity constraints for fields that must be accessed
   211  // atomically. The parameter isWrite indicates whether this field is used
   212  // downstream for a write operation.
   213  //
   214  // Note that this function is not called if lff.Ignore is true, since it cannot
   215  // discover any local anonymous functions or closures.
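         //
         // As an illustrative example (assuming the package's "// +checklocks:mu"
         // annotation syntax; the type and field names are hypothetical):
         //
         //	type counter struct {
         //		mu sync.Mutex
         //		// +checklocks:mu
         //		n  int
         //	}
         //
         // Reading or writing n on a counter value without holding its mu is reported
         // as an invalid field access, unless the position has been explicitly forced.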
   216  func (pc *passContext) checkGuards(inst almostInst, from ssa.Value, accessObj types.Object, ls *lockState, isWrite bool) {
   217  	var (
   218  		lgf         lockGuardFacts
   219  		guardsFound int
   220  		guardsHeld  = make(map[string]struct{}) // Keyed by resolved string.
   221  	)
   222  
   223  	// Load the facts for the object accessed.
   224  	pc.pass.ImportObjectFact(accessObj, &lgf)
   225  
   226  	// Check guards held.
   227  	for guardName, fgr := range lgf.GuardedBy {
   228  		guardsFound++
   229  		r := fgr.resolveField(pc, ls, from)
   230  		if !r.valid() {
   231  			// See above; this cannot be forced.
   232  			pc.maybeFail(inst.Pos(), "field %s cannot be resolved", guardName)
   233  			continue
   234  		}
   235  		s, ok := ls.isHeld(r, isWrite)
   236  		if ok {
   237  			guardsHeld[s] = struct{}{}
   238  			continue
   239  		}
   240  		if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok {
   241  			// Mark this as locked, since it has been forced. All
   242  			// forces are treated as an exclusive lock.
   243  			s, _ := ls.lockField(r, true /* exclusive */)
   244  			guardsHeld[s] = struct{}{}
   245  			continue
   246  		}
   247  		// Note that we may allow this if the disposition is atomic,
   248  		// and we are allowing atomic reads only. This will fall into
   249  		// the atomic disposition check below, which asserts that the
   250  		// access is atomic. Further, len(guardsHeld) < guardsFound
   251  		// will be true for this case, so we require it to be
   252  		// read-only.
   253  		if lgf.AtomicDisposition != atomicRequired {
   254  			// There is no force key, no atomic access and no lock held.
   255  			pc.maybeFail(inst.Pos(), "invalid field access, %s (%s) must be locked when accessing %s (locks: %s)", guardName, s, accessObj.Name(), ls.String())
   256  		}
   257  	}
   258  
   259  	// Check the atomic access for this field.
   260  	switch lgf.AtomicDisposition {
   261  	case atomicRequired:
   262  		// Check that this is used safely as an input.
   263  		ar := readWriteAtomic
   264  		if guardsFound > 0 {
   265  			if len(guardsHeld) < guardsFound {
   266  				ar = readOnlyAtomic
   267  			} else {
   268  				ar = mixedAtomic
   269  			}
   270  		}
   271  		if refs := inst.Referrers(); refs != nil {
   272  			for _, otherInst := range *refs {
   273  				pc.checkAtomicCall(otherInst, accessObj, ar)
   274  			}
   275  		}
   276  		// Check that this is not otherwise written non-atomically,
   277  		// even if we do hold all the locks.
   278  		if isWrite {
   279  			pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", accessObj.Name(), ls.String())
   280  		}
   281  	case atomicDisallow:
   282  		// If atomic analysis is not enabled, skip.
   283  		if !enableAtomic {
   284  			break
   285  		}
   286  		// Check that this is *not* used atomically.
   287  		if refs := inst.Referrers(); refs != nil {
   288  			for _, otherInst := range *refs {
   289  				pc.checkAtomicCall(otherInst, accessObj, nonAtomic)
   290  			}
   291  		}
   292  	}
   293  
   294  	// Check inferred locks.
   295  	if accessObj.Pkg() == pc.pass.Pkg {
   296  		oo := pc.observationsFor(accessObj)
   297  		oo.total++
   298  		for s, info := range ls.lockedMutexes {
   299  			// Is this an object for which we have facts? If there
   300  			// is no ability to name this object, then we don't
    301  			// bother with any inference. We also ignore any self
   302  			// references (e.g. accessing a mutex while you are
   303  			// holding that exact mutex).
   304  			if info.object == nil || accessObj == info.object {
   305  				continue
   306  			}
   307  			// Has this already been held?
   308  			if _, ok := guardsHeld[s]; ok {
   309  				oo.counts[info.object]++
   310  				continue
   311  			}
   312  			// Is this a global? Record directly.
   313  			if _, ok := from.(*ssa.Global); ok {
   314  				oo.counts[info.object]++
   315  				continue
   316  			}
   317  			// Is the object a sibling to the accessObj? We need to
   318  			// check all fields and see if they match. We accept
   319  			// only siblings and globals for this recommendation.
   320  			structType, ok := resolveStruct(from.Type())
   321  			if !ok {
   322  				continue
   323  			}
   324  			for i := 0; i < structType.NumFields(); i++ {
   325  				if fieldObj := structType.Field(i); fieldObj == info.object {
   326  					// Add to the maybe list.
   327  					oo.counts[info.object]++
   328  				}
   329  			}
   330  		}
   331  	}
   332  }
   333  
   334  // checkFieldAccess checks the validity of a field access.
   335  func (pc *passContext) checkFieldAccess(inst almostInst, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
   336  	fieldObj, _ := findField(structObj.Type(), field)
   337  	pc.checkGuards(inst, structObj, fieldObj, ls, isWrite)
   338  }
   339  
   340  // noReferrers wraps an instruction as an almostInst.
   341  type noReferrers struct {
   342  	ssa.Instruction
   343  }
   344  
   345  // Referrers implements almostInst.Referrers.
   346  func (noReferrers) Referrers() *[]ssa.Instruction { return nil }
   347  
   348  // checkGlobalAccess checks the validity of a global access.
   349  func (pc *passContext) checkGlobalAccess(inst ssa.Instruction, g *ssa.Global, ls *lockState, isWrite bool) {
   350  	pc.checkGuards(noReferrers{inst}, g, g.Object(), ls, isWrite)
   351  }
   352  
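         // checkCall checks a single call, dispatching on whether it is an interface
         // invocation ("invoke" mode), a statically known function, an immediately
         // applied closure, or some other dynamically dispatched value.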
   353  func (pc *passContext) checkCall(call callCommon, lff *lockFunctionFacts, ls *lockState) {
   354  	// See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
   355  	//
   356  	// "invoke" mode: Method is non-nil, and Value is the underlying value.
   357  	if fn := call.Common().Method; fn != nil {
   358  		var nlff lockFunctionFacts
   359  		pc.pass.ImportObjectFact(fn, &nlff)
   360  		nlff.Ignore = nlff.Ignore || lff.Ignore // Inherit ignore.
   361  		pc.checkFunctionCall(call, fn, &nlff, ls)
   362  		return
   363  	}
   364  
    365  	// "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an
    366  	// ordinary function call of the value in Value, which may be a *Builtin, a
    367  	// *Function or any other value of kind 'func'.
    368  	//
    369  	// Value may be one of:
    370  	// (a) a *Function, indicating a statically dispatched call
    371  	//     to a package-level function, an anonymous function, or
    372  	//     a method of a named type.
    373  	//
    374  	// (b) a *MakeClosure, indicating an immediately applied
    375  	//     function literal with free variables.
    376  	//
    377  	// (c) a *Builtin, indicating a statically dispatched call
    378  	//     to a built-in function.
    379  	//
    380  	// (d) any other value, indicating a dynamically dispatched
    381  	//     function call.
   382  	switch fn := call.Common().Value.(type) {
   383  	case *ssa.Function:
   384  		nlff := lockFunctionFacts{
   385  			Ignore: lff.Ignore, // Inherit ignore.
   386  		}
   387  		if obj := fn.Object(); obj != nil {
   388  			pc.pass.ImportObjectFact(obj, &nlff)
   389  			nlff.Ignore = nlff.Ignore || lff.Ignore // See above.
   390  			pc.checkFunctionCall(call, obj.(*types.Func), &nlff, ls)
   391  		} else {
   392  			// Anonymous functions have no facts, and cannot be
   393  			// annotated.  We don't check for violations using the
   394  			// function facts, since they cannot exist. Instead, we
   395  			// do a fresh analysis using the current lock state.
   396  			fnls := ls.fork()
   397  			for i, arg := range call.Common().Args {
   398  				fnls.store(fn.Params[i], arg)
   399  			}
   400  			pc.checkFunction(call, fn, &nlff, fnls, true /* force */)
   401  		}
   402  	case *ssa.MakeClosure:
   403  		// Note that creating and then invoking closures locally is
   404  		// allowed, but analysis of passing closures is done when
   405  		// checking individual instructions.
   406  		pc.checkClosure(call, fn, lff, ls)
   407  	default:
   408  		return
   409  	}
   410  }
   411  
    412  // postFunctionCallUpdate updates the lock state after a call.
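         //
         // Locks held on entry but not on exit are released, and locks held on exit
         // but not on entry are acquired, per the callee's annotations. As an
         // illustrative example, a call to a function annotated as acquiring a
         // hypothetical mutex m leaves m recorded as held in ls.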
   413  func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState, aliases bool) {
   414  	// Release all locks not still held.
   415  	for fieldName, fg := range lff.HeldOnEntry {
   416  		if _, ok := lff.HeldOnExit[fieldName]; ok {
   417  			continue
   418  		}
   419  		if fg.IsAlias && !aliases {
   420  			continue
   421  		}
   422  		r := fg.Resolver.resolveCall(pc, ls, call.Common().Args, call.Value())
   423  		if !r.valid() {
   424  			// See above: this cannot be forced.
   425  			pc.maybeFail(call.Pos(), "field %s cannot be resolved", fieldName)
   426  			continue
   427  		}
   428  		if s, ok := ls.unlockField(r, fg.Exclusive); !ok && !lff.Ignore {
   429  			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
   430  				pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
   431  			}
   432  		}
   433  	}
   434  
   435  	// Update all held locks if acquired.
   436  	for fieldName, fg := range lff.HeldOnExit {
   437  		if _, ok := lff.HeldOnEntry[fieldName]; ok {
   438  			continue
   439  		}
   440  		if fg.IsAlias && !aliases {
   441  			continue
   442  		}
   443  		// Acquire the lock per the annotation.
   444  		r := fg.Resolver.resolveCall(pc, ls, call.Common().Args, call.Value())
   445  		if s, ok := ls.lockField(r, fg.Exclusive); !ok && !lff.Ignore {
   446  			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
   447  				pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
   448  			}
   449  		}
   450  	}
   451  }
   452  
   453  // exclusiveStr returns a string describing exclusive requirements.
   454  func exclusiveStr(exclusive bool) string {
   455  	if exclusive {
   456  		return "exclusively"
   457  	}
   458  	return "non-exclusively"
   459  }
   460  
   461  // checkFunctionCall checks preconditions for function calls, and tracks the
   462  // lock state by recording relevant calls to sync functions. Note that calls to
   463  // atomic functions are tracked by checkFieldAccess by looking directly at the
   464  // referrers (because ordering doesn't matter there, so we need not scan in
   465  // instruction order).
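         //
         // As an illustrative example (c.mu is a hypothetical sync.Mutex field
         // tracked by the analysis):
         //
         //	c.mu.Lock()
         //	c.mu.Lock()   // reported: already locked
         //	c.mu.Unlock()
         //	c.mu.Unlock() // reported: already unlocked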
   466  func (pc *passContext) checkFunctionCall(call callCommon, fn *types.Func, lff *lockFunctionFacts, ls *lockState) {
   467  	// Extract the "receiver" properly.
   468  	var args []ssa.Value
   469  	if call.Common().Method != nil {
   470  		// This is an interface dispatch for sync.Locker.
   471  		args = append([]ssa.Value{call.Common().Value}, call.Common().Args...)
   472  	} else {
   473  		// This matches the signature for the relevant
   474  		// sync.Lock/sync.Unlock functions below.
   475  		args = call.Common().Args
   476  	}
   477  
    478  	// Check that all required guards are held. Note that this explicitly does
    479  	// not include aliases, hence the false passed below.
   480  	for fieldName, fg := range lff.HeldOnEntry {
   481  		if fg.IsAlias {
   482  			continue
   483  		}
   484  		r := fg.Resolver.resolveCall(pc, ls, args, call.Value())
   485  		if s, ok := ls.isHeld(r, fg.Exclusive); !ok {
   486  			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
   487  				pc.maybeFail(call.Pos(), "must hold %s %s (%s) to call %s, but not held (locks: %s)", fieldName, exclusiveStr(fg.Exclusive), s, fn.Name(), ls.String())
   488  			} else {
   489  				// Force the lock to be acquired.
   490  				ls.lockField(r, fg.Exclusive)
   491  			}
   492  		}
   493  	}
   494  
   495  	// Update all lock state accordingly.
   496  	pc.postFunctionCallUpdate(call, lff, ls, false /* aliases */)
   497  
   498  	// Check if it's a method dispatch for something in the sync package.
   499  	// See: https://godoc.org/golang.org/x/tools/go/ssa#Function
   500  
   501  	if (lockerRE.MatchString(fn.FullName()) || mutexRE.MatchString(fn.FullName())) && len(args) > 0 {
   502  		rv := makeResolvedValue(args[0], nil)
   503  		isExclusive := false
   504  		switch fn.Name() {
   505  		case "Lock", "NestedLock":
   506  			isExclusive = true
   507  			fallthrough
   508  		case "RLock":
   509  			if s, ok := ls.lockField(rv, isExclusive); !ok && !lff.Ignore {
   510  				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
    511  					// Locking a mutex that is already locked.
   512  					pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
   513  				}
   514  			}
   515  		case "Unlock", "NestedUnlock":
   516  			isExclusive = true
   517  			fallthrough
   518  		case "RUnlock":
   519  			if s, ok := ls.unlockField(rv, isExclusive); !ok && !lff.Ignore {
   520  				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
   521  					// Unlocking something that is already unlocked.
   522  					pc.maybeFail(call.Pos(), "%s already unlocked or locked differently (locks: %s)", s, ls.String())
   523  				}
   524  			}
   525  		case "DowngradeLock":
   526  			if s, ok := ls.downgradeField(rv); !ok {
   527  				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
   528  					// Downgrading something that may not be downgraded.
   529  					pc.maybeFail(call.Pos(), "%s already unlocked or not exclusive (locks: %s)", s, ls.String())
   530  				}
   531  			}
   532  		}
   533  	}
   534  }
   535  
   536  // checkClosure forks the lock state, and creates a binding for the FreeVars of
   537  // the closure. This allows the analysis to resolve the closure.
   538  func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, lff *lockFunctionFacts, ls *lockState) {
   539  	clls := ls.fork()
   540  	clfn := fn.Fn.(*ssa.Function)
   541  	for i, fv := range clfn.FreeVars {
   542  		clls.store(fv, fn.Bindings[i])
   543  	}
   544  
    545  	// Note that this is *not* a call to checkFunctionCall, which checks
   546  	// against the function preconditions. Instead, this does a fresh
   547  	// analysis of the function from source code with a different state.
   548  	nlff := lockFunctionFacts{
   549  		Ignore: lff.Ignore, // Inherit ignore.
   550  	}
   551  	pc.checkFunction(call, clfn, &nlff, clls, true /* force */)
   552  }
   553  
   554  // freshAlloc indicates that v has been allocated within the local scope. There
   555  // is no lock checking done on objects that are freshly allocated.
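         //
         // For example, a field address derived from a local *ssa.Alloc (such as the
         // address of a local variable or the result of new(T)) is considered fresh,
         // so guard checks are skipped for it in checkInstruction.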
   556  func freshAlloc(v ssa.Value) bool {
   557  	switch x := v.(type) {
   558  	case *ssa.Alloc:
   559  		return true
   560  	case *ssa.FieldAddr:
   561  		return freshAlloc(x.X)
   562  	case *ssa.Field:
   563  		return freshAlloc(x.X)
   564  	case *ssa.IndexAddr:
   565  		return freshAlloc(x.X)
   566  	case *ssa.Index:
   567  		return freshAlloc(x.X)
   568  	case *ssa.Convert:
   569  		return freshAlloc(x.X)
   570  	case *ssa.ChangeType:
   571  		return freshAlloc(x.X)
   572  	default:
   573  		return false
   574  	}
   575  }
   576  
   577  // isWrite indicates that this value is used as the addr field in a store.
   578  //
    579  // Note that even when this returns false, the value may still be used for a
    580  // write. The result here is optimistic but sufficient for basic analysis.
   581  func isWrite(v ssa.Value) bool {
   582  	refs := v.Referrers()
   583  	if refs == nil {
   584  		return false
   585  	}
   586  	for _, ref := range *refs {
   587  		if s, ok := ref.(*ssa.Store); ok && s.Addr == v {
   588  			return true
   589  		}
   590  	}
   591  	return false
   592  }
   593  
    594  // callCommon is implemented by call-like values that expose an underlying ssa.CallCommon.
   595  type callCommon interface {
   596  	Pos() token.Pos
   597  	Common() *ssa.CallCommon
   598  	Value() *ssa.Call
   599  }
   600  
    601  // checkInstruction checks the legality of a single instruction based on the
   602  // current lockState.
   603  func (pc *passContext) checkInstruction(inst ssa.Instruction, lff *lockFunctionFacts, ls *lockState) (*ssa.Return, *lockState) {
   604  	// Record any observed globals, and check for violations. The global
   605  	// value is not itself an instruction, but we check all referrers to
   606  	// see where they are consumed.
   607  	var stackLocal [16]*ssa.Value
   608  	ops := inst.Operands(stackLocal[:])
   609  	for _, v := range ops {
   610  		if v == nil {
   611  			continue
   612  		}
   613  		g, ok := (*v).(*ssa.Global)
   614  		if !ok {
   615  			continue
   616  		}
   617  		_, isWrite := inst.(*ssa.Store)
   618  		pc.checkGlobalAccess(inst, g, ls, isWrite)
   619  	}
   620  
   621  	// Process the instruction.
   622  	switch x := inst.(type) {
   623  	case *ssa.Store:
   624  		// Record that this value is holding this other value. This is
   625  		// because at the beginning of each ssa execution, there is a
   626  		// series of assignments of parameter values to alloc objects.
   627  		// This allows us to trace these back to the original
   628  		// parameters as aliases above.
   629  		//
   630  		// Note that this may overwrite an existing value in the lock
   631  		// state, but this is intentional.
   632  		ls.store(x.Addr, x.Val)
   633  	case *ssa.Field:
   634  		if !freshAlloc(x.X) && !lff.Ignore {
   635  			pc.checkFieldAccess(x, x.X, x.Field, ls, false)
   636  		}
   637  	case *ssa.FieldAddr:
   638  		if !freshAlloc(x.X) && !lff.Ignore {
   639  			pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
   640  		}
   641  	case *ssa.Call:
   642  		pc.checkCall(x, lff, ls)
   643  	case *ssa.Defer:
   644  		ls.pushDefer(x)
   645  	case *ssa.RunDefers:
   646  		for d := ls.popDefer(); d != nil; d = ls.popDefer() {
   647  			pc.checkCall(d, lff, ls)
   648  		}
   649  	case *ssa.MakeClosure:
   650  		if refs := x.Referrers(); refs != nil {
   651  			var (
   652  				calls    int
   653  				nonCalls int
   654  			)
   655  			for _, ref := range *refs {
   656  				switch ref.(type) {
   657  				case *ssa.Call, *ssa.Defer:
   658  					// Analysis will be done on the call
   659  					// itself subsequently, including the
   660  					// lock state at the time of the call.
   661  					calls++
   662  				default:
    663  					// We need to analyze separately. Per
    664  					// below, this means that we'll analyze
    665  					// at closure construction time with no
    666  					// assumptions about when it will be
    667  					// called.
   668  					nonCalls++
   669  				}
   670  			}
   671  			if calls > 0 && nonCalls == 0 {
   672  				return nil, nil
   673  			}
   674  		}
    675  		// Analyze the closure without bindings. This means that we
    676  		// assume no lock facts and no existing lock state. Only
    677  		// trivial closures are acceptable in this case.
   678  		clfn := x.Fn.(*ssa.Function)
   679  		nlff := lockFunctionFacts{
   680  			Ignore: lff.Ignore, // Inherit ignore.
   681  		}
   682  		pc.checkFunction(nil, clfn, &nlff, nil, false /* force */)
   683  	case *ssa.Return:
   684  		return x, ls // Valid return state.
   685  	}
   686  	return nil, nil
   687  }
   688  
    689  // checkBasicBlock traverses the control flow graph starting at the given
    690  // block and checks each instruction for allowed operations.
   691  func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState, rg map[*ssa.BasicBlock]struct{}) *lockState {
   692  	// Check for cached results from entering this block from a *different*
   693  	// execution path. Note that this is not the same path, which is
   694  	// checked with the recursion guard below.
   695  	if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
   696  		return nil
   697  	}
   698  
   699  	// Prevent recursion. If the lock state is constantly changing and we
   700  	// are a recursive path, then there will never be a return block.
   701  	if rg == nil {
   702  		rg = make(map[*ssa.BasicBlock]struct{})
   703  	}
   704  	if _, ok := rg[block]; ok {
   705  		return nil
   706  	}
   707  	rg[block] = struct{}{}
   708  	defer func() { delete(rg, block) }()
   709  
   710  	// If the lock state is not compatible, then we need to do the
   711  	// recursive analysis to ensure that it is still sane. For example, the
   712  	// following is guaranteed to generate incompatible locking states:
   713  	//
   714  	//	if foo {
   715  	//		mu.Lock()
   716  	//	}
   717  	//	other stuff ...
   718  	//	if foo {
   719  	//		mu.Unlock()
   720  	//	}
   721  
   722  	var (
   723  		rv  *ssa.Return
   724  		rls *lockState
   725  	)
   726  
   727  	// Analyze this block.
   728  	seen[block] = parent
   729  	ls := parent.fork()
   730  	for _, inst := range block.Instrs {
   731  		rv, rls = pc.checkInstruction(inst, lff, ls)
   732  		if rls != nil {
   733  			failed := false
   734  			// Validate held locks.
   735  			for fieldName, fg := range lff.HeldOnExit {
   736  				r := fg.Resolver.resolveStatic(pc, ls, fn, rv)
   737  				if !r.valid() {
   738  					// This cannot be forced, since we have no reference.
   739  					pc.maybeFail(rv.Pos(), "lock %s cannot be resolved", fieldName)
   740  					continue
   741  				}
   742  				if s, ok := rls.isHeld(r, fg.Exclusive); !ok {
   743  					if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok && !lff.Ignore {
   744  						pc.maybeFail(rv.Pos(), "lock %s (%s) not held %s (locks: %s)", fieldName, s, exclusiveStr(fg.Exclusive), rls.String())
   745  						failed = true
   746  					} else {
   747  						// Force the lock to be acquired.
   748  						rls.lockField(r, fg.Exclusive)
   749  					}
   750  				}
   751  			}
   752  			// Check for other locks, but only if the above didn't trip.
   753  			if !failed && rls.count() != len(lff.HeldOnExit) && !lff.Ignore {
   754  				pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
   755  			}
   756  		}
   757  	}
   758  
   759  	// Analyze all successors.
   760  	for _, succ := range block.Succs {
   761  		// Collect possible return values, and make sure that the lock
   762  		// state aligns with any return value that we may have found
   763  		// above. Note that checkBasicBlock will recursively analyze
   764  		// the lock state to ensure that Releases and Acquires are
   765  		// respected.
   766  		if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen, rg); pls != nil {
   767  			if rls != nil && !rls.isCompatible(pls) {
   768  				if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok && !lff.Ignore {
   769  					pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %s)", rls.String(), pls.String())
   770  				}
   771  			}
   772  			rls = pls
   773  		}
   774  	}
   775  	return rls
   776  }
   777  
   778  // checkFunction checks a function invocation, typically starting with nil lockState.
   779  func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) {
   780  	defer func() {
   781  		// Mark this function as checked. This is used by the top-level
   782  		// loop to ensure that all anonymous functions are scanned, if
   783  		// they are not explicitly invoked here. Note that this can
   784  		// happen if the anonymous functions are e.g. passed only as
   785  		// parameters or used to initialize some structure.
   786  		pc.functions[fn] = struct{}{}
   787  	}()
   788  	if _, ok := pc.functions[fn]; !force && ok {
   789  		// This function has already been analyzed at least once.
   790  		// That's all we permit for each function, although this may
   791  		// cause some anonymous functions to be analyzed in only one
   792  		// context.
   793  		return
   794  	}
   795  
    796  	// If no call is provided, then synthesize one. This is used
    797  	// below only to check against the lock preconditions, which may
   798  	// include return values.
   799  	if call == nil {
   800  		call = &ssa.Call{Call: ssa.CallCommon{Value: fn}}
   801  	}
   802  
   803  	// Initialize ls with any preconditions that require locks to be held
    804  	// for the method to be invoked. Note that in the overwhelming majority
   805  	// of cases, parent will be nil. However, in the case of closures and
   806  	// anonymous functions, we may start with a non-nil lock state.
   807  	//
   808  	// Note that this will include all aliases, which are also released
   809  	// appropriately below.
   810  	ls := parent.fork()
   811  	for fieldName, fg := range lff.HeldOnEntry {
    812  		// The first is the method object itself, so we skip that when looking
   813  		// for receiver/function parameters.
   814  		r := fg.Resolver.resolveStatic(pc, ls, fn, call.Value())
   815  		if !r.valid() {
   816  			// See above: this cannot be forced.
   817  			pc.maybeFail(fn.Pos(), "lock %s cannot be resolved", fieldName)
   818  			continue
   819  		}
   820  		if s, ok := ls.lockField(r, fg.Exclusive); !ok && !lff.Ignore {
   821  			// This can only happen if the same value is declared
   822  			// multiple times, and should be caught by the earlier
   823  			// fact scanning. Keep it here as a sanity check.
   824  			pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times or differently (locks: %s)", fieldName, s, ls.String())
   825  		}
   826  	}
   827  
   828  	// Scan the blocks.
   829  	seen := make(map[*ssa.BasicBlock]*lockState)
   830  	if len(fn.Blocks) > 0 {
   831  		pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen, nil)
   832  	}
   833  
   834  	// Scan the recover block.
   835  	if fn.Recover != nil {
   836  		pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen, nil)
   837  	}
   838  
   839  	// Update all lock state accordingly. This will be called only if we
   840  	// are doing inline analysis for e.g. an anonymous function.
   841  	if call != nil && parent != nil {
   842  		pc.postFunctionCallUpdate(call, lff, parent, true /* aliases */)
   843  	}
   844  }
   845  
   846  // checkInferred checks for any inferred lock annotations.
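         //
         // As an illustrative example: if a field was observed 10 times and the same
         // candidate mutex was held for 9 of those observations, the 0.9 threshold
         // below is met and a hint suggesting a checklocks annotation is emitted.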
   847  func (pc *passContext) checkInferred() {
   848  	for obj, oo := range pc.observations {
   849  		var lgf lockGuardFacts
   850  		pc.pass.ImportObjectFact(obj, &lgf)
   851  		for other, count := range oo.counts {
   852  			// Is this already a guard?
   853  			if _, ok := lgf.GuardedBy[other.Name()]; ok {
   854  				continue
   855  			}
   856  			// Check to see if this field is used with a given lock
   857  			// held above the threshold. If yes, provide a helpful
    858  			// hint that this may be something you wish to annotate.
   859  			const threshold = 0.9
   860  			if usage := float64(count) / float64(oo.total); usage >= threshold {
   861  				pc.maybeFail(obj.Pos(), "may require checklocks annotation for %s, used with lock held %2.0f%% of the time", other.Name(), usage*100)
   862  			}
   863  		}
   864  	}
   865  }