github.com/ttpreport/gvisor-ligolo@v0.0.0-20240123134145-a858404967ba/tools/checklocks/analysis.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package checklocks

import (
	"go/token"
	"go/types"
	"strings"

	"golang.org/x/tools/go/ssa"
)

// gcd returns the greatest common divisor of a and b. It is used by
// typeAlignment below to compute the least common multiple of field
// alignment requirements.
func gcd(a, b atomicAlignment) atomicAlignment {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// typeAlignment returns the type alignment for the given type.
func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
	requiredOffset := atomicAlignment(1)
	if pc.pass.ImportObjectFact(obj, &requiredOffset) {
		return requiredOffset
	}

	switch x := obj.Type().Underlying().(type) {
	case *types.Struct:
		fields := make([]*types.Var, x.NumFields())
		for i := 0; i < x.NumFields(); i++ {
			fields[i] = x.Field(i)
		}
		offsets := pc.pass.TypesSizes.Offsetsof(fields)
		for i := 0; i < x.NumFields(); i++ {
			// Check that the offset of this field is compatible
			// with the field's own alignment requirement.
			fieldRequired := pc.typeAlignment(pkg, fields[i])
			if offsets[i]%int64(fieldRequired) != 0 {
				// The offset of this field is not compatible.
				pc.maybeFail(fields[i].Pos(), "have alignment %d, need %d", offsets[i], fieldRequired)
			}
			// Ensure that requiredOffset is the LCM of all field
			// alignment requirements.
			requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
		}
	case *types.Array:
		// Export direct alignment requirements.
		if named, ok := x.Elem().(*types.Named); ok && !hasTypeParams(named) {
			requiredOffset = pc.typeAlignment(pkg, named.Obj())
		}
	default:
		// Use the compiler's underlying alignment.
		requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
	}

	if pkg == obj.Pkg() {
		// Cache as an object fact for subsequent calls. Note that we
		// can only export object facts for the package that we are
		// currently analyzing. There may be no exported facts for
		// array types or alias types, for example.
		pc.pass.ExportObjectFact(obj, &requiredOffset)
	}

	return requiredOffset
}
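
// As an illustration (not part of the analysis): on a 32-bit target, where
// the compiler aligns 64-bit values to only 4 bytes, a struct such as the
// following would trip the offset check above, since the second field needs
// 8-byte alignment but lands at offset 4:
//
//	type counters struct {
//		flag uint32
//		val  atomicbitops.Uint64 // Reported: "have alignment 4, need 8".
//	}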

// hasTypeParams returns true iff the named type has type parameters.
func hasTypeParams(typ *types.Named) bool {
	return typ.TypeParams() != nil && typ.TypeParams().Len() > 0
}

// checkTypeAlignment checks the alignment of the given type.
//
// This calls typeAlignment, which resolves all types recursively. This method
// should be called for all types individually to ensure full coverage.
func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) {
	if !hasTypeParams(typ) {
		_ = pc.typeAlignment(pkg, typ.Obj())
	}
}

// atomicRules specify the allowed atomic access patterns for a field.
type atomicRules int

const (
	nonAtomic       atomicRules = iota // No atomic calls permitted.
	readWriteAtomic                    // Atomic loads and stores permitted.
	readOnlyAtomic                     // Only atomic loads permitted.
	mixedAtomic                        // Atomic calls and direct reads permitted.
)
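
// As a sketch of how these rules arise (see checkGuards below): a field
// annotated only with "+checkatomic" is checked as readWriteAtomic, while a
// field annotated with both "+checkatomic" and "+checklocks:mu" is checked
// as mixedAtomic when mu is held and readOnlyAtomic when it is not:
//
//	type stats struct {
//		mu sync.Mutex
//
//		// +checkatomic
//		// +checklocks:mu
//		count int64
//	}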

// checkAtomicCall checks for an atomic access.
//
// inst is the instruction analyzed, obj is used only for maybeFail.
func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, ar atomicRules) {
	switch x := inst.(type) {
	case *ssa.Call:
		if x.Common().IsInvoke() {
			if ar != nonAtomic {
				// This is an illegal interface dispatch.
				pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field")
			}
			return
		}
		fn, ok := x.Common().Value.(*ssa.Function)
		if !ok {
			if ar != nonAtomic {
				// This is an illegal call to a non-static function.
				pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field")
			}
			return
		}
		pkg := fn.Package()
		if pkg == nil {
			if ar != nonAtomic {
				// This is a call to some shared wrapper function.
				pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper")
			}
			return
		}
		var lff lockFunctionFacts // Check for exemption.
		if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore {
			return
		}
		if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" {
			if ar != nonAtomic {
				// This is an illegal call to a non-atomic package function.
				pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field")
			}
			return
		}
		if ar == nonAtomic {
			// We are *not* expecting an atomic dispatch.
			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
				pc.maybeFail(inst.Pos(), "unexpected call to atomic function")
			}
		}
		if !strings.HasPrefix(fn.Name(), "Load") && ar == readOnlyAtomic {
			// We are not allowing any writes in this context.
			if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
				pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?")
			}
			return
		}
		return // Don't hit common case.
	case *ssa.ChangeType:
		// Allow casts for atomic values, but nothing else.
		if refs := x.Referrers(); refs != nil && len(*refs) == 1 {
			pc.checkAtomicCall((*refs)[0], obj, ar)
			return
		}
	case *ssa.UnOp:
		if x.Op == token.MUL && ar == mixedAtomic {
			// This is allowed; this is a strict reading.
			return
		}
	}
	if ar != nonAtomic {
		// This is something else entirely.
		if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
			pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst)
		}
		return
	}
}
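
// As an illustration (hypothetical field), given an atomic-only field
// f.count, checkAtomicCall accepts static dispatches to the atomic packages
// and flags everything else:
//
//	atomic.AddInt64(&f.count, 1) // OK: static dispatch to package atomic.
//	f.count++                    // Flagged: illegal use of atomic-only field.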

// resolveStruct resolves typ to its underlying struct type, unwrapping any
// pointer indirections along the way.
func resolveStruct(typ types.Type) (*types.Struct, bool) {
	structType, ok := typ.Underlying().(*types.Struct)
	if ok {
		return structType, true
	}
	ptrType, ok := typ.Underlying().(*types.Pointer)
	if ok {
		return resolveStruct(ptrType.Elem())
	}
	return nil, false
}

// findField returns the object for the numbered field within the struct type
// underlying typ, if it exists.
func findField(typ types.Type, field int) (types.Object, bool) {
	structType, ok := resolveStruct(typ)
	if !ok || field >= structType.NumFields() {
		return nil, false
	}
	return structType.Field(field), true
}
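
// For example (illustrative): given "type S struct{ a, b int }", calling
// findField with the type **S and index 1 resolves through both pointer
// levels and returns the *types.Var for b.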

// almostInst is a generalization over ssa.Field, ssa.FieldAddr, ssa.Global.
type almostInst interface {
	Pos() token.Pos
	Referrers() *[]ssa.Instruction
}

// checkGuards checks the guards held.
//
// This also enforces atomicity constraints for fields that must be accessed
// atomically. The parameter isWrite indicates whether this field is used
// downstream for a write operation.
//
// Note that this function is not called if lff.Ignore is true, since it cannot
// discover any local anonymous functions or closures.
func (pc *passContext) checkGuards(inst almostInst, from ssa.Value, accessObj types.Object, ls *lockState, isWrite bool) {
	var (
		lgf         lockGuardFacts
		guardsFound int
		guardsHeld  = make(map[string]struct{}) // Keyed by resolved string.
	)

	// Load the facts for the object accessed.
	pc.pass.ImportObjectFact(accessObj, &lgf)

	// Check guards held.
	for guardName, fgr := range lgf.GuardedBy {
		guardsFound++
		r := fgr.resolveField(pc, ls, from)
		if !r.valid() {
			// See above; this cannot be forced.
			pc.maybeFail(inst.Pos(), "field %s cannot be resolved", guardName)
			continue
		}
		s, ok := ls.isHeld(r, isWrite)
		if ok {
			guardsHeld[s] = struct{}{}
			continue
		}
		if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok {
			// Mark this as locked, since it has been forced. All
			// forces are treated as an exclusive lock.
			s, _ := ls.lockField(r, true /* exclusive */)
			guardsHeld[s] = struct{}{}
			continue
		}
		// Note that we may allow this if the disposition is atomic,
		// and we are allowing atomic reads only. This will fall into
		// the atomic disposition check below, which asserts that the
		// access is atomic. Further, len(guardsHeld) < guardsFound
		// will be true for this case, so we require it to be
		// read-only.
		if lgf.AtomicDisposition != atomicRequired {
			// There is no force key, no atomic access and no lock held.
			pc.maybeFail(inst.Pos(), "invalid field access, %s (%s) must be locked when accessing %s (locks: %s)", guardName, s, accessObj.Name(), ls.String())
		}
	}

	// Check the atomic access for this field.
	switch lgf.AtomicDisposition {
	case atomicRequired:
		// Check that this is used safely as an input.
		ar := readWriteAtomic
		if guardsFound > 0 {
			if len(guardsHeld) < guardsFound {
				ar = readOnlyAtomic
			} else {
				ar = mixedAtomic
			}
		}
		if refs := inst.Referrers(); refs != nil {
			for _, otherInst := range *refs {
				pc.checkAtomicCall(otherInst, accessObj, ar)
			}
		}
		// Check that this is not otherwise written non-atomically,
		// even if we do hold all the locks.
		if isWrite {
			pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", accessObj.Name(), ls.String())
		}
	case atomicDisallow:
		// If atomic analysis is not enabled, skip.
		if !enableAtomic {
			break
		}
		// Check that this is *not* used atomically.
		if refs := inst.Referrers(); refs != nil {
			for _, otherInst := range *refs {
				pc.checkAtomicCall(otherInst, accessObj, nonAtomic)
			}
		}
	}

	// Check inferred locks.
	if accessObj.Pkg() == pc.pass.Pkg {
		oo := pc.observationsFor(accessObj)
		oo.total++
		for s, info := range ls.lockedMutexes {
			// Is this an object for which we have facts? If there
			// is no ability to name this object, then we don't
			// bother with any inference. We also ignore any self
			// references (e.g. accessing a mutex while you are
			// holding that exact mutex).
			if info.object == nil || accessObj == info.object {
				continue
			}
			// Has this already been held?
			if _, ok := guardsHeld[s]; ok {
				oo.counts[info.object]++
				continue
			}
			// Is this a global? Record directly.
			if _, ok := from.(*ssa.Global); ok {
				oo.counts[info.object]++
				continue
			}
			// Is the object a sibling to the accessObj? We need to
			// check all fields and see if they match. We accept
			// only siblings and globals for this recommendation.
			structType, ok := resolveStruct(from.Type())
			if !ok {
				continue
			}
			for i := 0; i < structType.NumFields(); i++ {
				if fieldObj := structType.Field(i); fieldObj == info.object {
					// Add to the maybe list.
					oo.counts[info.object]++
				}
			}
		}
	}
}
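
// As an illustration (hypothetical type and annotation), an access to a field
// annotated "+checklocks:mu" fails unless mu appears in the current lock
// state:
//
//	func (f *foo) bad() int {
//		return f.count // Flagged: mu must be locked when accessing count.
//	}
//
//	func (f *foo) good() int {
//		f.mu.Lock()
//		defer f.mu.Unlock()
//		return f.count // OK: mu is held.
//	}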

// checkFieldAccess checks the validity of a field access.
func (pc *passContext) checkFieldAccess(inst almostInst, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
	fieldObj, _ := findField(structObj.Type(), field)
	pc.checkGuards(inst, structObj, fieldObj, ls, isWrite)
}

// noReferrers wraps an instruction as an almostInst.
type noReferrers struct {
	ssa.Instruction
}

// Referrers implements almostInst.Referrers.
func (noReferrers) Referrers() *[]ssa.Instruction { return nil }

// checkGlobalAccess checks the validity of a global access.
func (pc *passContext) checkGlobalAccess(inst ssa.Instruction, g *ssa.Global, ls *lockState, isWrite bool) {
	pc.checkGuards(noReferrers{inst}, g, g.Object(), ls, isWrite)
}

// checkCall checks a call of any kind, dispatching on the call mode.
func (pc *passContext) checkCall(call callCommon, lff *lockFunctionFacts, ls *lockState) {
	// See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
	//
	// "invoke" mode: Method is non-nil, and Value is the underlying value.
	if fn := call.Common().Method; fn != nil {
		var nlff lockFunctionFacts
		pc.pass.ImportObjectFact(fn, &nlff)
		nlff.Ignore = nlff.Ignore || lff.Ignore // Inherit ignore.
		pc.checkFunctionCall(call, fn, &nlff, ls)
		return
	}

	// "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an
	// ordinary function call of the value in Value, which may be a *Builtin,
	// a *Function or any other value of kind 'func'.
	//
	// Value may be one of:
	//
	// (a) a *Function, indicating a statically dispatched call
	//     to a package-level function, an anonymous function, or
	//     a method of a named type.
	//
	// (b) a *MakeClosure, indicating an immediately applied
	//     function literal with free variables.
	//
	// (c) a *Builtin, indicating a statically dispatched call
	//     to a built-in function.
	//
	// (d) any other value, indicating a dynamically dispatched
	//     function call.
	switch fn := call.Common().Value.(type) {
	case *ssa.Function:
		nlff := lockFunctionFacts{
			Ignore: lff.Ignore, // Inherit ignore.
		}
		if obj := fn.Object(); obj != nil {
			pc.pass.ImportObjectFact(obj, &nlff)
			nlff.Ignore = nlff.Ignore || lff.Ignore // See above.
			pc.checkFunctionCall(call, obj.(*types.Func), &nlff, ls)
		} else {
			// Anonymous functions have no facts, and cannot be
			// annotated. We don't check for violations using the
			// function facts, since they cannot exist. Instead, we
			// do a fresh analysis using the current lock state.
			fnls := ls.fork()
			for i, arg := range call.Common().Args {
				fnls.store(fn.Params[i], arg)
			}
			pc.checkFunction(call, fn, &nlff, fnls, true /* force */)
		}
	case *ssa.MakeClosure:
		// Note that creating and then invoking closures locally is
		// allowed, but analysis of passing closures is done when
		// checking individual instructions.
		pc.checkClosure(call, fn, lff, ls)
	default:
		return
	}
}

// postFunctionCallUpdate updates the lock state after a function call,
// releasing locks that the callee dropped and acquiring locks that the
// callee returned held.
func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState, aliases bool) {
	// Release all locks not still held.
	for fieldName, fg := range lff.HeldOnEntry {
		if _, ok := lff.HeldOnExit[fieldName]; ok {
			continue
		}
		if fg.IsAlias && !aliases {
			continue
		}
		r := fg.Resolver.resolveCall(pc, ls, call.Common().Args, call.Value())
		if !r.valid() {
			// See above: this cannot be forced.
			pc.maybeFail(call.Pos(), "field %s cannot be resolved", fieldName)
			continue
		}
		if s, ok := ls.unlockField(r, fg.Exclusive); !ok && !lff.Ignore {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
				pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
			}
		}
	}

	// Update all held locks if acquired.
	for fieldName, fg := range lff.HeldOnExit {
		if _, ok := lff.HeldOnEntry[fieldName]; ok {
			continue
		}
		if fg.IsAlias && !aliases {
			continue
		}
		// Acquire the lock per the annotation.
		r := fg.Resolver.resolveCall(pc, ls, call.Common().Args, call.Value())
		if s, ok := ls.lockField(r, fg.Exclusive); !ok && !lff.Ignore {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
				pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
			}
		}
	}
}
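
// As a sketch (hypothetical receiver f), the checklocks annotations map onto
// lff.HeldOnEntry and lff.HeldOnExit roughly as follows:
//
//	// +checklocks:f.mu        -> in HeldOnEntry and HeldOnExit (held throughout).
//	// +checklocksacquire:f.mu -> in HeldOnExit only (acquired by the call).
//	// +checklocksrelease:f.mu -> in HeldOnEntry only (released by the call).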

// exclusiveStr returns a string describing exclusive requirements.
func exclusiveStr(exclusive bool) string {
	if exclusive {
		return "exclusively"
	}
	return "non-exclusively"
}

// checkFunctionCall checks preconditions for function calls, and tracks the
// lock state by recording relevant calls to sync functions. Note that calls
// to atomic functions are tracked by checkFieldAccess by looking directly at
// the referrers (ordering doesn't matter there, so we need not scan in
// instruction order).
func (pc *passContext) checkFunctionCall(call callCommon, fn *types.Func, lff *lockFunctionFacts, ls *lockState) {
	// Extract the "receiver" properly.
	var args []ssa.Value
	if call.Common().Method != nil {
		// This is an interface dispatch for sync.Locker.
		args = append([]ssa.Value{call.Common().Value}, call.Common().Args...)
	} else {
		// This matches the signature for the relevant
		// sync.Lock/sync.Unlock functions below.
		args = call.Common().Args
	}

	// Check that all required guards are held. Note that this explicitly
	// does not include aliases, hence false being passed below.
	for fieldName, fg := range lff.HeldOnEntry {
		if fg.IsAlias {
			continue
		}
		r := fg.Resolver.resolveCall(pc, ls, args, call.Value())
		if s, ok := ls.isHeld(r, fg.Exclusive); !ok {
			if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
				pc.maybeFail(call.Pos(), "must hold %s %s (%s) to call %s, but not held (locks: %s)", fieldName, exclusiveStr(fg.Exclusive), s, fn.Name(), ls.String())
			} else {
				// Force the lock to be acquired.
				ls.lockField(r, fg.Exclusive)
			}
		}
	}

	// Update all lock state accordingly.
	pc.postFunctionCallUpdate(call, lff, ls, false /* aliases */)

	// Check if it's a method dispatch for something in the sync package.
	// See: https://godoc.org/golang.org/x/tools/go/ssa#Function
	if (lockerRE.MatchString(fn.FullName()) || mutexRE.MatchString(fn.FullName())) && len(args) > 0 {
		rv := makeResolvedValue(args[0], nil)
		isExclusive := false
		switch fn.Name() {
		case "Lock", "NestedLock":
			isExclusive = true
			fallthrough
		case "RLock":
			if s, ok := ls.lockField(rv, isExclusive); !ok && !lff.Ignore {
				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
					// Double locking a mutex that is already locked.
					pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
				}
			}
		case "Unlock", "NestedUnlock":
			isExclusive = true
			fallthrough
		case "RUnlock":
			if s, ok := ls.unlockField(rv, isExclusive); !ok && !lff.Ignore {
				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
					// Unlocking something that is already unlocked.
					pc.maybeFail(call.Pos(), "%s already unlocked or locked differently (locks: %s)", s, ls.String())
				}
			}
		case "DowngradeLock":
			if s, ok := ls.downgradeField(rv); !ok {
				if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
					// Downgrading something that may not be downgraded.
					pc.maybeFail(call.Pos(), "%s already unlocked or not exclusive (locks: %s)", s, ls.String())
				}
			}
		}
	}
}
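
// As an illustration (hypothetical type), the lock-state tracking above makes
// the first guarded access below legal and flags the second:
//
//	f.mu.Lock()
//	f.count++ // OK: mu is in the lock state.
//	f.mu.Unlock()
//	f.count++ // Flagged: mu must be locked when accessing count.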

// checkClosure forks the lock state, and creates a binding for the FreeVars
// of the closure. This allows the analysis to resolve the closure.
func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, lff *lockFunctionFacts, ls *lockState) {
	clls := ls.fork()
	clfn := fn.Fn.(*ssa.Function)
	for i, fv := range clfn.FreeVars {
		clls.store(fv, fn.Bindings[i])
	}

	// Note that this is *not* a call to checkFunctionCall, which checks
	// against the function preconditions. Instead, this does a fresh
	// analysis of the function from source code with a different state.
	nlff := lockFunctionFacts{
		Ignore: lff.Ignore, // Inherit ignore.
	}
	pc.checkFunction(call, clfn, &nlff, clls, true /* force */)
}

// freshAlloc indicates that v has been allocated within the local scope. There
// is no lock checking done on objects that are freshly allocated.
func freshAlloc(v ssa.Value) bool {
	switch x := v.(type) {
	case *ssa.Alloc:
		return true
	case *ssa.FieldAddr:
		return freshAlloc(x.X)
	case *ssa.Field:
		return freshAlloc(x.X)
	case *ssa.IndexAddr:
		return freshAlloc(x.X)
	case *ssa.Index:
		return freshAlloc(x.X)
	case *ssa.Convert:
		return freshAlloc(x.X)
	case *ssa.ChangeType:
		return freshAlloc(x.X)
	default:
		return false
	}
}
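
// As an illustration (hypothetical code), guard checking is skipped for
// stores into freshly allocated objects, since they cannot yet be shared
// between goroutines:
//
//	f := &foo{}  // *ssa.Alloc.
//	f.count = 1  // freshAlloc is true here; no lock is required.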

// isWrite indicates that this value is used as the addr field in a store.
//
// Note that even when this returns false, the value may still ultimately be
// used for a write. The return here is optimistic but sufficient for basic
// analysis.
func isWrite(v ssa.Value) bool {
	refs := v.Referrers()
	if refs == nil {
		return false
	}
	for _, ref := range *refs {
		if s, ok := ref.(*ssa.Store); ok && s.Addr == v {
			return true
		}
	}
	return false
}
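
// For example (illustrative): for "f.count = 1", the *ssa.FieldAddr computed
// for f.count appears as the Addr of an *ssa.Store, so isWrite returns true;
// for a read such as "x := f.count" it returns false.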

// callCommon is an ssa.Value that also implements Common.
type callCommon interface {
	Pos() token.Pos
	Common() *ssa.CallCommon
	Value() *ssa.Call
}

// checkInstruction checks the legality of a single instruction based on the
// current lockState.
func (pc *passContext) checkInstruction(inst ssa.Instruction, lff *lockFunctionFacts, ls *lockState) (*ssa.Return, *lockState) {
	// Record any observed globals, and check for violations. The global
	// value is not itself an instruction, but we check all referrers to
	// see where they are consumed.
	var stackLocal [16]*ssa.Value
	ops := inst.Operands(stackLocal[:])
	for _, v := range ops {
		if v == nil {
			continue
		}
		g, ok := (*v).(*ssa.Global)
		if !ok {
			continue
		}
		_, isWrite := inst.(*ssa.Store)
		pc.checkGlobalAccess(inst, g, ls, isWrite)
	}

	// Process the instruction.
	switch x := inst.(type) {
	case *ssa.Store:
		// Record that this value is holding this other value. This is
		// because at the beginning of each ssa execution, there is a
		// series of assignments of parameter values to alloc objects.
		// This allows us to trace these back to the original
		// parameters as aliases above.
		//
		// Note that this may overwrite an existing value in the lock
		// state, but this is intentional.
		ls.store(x.Addr, x.Val)
	case *ssa.Field:
		if !freshAlloc(x.X) && !lff.Ignore {
			pc.checkFieldAccess(x, x.X, x.Field, ls, false)
		}
	case *ssa.FieldAddr:
		if !freshAlloc(x.X) && !lff.Ignore {
			pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
		}
	case *ssa.Call:
		pc.checkCall(x, lff, ls)
	case *ssa.Defer:
		ls.pushDefer(x)
	case *ssa.RunDefers:
		for d := ls.popDefer(); d != nil; d = ls.popDefer() {
			pc.checkCall(d, lff, ls)
		}
	case *ssa.MakeClosure:
		if refs := x.Referrers(); refs != nil {
			var (
				calls    int
				nonCalls int
			)
			for _, ref := range *refs {
				switch ref.(type) {
				case *ssa.Call, *ssa.Defer:
					// Analysis will be done on the call
					// itself subsequently, including the
					// lock state at the time of the call.
					calls++
				default:
					// We need to analyze separately. Per
					// below, this means that we'll analyze
					// it at closure construction time, with
					// zero assumptions about when it will
					// be called.
					nonCalls++
				}
			}
			if calls > 0 && nonCalls == 0 {
				return nil, nil
			}
		}
		// Analyze the closure without bindings. This means that we
		// assume no lock facts and no existing lock state. Only
		// trivial closures are acceptable in this case.
		clfn := x.Fn.(*ssa.Function)
		nlff := lockFunctionFacts{
			Ignore: lff.Ignore, // Inherit ignore.
		}
		pc.checkFunction(nil, clfn, &nlff, nil, false /* force */)
	case *ssa.Return:
		return x, ls // Valid return state.
	}
	return nil, nil
}
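
// As an illustration (hypothetical code), deferred calls are pushed at the
// *ssa.Defer instruction and replayed at *ssa.RunDefers, so the following
// leaves a balanced lock state at return:
//
//	f.mu.Lock()
//	defer f.mu.Unlock() // Pushed here; checked at the RunDefers before return.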

// checkBasicBlock traverses the control flow graph starting at the given
// block and checks each instruction for allowed operations.
func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState, rg map[*ssa.BasicBlock]struct{}) *lockState {
	// Check for cached results from entering this block from a *different*
	// execution path. Note that this is not the same path, which is
	// checked with the recursion guard below.
	if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
		return nil
	}

	// Prevent recursion. If the lock state is constantly changing and we
	// are on a recursive path, then there will never be a return block.
	if rg == nil {
		rg = make(map[*ssa.BasicBlock]struct{})
	}
	if _, ok := rg[block]; ok {
		return nil
	}
	rg[block] = struct{}{}
	defer func() { delete(rg, block) }()

	// If the lock state is not compatible, then we need to do the
	// recursive analysis to ensure that it is still sane. For example, the
	// following is guaranteed to generate incompatible locking states:
	//
	//	if foo {
	//		mu.Lock()
	//	}
	//	other stuff ...
	//	if foo {
	//		mu.Unlock()
	//	}

	var (
		rv  *ssa.Return
		rls *lockState
	)

	// Analyze this block.
	seen[block] = parent
	ls := parent.fork()
	for _, inst := range block.Instrs {
		rv, rls = pc.checkInstruction(inst, lff, ls)
		if rls != nil {
			failed := false
			// Validate held locks.
			for fieldName, fg := range lff.HeldOnExit {
				r := fg.Resolver.resolveStatic(pc, ls, fn, rv)
				if !r.valid() {
					// This cannot be forced, since we have no reference.
					pc.maybeFail(rv.Pos(), "lock %s cannot be resolved", fieldName)
					continue
				}
				if s, ok := rls.isHeld(r, fg.Exclusive); !ok {
					if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok && !lff.Ignore {
						pc.maybeFail(rv.Pos(), "lock %s (%s) not held %s (locks: %s)", fieldName, s, exclusiveStr(fg.Exclusive), rls.String())
						failed = true
					} else {
						// Force the lock to be acquired.
						rls.lockField(r, fg.Exclusive)
					}
				}
			}
			// Check for other locks, but only if the above didn't trip.
			if !failed && rls.count() != len(lff.HeldOnExit) && !lff.Ignore {
				pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
			}
		}
	}

	// Analyze all successors.
	for _, succ := range block.Succs {
		// Collect possible return values, and make sure that the lock
		// state aligns with any return value that we may have found
		// above. Note that checkBasicBlock will recursively analyze
		// the lock state to ensure that Releases and Acquires are
		// respected.
		if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen, rg); pls != nil {
			if rls != nil && !rls.isCompatible(pls) {
				if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok && !lff.Ignore {
					pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %s)", rls.String(), pls.String())
				}
			}
			rls = pls
		}
	}
	return rls
}

// checkFunction checks a function invocation, typically starting with nil lockState.
func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) {
	defer func() {
		// Mark this function as checked. This is used by the top-level
		// loop to ensure that all anonymous functions are scanned, if
		// they are not explicitly invoked here. Note that this can
		// happen if the anonymous functions are e.g. passed only as
		// parameters or used to initialize some structure.
		pc.functions[fn] = struct{}{}
	}()
	if _, ok := pc.functions[fn]; !force && ok {
		// This function has already been analyzed at least once.
		// That's all we permit for each function, although this may
		// cause some anonymous functions to be analyzed in only one
		// context.
		return
	}

	// If no call is provided, then synthesize one. This is used below only
	// to check against the lock preconditions, which may include return
	// values.
	if call == nil {
		call = &ssa.Call{Call: ssa.CallCommon{Value: fn}}
	}

	// Initialize ls with any preconditions that require locks to be held
	// for the method to be invoked. Note that in the overwhelming majority
	// of cases, parent will be nil. However, in the case of closures and
	// anonymous functions, we may start with a non-nil lock state.
	//
	// Note that this will include all aliases, which are also released
	// appropriately below.
	ls := parent.fork()
	for fieldName, fg := range lff.HeldOnEntry {
		// The first argument is the method object itself, so we skip
		// that when looking for receiver/function parameters.
		r := fg.Resolver.resolveStatic(pc, ls, fn, call.Value())
		if !r.valid() {
			// See above: this cannot be forced.
			pc.maybeFail(fn.Pos(), "lock %s cannot be resolved", fieldName)
			continue
		}
		if s, ok := ls.lockField(r, fg.Exclusive); !ok && !lff.Ignore {
			// This can only happen if the same value is declared
			// multiple times, and should be caught by the earlier
			// fact scanning. Keep it here as a sanity check.
			pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times or differently (locks: %s)", fieldName, s, ls.String())
		}
	}

	// Scan the blocks.
	seen := make(map[*ssa.BasicBlock]*lockState)
	if len(fn.Blocks) > 0 {
		pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen, nil)
	}

	// Scan the recover block.
	if fn.Recover != nil {
		pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen, nil)
	}

	// Update all lock state accordingly. This will be called only if we
	// are doing inline analysis for e.g. an anonymous function.
	if call != nil && parent != nil {
		pc.postFunctionCallUpdate(call, lff, parent, true /* aliases */)
	}
}

// checkInferred checks for any inferred lock annotations.
func (pc *passContext) checkInferred() {
	for obj, oo := range pc.observations {
		var lgf lockGuardFacts
		pc.pass.ImportObjectFact(obj, &lgf)
		for other, count := range oo.counts {
			// Is this already a guard?
			if _, ok := lgf.GuardedBy[other.Name()]; ok {
				continue
			}
			// Check to see if this field is used with a given lock
			// held above the threshold. If yes, provide a helpful
			// hint that this may be something you wish to annotate.
			const threshold = 0.9
			if usage := float64(count) / float64(oo.total); usage >= threshold {
				pc.maybeFail(obj.Pos(), "may require checklocks annotation for %s, used with lock held %2.0f%% of the time", other.Name(), usage*100)
			}
		}
	}
}
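
// As an illustration: if a field is accessed ten times and a sibling mutex mu
// is held for nine of those accesses (90%, at or above the 0.9 threshold),
// the checker emits "may require checklocks annotation for mu, used with lock
// held 90% of the time" as a hint to add a "+checklocks:mu" annotation.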