github.com/searKing/golang/go@v1.2.117/sync/lru_pool.go

// Copyright 2021 The searKing Author. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// borrowed from https://github.com/golang/go/blob/master/src/net/http/transport.go

package sync

import (
	"context"
	"errors"
	"log"
	"sync"
	"time"
)

var (
	errKeepAlivesDisabled  = errors.New("sync: putIdleResource: keep alives disabled")
	errResourceBroken      = errors.New("sync: putIdleResource: resource is in bad state")
	errCloseIdle           = errors.New("sync: putIdleResource: CloseIdleResources was called")
	errTooManyIdle         = errors.New("sync: putIdleResource: too many idle resources")
	errTooManyIdleResource = errors.New("sync: putIdleResource: too many idle resources for bucket")
	errCloseIdleResources  = errors.New("sync: CloseIdleResources called")

	errIdleResourceTimeout = errors.New("sync: idle resource timeout")
)

// DefaultLruPool creates new resources as needed and caches them for reuse
// by subsequent calls.
var DefaultLruPool = &LruPool{
	MaxIdleResources:    100,
	IdleResourceTimeout: 90 * time.Second,
}

// DefaultMaxIdleResourcesPerBucket is the default value of LruPool's
// MaxIdleResourcesPerBucket.
const DefaultMaxIdleResourcesPerBucket = 2

type targetKey any

// LruPool is a sync.Pool-style resource pool with LRU eviction of idle
// resources.
//
// By default, LruPool caches resources for future re-use.
// This may leave many open resources when accessing many buckets.
// This behavior can be managed using LruPool's CloseIdleResources method
// and the MaxIdleResourcesPerBucket and DisableKeepAlives fields.
//
// LruPools should be reused instead of created as needed.
// LruPools are safe for concurrent use by multiple goroutines.
//
// A LruPool is a low-level primitive for creating and caching resources.
type LruPool struct {
	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func(ctx context.Context, req any) (resp any, err error)

	idleMu           sync.Mutex
	closeIdle        bool                             // user has requested to close all idle resources
	idleResource     map[targetKey][]*PersistResource // most recently used at end
	idleResourceWait map[targetKey]wantResourceQueue  // waiting resource getters
	idleLRU          resourceLRU

	resourcesPerBucketMu   sync.Mutex
	resourcesPerBucket     map[targetKey]int
	resourcesPerBucketWait map[targetKey]wantResourceQueue // waiting resource getters

	// DisableKeepAlives, if true, disables keep-alives:
	// each resource is used for a single request and never pooled.
	DisableKeepAlives bool

	// MaxIdleResources controls the maximum number of idle (keep-alive)
	// resources across all buckets. Zero means no limit.
	MaxIdleResources int

	// MaxIdleResourcesPerBucket, if non-zero, controls the maximum idle
	// (keep-alive) resources to keep per-bucket. If zero,
	// DefaultMaxIdleResourcesPerBucket is used.
	MaxIdleResourcesPerBucket int

	// MaxResourcesPerBucket optionally limits the total number of
	// resources per bucket, including resources in the creating,
	// active, and idle states. On limit violation, new resource
	// creation will block.
	//
	// Zero means no limit.
	MaxResourcesPerBucket int

	// IdleResourceTimeout is the maximum amount of time an idle
	// (keep-alive) resource will remain idle before closing
	// itself.
	// Zero means no limit.
	IdleResourceTimeout time.Duration
}
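
// A minimal construction sketch (illustrative only, not part of the original
// API surface; openThing is a hypothetical constructor supplied by the caller):
//
//	pool := &LruPool{
//		New: func(ctx context.Context, req any) (any, error) {
//			return openThing(ctx, req) // build the expensive value here
//		},
//		MaxIdleResources:          100,
//		MaxIdleResourcesPerBucket: 2,
//		IdleResourceTimeout:       90 * time.Second,
//	}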

// GetByKeyOrError returns a PersistResource for the bucket identified by key,
// reusing an idle resource if one is available and creating one with New
// otherwise. If this doesn't return an error, the PersistResource is ready to use.
func (t *LruPool) GetByKeyOrError(ctx context.Context, key any, req any) (pc *PersistResource, err error) {
	w := &wantResource{
		req:   req,
		key:   key,
		ctx:   ctx,
		ready: make(chan struct{}, 1),
	}
	defer func() {
		if err != nil {
			w.cancel(t, err)
		}
	}()

	// Queue for idle resource.
	if delivered := t.queueForIdleResource(w); delivered {
		pc := w.pr
		return pc, nil
	}

	// Queue for permission to create a new resource.
	t.queueForNewResource(w)

	// Wait for completion or cancellation.
	select {
	case <-w.ready:
		if w.err != nil {
			// If the request has been cancelled, that's probably
			// what caused w.err; if so, prefer to return the
			// cancellation error (see golang.org/issue/16049).
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			default:
				// return below
			}
		}
		return w.pr, w.err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// GetByKey returns a value for the bucket identified by key, reusing an idle
// resource if one is available and creating one otherwise. The returned put
// function hands the value back to the pool and should be called once the
// caller is done with it. Creation errors are discarded; use GetByKeyOrError
// to observe them.
func (t *LruPool) GetByKey(ctx context.Context, key any, req any) (v any, put context.CancelFunc) {
	pc, _ := t.GetByKeyOrError(ctx, key, req)
	put = func() {
		pc.Put()
	}
	return pc.Get(), put
}

// GetOrError is like GetByKeyOrError, using req itself as the bucket key.
// It returns the underlying value together with a put function that hands
// the value back to the pool.
func (t *LruPool) GetOrError(ctx context.Context, req any) (v any, put context.CancelFunc, err error) {
	pc, err := t.GetByKeyOrError(ctx, req, req)
	put = func() {
		pc.Put()
	}
	return pc.Get(), put, err
}

// Get is like GetByKey, using req itself as the bucket key.
func (t *LruPool) Get(ctx context.Context, req any) (v any, put context.CancelFunc) {
	return t.GetByKey(ctx, req, req)
}
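
// Usage sketch for the Get family (illustrative; useValue is hypothetical).
// Get and GetByKey discard creation errors while GetOrError surfaces them;
// in every successful case the returned put should be called to hand the
// value back to the pool:
//
//	v, put, err := pool.GetOrError(ctx, "bucket-key")
//	if err != nil {
//		return err // creation failed or the context was cancelled
//	}
//	defer put()
//	useValue(v)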

// Put hands presource back to the pool: it is either parked on the idle list
// for reuse or closed if it cannot be kept.
func (t *LruPool) Put(presource *PersistResource) {
	t.putOrCloseIdleResource(presource)
}

func (t *LruPool) putOrCloseIdleResource(presource *PersistResource) {
	if t == nil {
		return
	}
	if err := t.tryPutIdleResource(presource); err != nil {
		presource.close(err)
	}
}

func (t *LruPool) maxIdleResourcesPerBucket() int {
	if v := t.MaxIdleResourcesPerBucket; v != 0 {
		return v
	}
	return DefaultMaxIdleResourcesPerBucket
}
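
// Lower-level sketch working with the PersistResource directly (illustrative;
// useValue is hypothetical, and req is whatever payload New expects). Put
// parks the resource on the idle list, or closes it if it cannot be kept:
//
//	pc, err := pool.GetByKeyOrError(ctx, "bucket-key", req)
//	if err != nil {
//		return err
//	}
//	useValue(pc.Get())
//	pool.Put(pc) // GetByKey's put callback does the equivalent via pc.Put()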

// tryPutIdleResource adds presource to the list of idle persistent resources awaiting
// a new request.
// If presource is no longer needed or not in a good state, tryPutIdleResource returns
// an error explaining why it wasn't registered.
// tryPutIdleResource does not close presource. Use putOrCloseIdleResource instead for that.
func (t *LruPool) tryPutIdleResource(presource *PersistResource) error {
	if t.DisableKeepAlives || t.MaxIdleResourcesPerBucket < 0 {
		return errKeepAlivesDisabled
	}
	if presource.isBroken() {
		return errResourceBroken
	}
	presource.markReused()

	t.idleMu.Lock()
	defer t.idleMu.Unlock()

	// Deliver presource to a goroutine waiting for an idle resource, if any.
	// (They may be actively creating a new resource, but this one is ready first.
	// Chrome calls this socket late binding.
	// See https://www.chromium.org/developers/design-documents/network-stack#TOC-Connection-Management.)
	key := presource.cacheKey
	if q, ok := t.idleResourceWait[key]; ok {
		done := false
		// Loop over the waiting list until we find a w that isn't done already, and hand it presource.
		for q.len() > 0 {
			w := q.popFront()
			if w.tryDeliver(presource, nil) {
				done = true
				break
			}
		}
		if q.len() == 0 {
			delete(t.idleResourceWait, key)
		} else {
			t.idleResourceWait[key] = q
		}
		if done {
			return nil
		}
	}

	if t.closeIdle {
		return errCloseIdle
	}
	if t.idleResource == nil {
		t.idleResource = make(map[targetKey][]*PersistResource)
	}
	idles := t.idleResource[key]
	if len(idles) >= t.maxIdleResourcesPerBucket() {
		return errTooManyIdleResource
	}
	for _, exist := range idles {
		if exist == presource {
			log.Fatalf("dup idle presource %p in freelist", presource)
		}
	}
	t.idleResource[key] = append(idles, presource)
	t.idleLRU.add(presource)
	if t.MaxIdleResources != 0 && t.idleLRU.len() > t.MaxIdleResources {
		oldest := t.idleLRU.removeOldest()
		oldest.close(errTooManyIdle)
		t.removeIdleResourceLocked(oldest)
	}

	// Set the idle timer so the resource closes itself if it stays idle too long.
	if t.IdleResourceTimeout > 0 {
		if presource.idleTimer != nil {
			presource.idleTimer.Reset(t.IdleResourceTimeout)
		} else {
			presource.idleTimer = time.AfterFunc(t.IdleResourceTimeout, presource.closeResourceIfStillIdle)
		}
	}
	presource.idleAt = time.Now()
	return nil
}
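
// Illustrative note on the global idle limit (a hedged sketch, not taken from
// the original sources; newValue is hypothetical): with
//
//	pool := &LruPool{MaxIdleResources: 1, New: newValue}
//
// parking a second idle resource pushes the pool over the limit, so the least
// recently used idle resource is closed with errTooManyIdle and dropped from
// the idle list (see removeIdleResourceLocked).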

// queueForIdleResource queues w to receive the next idle resource for w.key.
// As an optimization hint to the caller, queueForIdleResource reports whether
// it successfully delivered an already-idle resource.
func (t *LruPool) queueForIdleResource(w *wantResource) (delivered bool) {
	if t.DisableKeepAlives {
		return false
	}

	t.idleMu.Lock()
	defer t.idleMu.Unlock()

	// Stop closing resources that become idle - we might want one.
	// (That is, undo the effect of t.CloseIdleResources.)
	t.closeIdle = false

	if w == nil {
		// Happens in test hook.
		return false
	}

	// If IdleResourceTimeout is set, calculate the oldest
	// PersistResource.idleAt time we're willing to use a cached idle
	// resource.
	var oldTime time.Time
	if t.IdleResourceTimeout > 0 {
		oldTime = time.Now().Add(-t.IdleResourceTimeout)
	}

	// Look for most recently-used idle resource.
	if list, ok := t.idleResource[w.key]; ok {
		stop := false
		delivered := false
		for len(list) > 0 && !stop {
			presource := list[len(list)-1]

			// See whether this resource has been idle too long, considering
			// only the wall time (the Round(0)), in case this is a laptop or VM
			// coming out of suspend with previously cached idle resources.
			tooOld := !oldTime.IsZero() && presource.idleAt.Round(0).Before(oldTime)
			if tooOld {
				// Async cleanup. Launch in its own goroutine (as if a
				// time.AfterFunc called it); it acquires idleMu, which we're
				// holding, and does a synchronous close of the resource.
				go presource.closeResourceIfStillIdle()
			}
			if presource.isBroken() || tooOld {
				// If either the resource has been marked broken but
				// LruPool.RemoveIdleResource has not yet removed it from the
				// idle list, or this PersistResource is too old (it was idle
				// too long), then ignore it and look for another. In both
				// cases it's already in the process of being closed.
				list = list[:len(list)-1]
				continue
			}
			delivered = w.tryDeliver(presource, nil)
			if delivered {
				// Only one client can use presource.
				// Remove it from the list.
				t.idleLRU.remove(presource)
				list = list[:len(list)-1]
			}
			stop = true
		}
		if len(list) > 0 {
			t.idleResource[w.key] = list
		} else {
			delete(t.idleResource, w.key)
		}
		if stop {
			return delivered
		}
	}

	// Register to receive the next resource that becomes idle.
	if t.idleResourceWait == nil {
		t.idleResourceWait = make(map[targetKey]wantResourceQueue)
	}
	q := t.idleResourceWait[w.key]
	q.cleanFront()
	q.pushBack(w)
	t.idleResourceWait[w.key] = q
	return false
}

// RemoveIdleResource removes presource from the pool's idle list, so it will
// no longer be handed out.
func (t *LruPool) RemoveIdleResource(presource *PersistResource) bool {
	t.idleMu.Lock()
	defer t.idleMu.Unlock()
	return t.removeIdleResourceLocked(presource)
}

// t.idleMu must be held.
func (t *LruPool) removeIdleResourceLocked(presource *PersistResource) bool {
	if presource.idleTimer != nil {
		presource.idleTimer.Stop()
	}
	t.idleLRU.remove(presource)
	key := presource.cacheKey
	presources := t.idleResource[key]
	var removed bool
	switch len(presources) {
	case 0:
		// Nothing
	case 1:
		if presources[0] == presource {
			delete(t.idleResource, key)
			removed = true
		}
	default:
		for i, v := range presources {
			if v != presource {
				continue
			}
			// Slide down, keeping most recently-used resources at the end.
			copy(presources[i:], presources[i+1:])
			t.idleResource[key] = presources[:len(presources)-1]
			removed = true
			break
		}
	}
	return removed
}

// queueForNewResource queues w to wait for permission to create a new resource.
// Once w receives permission, the resource is created in a separate goroutine.
func (t *LruPool) queueForNewResource(w *wantResource) {
	if t.MaxResourcesPerBucket <= 0 {
		go t.newResourceFor(w)
		return
	}

	t.resourcesPerBucketMu.Lock()
	defer t.resourcesPerBucketMu.Unlock()

	if n := t.resourcesPerBucket[w.key]; n < t.MaxResourcesPerBucket {
		if t.resourcesPerBucket == nil {
			t.resourcesPerBucket = make(map[targetKey]int)
		}
		t.resourcesPerBucket[w.key] = n + 1
		go t.newResourceFor(w)
		return
	}

	if t.resourcesPerBucketWait == nil {
		t.resourcesPerBucketWait = make(map[targetKey]wantResourceQueue)
	}
	q := t.resourcesPerBucketWait[w.key]
	q.cleanFront()
	q.pushBack(w)
	t.resourcesPerBucketWait[w.key] = q
}
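
// Illustrative note on MaxResourcesPerBucket (a hedged sketch, not taken from
// the original sources; newValue is hypothetical): with
//
//	pool := &LruPool{MaxResourcesPerBucket: 1, New: newValue}
//
// a second concurrent Get for the same key does not create a second resource;
// it waits in resourcesPerBucketWait (and idleResourceWait) until the first
// resource is put back or its creation fails.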

// newResourceFor creates a resource on behalf of w and delivers the result to w.
// newResourceFor has received permission to create a resource for w.key and is
// counted in t.resourcesPerBucket[w.key].
// If the creation is cancelled or unsuccessful, newResourceFor decrements
// t.resourcesPerBucket[w.key].
func (t *LruPool) newResourceFor(w *wantResource) {
	pc, err := t.buildResource(w.ctx, w.key, w.req)
	delivered := w.tryDeliver(pc, err)
	if err == nil && (!delivered) {
		// presource was not passed to w (w was already satisfied or gave up).
		// Add it to the idle resource pool.
		t.putOrCloseIdleResource(pc)
	}
	if err != nil {
		t.decResourcesPerBucket(w.key)
	}
}

// decResourcesPerBucket decrements the per-bucket resource count for key,
// which may in turn give a different waiting goroutine permission to create
// a resource.
func (t *LruPool) decResourcesPerBucket(key targetKey) {
	if t.MaxResourcesPerBucket <= 0 {
		return
	}

	t.resourcesPerBucketMu.Lock()
	defer t.resourcesPerBucketMu.Unlock()
	n := t.resourcesPerBucket[key]
	if n == 0 {
		// Shouldn't happen, but if it does, the counting is buggy and could
		// easily lead to a silent deadlock, so report the problem loudly.
		panic("sync: internal error: resourcesPerBucket underflow")
	}

	// Can we hand this count to a goroutine still waiting for permission?
	// (Some goroutines on the wait list may have timed out or
	// gotten a resource another way. If they're all gone,
	// we don't want to kick off any spurious resource creations.)
	if q := t.resourcesPerBucketWait[key]; q.len() > 0 {
		done := false
		for q.len() > 0 {
			w := q.popFront()
			if w.waiting() {
				go t.newResourceFor(w)
				done = true
				break
			}
		}
		if q.len() == 0 {
			delete(t.resourcesPerBucketWait, key)
		} else {
			// q is a value (like a slice), so we have to store
			// the updated q back into the map.
			t.resourcesPerBucketWait[key] = q
		}
		if done {
			return
		}
	}

	// Otherwise, decrement the recorded count.
	if n--; n == 0 {
		delete(t.resourcesPerBucket, key)
	} else {
		t.resourcesPerBucket[key] = n
	}
}

// buildResource creates a new PersistResource for key, calling New (if set)
// to produce the underlying object.
func (t *LruPool) buildResource(ctx context.Context, key any, req any) (presource *PersistResource, err error) {
	presource = &PersistResource{
		t:        t,
		cacheKey: key,
	}

	if t.New != nil {
		presource.object, err = t.New(ctx, req)
	}
	return presource, err
}

// CloseIdleResources closes any resources that were created for previous
// requests but are now sitting idle in a "keep-alive" state. It does not
// interrupt any resources currently in use.
func (t *LruPool) CloseIdleResources() {
	t.idleMu.Lock()
	m := t.idleResource
	t.idleResource = nil
	t.closeIdle = true // close newly idle resources
	t.idleLRU = resourceLRU{}
	t.idleMu.Unlock()
	for _, presources := range m {
		for _, presource := range presources {
			presource.close(errCloseIdleResources)
		}
	}
}
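
// Shutdown sketch (illustrative): once in-flight work has drained, close
// everything that is sitting idle. Because closeIdle stays set until the next
// Get, resources returned after this call are closed rather than pooled:
//
//	pool.CloseIdleResources()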