github.com/mre-fog/trillianxx@v1.1.2-0.20180615153820-ae375a99d36a/quota/cacheqm/cache.go

// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cacheqm contains a caching quota.Manager implementation.
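// GetTokens requests are served from per-spec token buckets, calling through
// to the wrapped quota.Manager only when a bucket runs low; tokens held by
// evicted buckets are returned via PutTokens to limit leakage. All other
// methods delegate directly to the wrapped manager.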
package cacheqm

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/golang/glog"
	"github.com/google/trillian/quota"
)

const (
	// DefaultMinBatchSize is the suggested default for minBatchSize.
	DefaultMinBatchSize = 100

	// DefaultMaxCacheEntries is the suggested default for maxEntries.
	DefaultMaxCacheEntries = 1000
)

// now is used in place of time.Now to allow tests to take control of time.
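// For example, a test might pin time to a fixed instant (fixedTime being
// whatever instant the test chooses):
//
//	now = func() time.Time { return fixedTime }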
var now = time.Now

type manager struct {
	qm                       quota.Manager
	minBatchSize, maxEntries int

	// mu guards cache
	mu    sync.Mutex
	cache map[quota.Spec]*bucket

	// evictWg tracks evict() goroutines.
	evictWg sync.WaitGroup
}

// bucket holds the tokens cached for a quota.Spec and the time the entry was
// last modified (used to pick the least recently used entries on eviction).
type bucket struct {
	tokens       int
	lastModified time.Time
}

// NewCachedManager wraps a quota.Manager with an implementation that caches tokens locally.
//
// minBatchSize determines the minimum number of tokens requested from qm for each GetTokens()
// request.
//
// maxEntries determines the maximum number of cache entries, apart from global quotas. The oldest
// entries are evicted as necessary; their tokens are replenished via PutTokens() to avoid
// excessive leakage.
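//
// A minimal usage sketch (hypothetical: "base" stands in for any concrete
// quota.Manager implementation, and ctx and specs for the caller's context
// and quota specs):
//
//	qm, err := NewCachedManager(base, DefaultMinBatchSize, DefaultMaxCacheEntries)
//	if err != nil {
//		// Handle invalid minBatchSize/maxEntries.
//	}
//	err = qm.GetTokens(ctx, 1 /* numTokens */, specs)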
func NewCachedManager(qm quota.Manager, minBatchSize, maxEntries int) (quota.Manager, error) {
	switch {
	case minBatchSize <= 0:
		return nil, fmt.Errorf("invalid minBatchSize: %v", minBatchSize)
	case maxEntries <= 0:
		return nil, fmt.Errorf("invalid maxEntries: %v", maxEntries)
	}
	return &manager{
		qm:           qm,
		minBatchSize: minBatchSize,
		maxEntries:   maxEntries,
		cache:        make(map[quota.Spec]*bucket),
	}, nil
}

func (m *manager) GetUser(ctx context.Context, req interface{}) string {
	return m.qm.GetUser(ctx, req)
}

func (m *manager) PeekTokens(ctx context.Context, specs []quota.Spec) (map[quota.Spec]int, error) {
	return m.qm.PeekTokens(ctx, specs)
}

func (m *manager) PutTokens(ctx context.Context, numTokens int, specs []quota.Spec) error {
	return m.qm.PutTokens(ctx, numTokens, specs)
}

func (m *manager) ResetQuota(ctx context.Context, specs []quota.Spec) error {
	return m.qm.ResetQuota(ctx, specs)
}

func (m *manager) GetTokens(ctx context.Context, numTokens int, specs []quota.Spec) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Verify which buckets need more tokens, if any
	specsToRefill := []quota.Spec{}
	for _, spec := range specs {
		bucket, ok := m.cache[spec]
		if !ok || bucket.tokens < numTokens {
			specsToRefill = append(specsToRefill, spec)
		}
	}

	// Request the required number of tokens and add them to buckets
	if len(specsToRefill) != 0 {
		defer func() {
			// Do not block GetTokens on eviction; it won't change the result.
			m.evictWg.Add(1)
			go func() {
				m.evict(ctx)
				m.evictWg.Done()
			}()
		}()

		// A more accurate count would be numTokens+m.minBatchSize-bucket.tokens, but that might
		// force us to make a GetTokens call for each spec. A single call is likely to be more
		// efficient.
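		// For example, with minBatchSize = 100 and numTokens = 3, the refill below requests
		// 103 tokens for each spec in specsToRefill, so every refilled bucket is left with
		// at least 100 tokens after this call deducts numTokens.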
		tokens := numTokens + m.minBatchSize
		if err := m.qm.GetTokens(ctx, tokens, specsToRefill); err != nil {
			return err
		}
		for _, spec := range specsToRefill {
			b, ok := m.cache[spec]
			if !ok {
				b = &bucket{}
				m.cache[spec] = b
			}
			b.tokens += tokens
		}
	}

	// Subtract tokens from cache
	lastModified := now()
	for _, spec := range specs {
		bucket, ok := m.cache[spec]
		// Sanity check
		if !ok || bucket.tokens < 0 || bucket.tokens < numTokens {
			glog.Errorf("Bucket invariants failed for spec %+v: ok = %v, bucket = %+v", spec, ok, bucket)
			return nil // Something is wrong with the implementation, let requests go through.
		}
		bucket.tokens -= numTokens
		bucket.lastModified = lastModified
	}
	return nil
}

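// evict drops the least recently used non-global entries until the cache is back within
// maxEntries, returning their tokens to the wrapped manager. For example, with
// maxEntries = 1000 and 1003 cached non-global entries, the three oldest buckets are
// removed and their tokens replenished via PutTokens.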
func (m *manager) evict(ctx context.Context) {
	m.mu.Lock()
	// m.mu is unlocked explicitly on each path below, so it isn't held while we wait for the
	// PutTokens goroutines to complete.

	if len(m.cache) <= m.maxEntries {
		m.mu.Unlock()
		return
	}

	// Find and evict the oldest entries. To avoid excessive token leakage, try to replenish
	// the tokens held by the evicted entries.
	var buckets bucketsByTime = make([]specBucket, 0, len(m.cache))
	for spec, b := range m.cache {
		if spec.Group != quota.Global {
			buckets = append(buckets, specBucket{bucket: b, spec: spec})
		}
	}
	sort.Sort(buckets)

	wg := sync.WaitGroup{}
	evicts := len(m.cache) - m.maxEntries
	for i := 0; i < evicts; i++ {
		b := buckets[i]
		glog.V(1).Infof("Too many tokens cached, returning least recently used (%v tokens for %+v)", b.tokens, b.spec)
		delete(m.cache, b.spec)

		// The goroutines must not access the cache, as the lock is released before they complete.
		wg.Add(1)
		go func() {
			if err := m.qm.PutTokens(ctx, b.tokens, []quota.Spec{b.spec}); err != nil {
				glog.Warningf("Error replenishing tokens from evicted bucket (spec = %+v, bucket = %+v): %v", b.spec, b.bucket, err)
			}
			wg.Done()
		}()
	}

	m.mu.Unlock()
	wg.Wait()
}

// wait waits for spawned goroutines to complete. Used by eviction tests.
func (m *manager) wait() {
	m.evictWg.Wait()
}

// specBucket is a bucket with the corresponding spec.
type specBucket struct {
	*bucket
	spec quota.Spec
}

// bucketsByTime is a sortable slice of specBuckets, ordered by lastModified time (oldest first).
type bucketsByTime []specBucket

func (b bucketsByTime) Len() int {
	return len(b)
}

func (b bucketsByTime) Less(i, j int) bool {
	return b[i].lastModified.Before(b[j].lastModified)
}

func (b bucketsByTime) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}
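
// Note: bucketsByTime exists to satisfy sort.Interface; on Go 1.8+ the same oldest-first
// ordering could be expressed inline with sort.Slice, e.g.:
//
//	sort.Slice(buckets, func(i, j int) bool {
//		return buckets[i].lastModified.Before(buckets[j].lastModified)
//	})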