go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/server/auth/cache.go (about) 1 // Copyright 2016 The LUCI Authors. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package auth 16 17 import ( 18 "context" 19 "crypto/sha256" 20 "encoding/base64" 21 "encoding/json" 22 "fmt" 23 "strings" 24 "time" 25 26 "go.chromium.org/luci/common/clock" 27 "go.chromium.org/luci/common/errors" 28 "go.chromium.org/luci/common/logging" 29 30 "go.chromium.org/luci/server/caching/layered" 31 ) 32 33 // globalCacheNamespace is global cache namespace to use for storing tokens. 34 const globalCacheNamespace = "__luciauth__" 35 36 // tokenCacheConfig contains configuration of a token cache for a single token 37 // kind. 38 type tokenCacheConfig struct { 39 // Kind defines the token kind. Will be used as part of the global cache key. 40 Kind string 41 42 // Version defines format of the data. Will be used as part of the global 43 // cache key. 44 // 45 // If you change a type behind any in Token field, you MUST bump the 46 // version. It will also "invalidate" all existing cached entries (they will 47 // just become inaccessible and eventually will be evicted from the cache). 48 Version int 49 50 // ProcessCacheCapacity is capacity of a process cache that holds the tokens. 51 ProcessCacheCapacity int 52 53 // ExpiryRandomizationThreshold defines a threshold for item expiration after 54 // which the randomized early expiration kick in. 
55 // 56 // See layered.WithRandomizedExpiration for more details. 57 ExpiryRandomizationThreshold time.Duration 58 } 59 60 // tokenCache knows how to store tokens of some particular kind. 61 // 62 // Must be initialized during init-time via newTokenCache. 63 type tokenCache struct { 64 cfg tokenCacheConfig 65 lc layered.Cache[*cachedToken] 66 } 67 68 // newTokenCache configures tokenCache based on given parameters. 69 func newTokenCache(cfg tokenCacheConfig) *tokenCache { 70 return &tokenCache{ 71 cfg: cfg, 72 lc: layered.RegisterCache(layered.Parameters[*cachedToken]{ 73 ProcessCacheCapacity: cfg.ProcessCacheCapacity, 74 GlobalNamespace: globalCacheNamespace, 75 Marshal: func(item *cachedToken) ([]byte, error) { 76 return json.Marshal(item) 77 }, 78 Unmarshal: func(blob []byte) (*cachedToken, error) { 79 out := &cachedToken{} 80 err := json.Unmarshal(blob, out) 81 return out, err 82 }, 83 }), 84 } 85 } 86 87 // cachedToken is stored in the token cache. 88 type cachedToken struct { 89 // Key is cache key, must be unique (no other restrictions). 90 Key string `json:"key,omitempty"` 91 // Created is when the token was created, required. 92 Created time.Time `json:"created,omitempty"` 93 // Expire is when the token expires, required. 94 Expiry time.Time `json:"expiry,omitempty"` 95 96 // TODO(fmatenaar): Remove this after migrating projects to scoped accounts. 97 // ProjectScopeFallback indicates a project scoped token migration fallback case. 98 ProjectScopeFallback bool `json:"fallback,omitempty"` 99 100 // OAuth2Token is set when caching an OAuth2 tokens, otherwise empty. 101 OAuth2Token string `json:"oauth2_token,omitempty"` 102 // DelegationToken is set when caching a delegation token, otherwise empty. 103 DelegationToken string `json:"delegation_token,omitempty"` 104 // IDToken is set when caching ID tokens, otherwise empty. 
105 IDToken string `json:"id_token,omitempty"` 106 } 107 108 type fetchOrMintTokenOp struct { 109 CacheKey string 110 MinTTL time.Duration 111 Mint func(context.Context) (tok *cachedToken, err error, label string) 112 MintTimeout time.Duration 113 } 114 115 // fetchOrMintToken implements high level logic of using a token cache. 116 // 117 // It's basis or MintAccessTokenForServiceAccount and MintDelegationToken 118 // implementations. 119 // 120 // Returns a token, an error and a label to use for monitoring metric (its value 121 // depends on how exactly the operation was performed or how it failed). 122 func (tc *tokenCache) fetchOrMintToken(ctx context.Context, op *fetchOrMintTokenOp) (tok *cachedToken, err error, label string) { 123 defer func() { 124 if err != nil { 125 logging.WithError(err).Warningf(ctx, "Failed to get the token") 126 } 127 }() 128 129 // Derive a short unique cache key that also depends on Kind and Version. 130 // op.CacheKey is allowed to be of any length, but global cache keys must be 131 // short-ish. 132 digest := sha256.Sum256([]byte(op.CacheKey)) 133 cacheKey := fmt.Sprintf("%s/%d/%s", 134 tc.cfg.Kind, tc.cfg.Version, base64.RawURLEncoding.EncodeToString(digest[:])) 135 136 label = "SUCCESS_CACHE_HIT" // will be replaced on cache miss or on error 137 138 // Pull our token from the cache or create a new one (construct options first 139 // for better readability). 140 opts := []layered.Option{ 141 layered.WithMinTTL(op.MinTTL), 142 layered.WithRandomizedExpiration(tc.cfg.ExpiryRandomizationThreshold), 143 } 144 tok, err = tc.lc.GetOrCreate(ctx, cacheKey, func() (val *cachedToken, ttl time.Duration, err error) { 145 logging.Debugf(ctx, "Minting the new token") 146 147 // Minting a new token involves RPCs to remote services that should be fast. 148 // Abort the attempt if it gets stuck for longer than N sec, it's unlikely 149 // it'll succeed. 
Note that we setup the new context only on slow code path 150 // (on cache miss), since it involves some overhead we don't want to pay on 151 // the fast path. We assume memcache RPCs don't get stuck for a long time 152 // (unlike URL Fetch calls to GAE). 153 ctx, cancel := clock.WithTimeout(ctx, op.MintTimeout) 154 defer cancel() 155 156 // Note: we set 'label' from the outer scope here. 157 var tok *cachedToken 158 if tok, err, label = op.Mint(ctx); err != nil { 159 if label == "" { 160 label = "ERROR_UNSPECIFIED" 161 } 162 return nil, 0, err 163 } 164 165 tok.Key = op.CacheKey // the original key before hashing 166 167 label = "SUCCESS_CACHE_MISS" 168 return tok, clock.Until(ctx, tok.Expiry), nil 169 }, opts...) 170 171 switch { 172 case errors.Unwrap(err) == context.DeadlineExceeded: 173 return nil, err, "ERROR_DEADLINE" 174 case err == layered.ErrCantSatisfyMinTTL: 175 // This happens if op.Mint failed to produce a token that lives longer 176 // than MinTTL. 177 return nil, err, "ERROR_INSUFFICIENT_MINTED_TTL" 178 case err != nil: 179 return nil, err, label 180 case tok.Key != op.CacheKey: 181 // A paranoid check we've got the token we wanted. This is very-very-very 182 // unlikely to happen in practice, SHA256 collisions are rare. So it's fine 183 // to handle it sloppily and just return an error (still better than 184 // accidentally using wrong token). 185 err = fmt.Errorf("SHA256 collision in the token cache: %q vs %q", tok.Key, op.CacheKey) 186 return nil, err, "ERROR_HASH_COLLISION" 187 default: 188 return tok, nil, label 189 } 190 } 191 192 // Helper for constructing token cache keys which are a list of pairs. 
// cacheKeyBuilder assembles a token cache key out of (type, value) pairs,
// encoding each pair as a "typ:val" line.
type cacheKeyBuilder struct {
	b strings.Builder
}

// add appends one "typ:val" pair to the key being built.
//
// Values must not contain a newline, since it would break the one-pair-per-line
// framing of the key; such values are rejected with an error.
func (c *cacheKeyBuilder) add(typ, val string) error {
	if strings.IndexByte(val, '\n') != -1 {
		return fmt.Errorf("forbidden character in a %s: %q", typ, val)
	}
	c.b.WriteString(typ)
	c.b.WriteByte(':')
	c.b.WriteString(val)
	c.b.WriteByte('\n')
	return nil
}

// finish returns the assembled cache key.
func (c *cacheKeyBuilder) finish() string {
	return c.b.String()
}