github.com/insionng/yougam@v0.0.0-20170714101924-2bc18d833463/libraries/golang/groupcache/groupcache_test.go

/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Tests for groupcache.

package groupcache

import (
    "errors"
    "fmt"
    "hash/crc32"
    "math/rand"
    "reflect"
    "sync"
    "testing"
    "time"

    "github.com/insionng/yougam/libraries/golang/protobuf/proto"

    pb "github.com/insionng/yougam/libraries/golang/groupcache/groupcachepb"
    testpb "github.com/insionng/yougam/libraries/golang/groupcache/testpb"
)

var (
    once                    sync.Once
    stringGroup, protoGroup Getter

    stringc = make(chan string)

    dummyCtx Context

    // cacheFills is the number of times stringGroup or
    // protoGroup's Getter have been called. Read it via the
    // countFills helper below.
    cacheFills AtomicInt
)

const (
    stringGroupName = "string-group"
    protoGroupName  = "proto-group"
    testMessageType = "google3/net/groupcache/go/test_proto.TestMessage"
    fromChan        = "from-chan"
    cacheSize       = 1 << 20
)

func testSetup() {
    stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
        if key == fromChan {
            key = <-stringc
        }
        cacheFills.Add(1)
        return dest.SetString("ECHO:" + key)
    }))

    protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
        if key == fromChan {
            key = <-stringc
        }
        cacheFills.Add(1)
        return dest.SetProto(&testpb.TestMessage{
            Name: proto.String("ECHO:" + key),
            City: proto.String("SOME-CITY"),
        })
    }))
}

// TestGetDupSuppressString tests that a Getter's Get method is only
// called once with two outstanding callers. This is the string variant.
func TestGetDupSuppressString(t *testing.T) {
    once.Do(testSetup)
    // Start two getters. The first should block (waiting to read
    // from stringc) and the second should latch on to the first
    // one.
    resc := make(chan string, 2)
    for i := 0; i < 2; i++ {
        go func() {
            var s string
            if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil {
                resc <- "ERROR:" + err.Error()
                return
            }
            resc <- s
        }()
    }

    // Wait a bit so both goroutines get merged together via
    // singleflight.
    // TODO(bradfitz): decide whether there are any non-offensive
    // debug/test hooks that could be added to singleflight to
    // make a sleep here unnecessary.
    time.Sleep(250 * time.Millisecond)

    // Unblock the first getter, which should unblock the second
    // as well.
    stringc <- "foo"

    for i := 0; i < 2; i++ {
        select {
        case v := <-resc:
            if v != "ECHO:foo" {
                t.Errorf("got %q; want %q", v, "ECHO:foo")
            }
        case <-time.After(5 * time.Second):
            t.Errorf("timeout waiting on getter #%d of 2", i+1)
        }
    }
}

// TestGetDupSuppressProto tests that a Getter's Get method is only
// called once with two outstanding callers. This is the proto variant.
func TestGetDupSuppressProto(t *testing.T) {
    once.Do(testSetup)
    // Start two getters. The first should block (waiting to read
    // from stringc) and the second should latch on to the first
    // one.
    resc := make(chan *testpb.TestMessage, 2)
    for i := 0; i < 2; i++ {
        go func() {
            tm := new(testpb.TestMessage)
            if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil {
                tm.Name = proto.String("ERROR:" + err.Error())
            }
            resc <- tm
        }()
    }

    // Wait a bit so both goroutines get merged together via
    // singleflight.
    // TODO(bradfitz): decide whether there are any non-offensive
    // debug/test hooks that could be added to singleflight to
    // make a sleep here unnecessary.
    time.Sleep(250 * time.Millisecond)

    // Unblock the first getter, which should unblock the second
    // as well.
    stringc <- "Fluffy"
    want := &testpb.TestMessage{
        Name: proto.String("ECHO:Fluffy"),
        City: proto.String("SOME-CITY"),
    }
    for i := 0; i < 2; i++ {
        select {
        case v := <-resc:
            if !reflect.DeepEqual(v, want) {
                t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want))
            }
        case <-time.After(5 * time.Second):
            t.Errorf("timeout waiting on getter #%d of 2", i+1)
        }
    }
}

// countFills reports how many cache fills (Getter invocations)
// happened while f ran, by diffing the global cacheFills counter.
func countFills(f func()) int64 {
    fills0 := cacheFills.Get()
    f()
    return cacheFills.Get() - fills0
}

func TestCaching(t *testing.T) {
    once.Do(testSetup)
    fills := countFills(func() {
        for i := 0; i < 10; i++ {
            var s string
            if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil {
                t.Fatal(err)
            }
        }
    })
    if fills != 1 {
        t.Errorf("expected 1 cache fill; got %d", fills)
    }
}

func TestCacheEviction(t *testing.T) {
    once.Do(testSetup)
    testKey := "TestCacheEviction-key"
    getTestKey := func() {
        var res string
        for i := 0; i < 10; i++ {
            if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil {
                t.Fatal(err)
            }
        }
    }
    fills := countFills(getTestKey)
    if fills != 1 {
        t.Fatalf("expected 1 cache fill; got %d", fills)
    }

    g := stringGroup.(*Group)
    evict0 := g.mainCache.nevict

    // Trash the cache with other keys.
    var bytesFlooded int64
    // cacheSize/len(testKey) is approximate
    for bytesFlooded < cacheSize+1024 {
        var res string
        key := fmt.Sprintf("dummy-key-%d", bytesFlooded)
        stringGroup.Get(dummyCtx, key, StringSink(&res))
        bytesFlooded += int64(len(key) + len(res))
    }
    evicts := g.mainCache.nevict - evict0
    if evicts <= 0 {
        t.Errorf("evicts = %v; want more than 0", evicts)
    }

    // Test that the key is gone.
    fills = countFills(getTestKey)
    if fills != 1 {
        t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills)
    }
}

// fakePeer is an in-process ProtoGetter that counts hits and can be
// told to fail.
type fakePeer struct {
    hits int
    fail bool
}

func (p *fakePeer) Get(_ Context, in *pb.GetRequest, out *pb.GetResponse) error {
    p.hits++
    if p.fail {
        return errors.New("simulated error from peer")
    }
    out.Value = []byte("got:" + in.GetKey())
    return nil
}

type fakePeers []ProtoGetter

// PickPeer deterministically maps a key to one of the fake peers by
// CRC32 hash; a nil slot means the key is owned locally.
func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) {
    if len(p) == 0 {
        return
    }
    n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p))
    return p[n], p[n] != nil
}

// TestPeers tests that peers (virtual, in-process) are hit, and how often.
func TestPeers(t *testing.T) {
    once.Do(testSetup)
    rand.Seed(123)
    peer0 := &fakePeer{}
    peer1 := &fakePeer{}
    peer2 := &fakePeer{}
    peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil})
    const cacheSize = 0 // disabled
    localHits := 0
    getter := func(_ Context, key string, dest Sink) error {
        localHits++
        return dest.SetString("got:" + key)
    }
    testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList)
    run := func(name string, n int, wantSummary string) {
        // Reset counters.
        localHits = 0
        for _, p := range []*fakePeer{peer0, peer1, peer2} {
            p.hits = 0
        }

        for i := 0; i < n; i++ {
            key := fmt.Sprintf("key-%d", i)
            want := "got:" + key
            var got string
            err := testGroup.Get(dummyCtx, key, StringSink(&got))
            if err != nil {
                t.Errorf("%s: error on key %q: %v", name, key, err)
                continue
            }
            if got != want {
                t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want)
            }
        }
        summary := func() string {
            return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits)
        }
        if got := summary(); got != wantSummary {
            t.Errorf("%s: got %q; want %q", name, got, wantSummary)
        }
    }
    resetCacheSize := func(maxBytes int64) {
        g := testGroup
        g.cacheBytes = maxBytes
        g.mainCache = cache{}
        g.hotCache = cache{}
    }

    // Base case; peers all up, with no problems.
    resetCacheSize(1 << 20)
    run("base", 200, "localHits = 49, peers = 51 49 51")

    // Verify the cache was hit: all localHits are gone, along with
    // some of the peer hits (the ones randomly selected to be maybe hot).
    run("cached_base", 200, "localHits = 0, peers = 49 47 48")
    resetCacheSize(0)

    // With one of the peers being down.
    // TODO(bradfitz): on a peer number being unavailable, the
    // consistent hashing should maybe keep trying others to
    // spread the load out. Currently it fails back to local
    // execution if the first consistent-hash slot is unavailable.
    peerList[0] = nil
    run("one_peer_down", 200, "localHits = 100, peers = 0 49 51")

    // Failing peer.
    peerList[0] = peer0
    peer0.fail = true
    run("peer0_failing", 200, "localHits = 100, peers = 51 49 51")
}

func TestTruncatingByteSliceTarget(t *testing.T) {
    once.Do(testSetup) // stringGroup is nil if this test runs in isolation
    var buf [100]byte
    s := buf[:]
    if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil {
        t.Fatal(err)
    }
    if want := "ECHO:short"; string(s) != want {
        t.Errorf("short key got %q; want %q", s, want)
    }

    s = buf[:6]
    if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil {
        t.Fatal(err)
    }
    if want := "ECHO:t"; string(s) != want {
        t.Errorf("truncated key got %q; want %q", s, want)
    }
}

func TestAllocatingByteSliceTarget(t *testing.T) {
    var dst []byte
    sink := AllocatingByteSliceSink(&dst)

    inBytes := []byte("some bytes")
    sink.SetBytes(inBytes)
    if want := "some bytes"; string(dst) != want {
        t.Errorf("SetBytes resulted in %q; want %q", dst, want)
    }
    v, err := sink.view()
    if err != nil {
        t.Fatalf("view after SetBytes failed: %v", err)
    }
    if &inBytes[0] == &dst[0] {
        t.Error("inBytes and dst share memory")
    }
    if &inBytes[0] == &v.b[0] {
        t.Error("inBytes and view share memory")
    }
    if &dst[0] == &v.b[0] {
        t.Error("dst and view share memory")
    }
}

// orderedFlightGroup allows the caller to force the schedule of when
// orig.Do will be called. This is useful to serialize calls such
// that singleflight cannot dedup them.
type orderedFlightGroup struct {
    mu     sync.Mutex
    stage1 chan bool
    stage2 chan bool
    orig   flightGroup
}

func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
    <-g.stage1
    <-g.stage2
    g.mu.Lock()
    defer g.mu.Unlock()
    return g.orig.Do(key, fn)
}

// TestNoDedup tests invariants on the cache size when singleflight is
// unable to dedup calls.
func TestNoDedup(t *testing.T) {
    const testkey = "testkey"
    const testval = "testval"
    g := newGroup("testgroup", 1024, GetterFunc(func(_ Context, key string, dest Sink) error {
        return dest.SetString(testval)
    }), nil)

    orderedGroup := &orderedFlightGroup{
        stage1: make(chan bool),
        stage2: make(chan bool),
        orig:   g.loadGroup,
    }
    // Replace loadGroup with our wrapper so we can control when
    // loadGroup.Do is entered for each concurrent request.
    g.loadGroup = orderedGroup

    // Issue two identical requests concurrently. Since the cache is
    // empty, both will miss and enter load(), but we only allow one
    // at a time to enter singleflight.Do, so the getter callback is
    // called twice.
    resc := make(chan string, 2)
    for i := 0; i < 2; i++ {
        go func() {
            var s string
            if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil {
                resc <- "ERROR:" + err.Error()
                return
            }
            resc <- s
        }()
    }

    // Ensure both goroutines have entered the Do routine. This implies
    // both concurrent requests have checked the cache, found it empty,
    // and called load().
    orderedGroup.stage1 <- true
    orderedGroup.stage1 <- true
    orderedGroup.stage2 <- true
    orderedGroup.stage2 <- true

    for i := 0; i < 2; i++ {
        if s := <-resc; s != testval {
            t.Errorf("result is %s want %s", s, testval)
        }
    }

    const wantItems = 1
    if g.mainCache.items() != wantItems {
        t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems)
    }

    // If the singleflight callback doesn't double-check the cache again
    // upon entry, we would increment nbytes twice but the entry would
    // only be in the cache once.
    const wantBytes = int64(len(testkey) + len(testval))
    if g.mainCache.nbytes != wantBytes {
        t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes)
    }
}

// TODO(bradfitz): port the Google-internal full integration test into here,
// using HTTP requests instead of our RPC system.