github.com/axw/juju@v0.0.0-20161005053422-4bd6544d08d4/worker/instancepoller/aggregate_test.go

// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package instancepoller

import (
	"sync"
	"sync/atomic"
	"time"

	"github.com/juju/errors"
	jujutesting "github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"

	"github.com/juju/juju/environs"
	"github.com/juju/juju/instance"
	"github.com/juju/juju/network"
	"github.com/juju/juju/status"
	"github.com/juju/juju/testing"
	"github.com/juju/juju/worker/workertest"
)

type aggregateSuite struct {
	testing.BaseSuite
}

var _ = gc.Suite(&aggregateSuite{})

// testInstance is a fake instance.Instance with a canned id, address list,
// status message, and address-lookup error.
type testInstance struct {
	instance.Instance
	id        instance.Id
	addresses []network.Address
	status    string
	err       error
}

var _ instance.Instance = (*testInstance)(nil)

func (t *testInstance) Id() instance.Id {
	return t.id
}

func (t *testInstance) Addresses() ([]network.Address, error) {
	if t.err != nil {
		return nil, t.err
	}
	return t.addresses, nil
}

func (t *testInstance) Status() instance.InstanceStatus {
	return instance.InstanceStatus{Status: status.Unknown, Message: t.status}
}

// testInstanceGetter is a fake environment that records the ids passed to
// Instances, counts how many times it is called, and serves canned results.
type testInstanceGetter struct {
	sync.RWMutex
	// ids is set when the Instances method is called.
	ids     []instance.Id
	results map[instance.Id]instance.Instance
	err     error
	counter int32
}

func (tig *testInstanceGetter) Instances(ids []instance.Id) (result []instance.Instance, err error) {
	tig.ids = ids
	atomic.AddInt32(&tig.counter, 1)
	results := make([]instance.Instance, len(ids))
	for i, id := range ids {
		// We deliberately skip the map lookup's 'ok' value: ids with no
		// entry should yield a nil Instance in the results slice.
		results[i] = tig.results[id]
	}
	return results, tig.err
}

func (tig *testInstanceGetter) newTestInstance(id instance.Id, status string, addresses []string) *testInstance {
	if tig.results == nil {
		tig.results = make(map[instance.Id]instance.Instance)
	}
	thisInstance := &testInstance{
		id:        id,
		status:    status,
		addresses: network.NewAddresses(addresses...),
	}
	tig.results[thisInstance.Id()] = thisInstance
	return thisInstance
}
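
// A minimal usage sketch (illustrative, not part of the original suite):
// the fake getter deliberately returns a nil entry for any id missing from
// results, the same shape a real provider produces when only some of the
// requested instances exist. The ids below are hypothetical.
//
//	tig := new(testInstanceGetter)
//	tig.newTestInstance("i-known", "running", []string{"10.0.0.1"})
//	insts, err := tig.Instances([]instance.Id{"i-known", "i-missing"})
//	// insts[0] is the fake instance, insts[1] is nil, and err is tig.err.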

// Test that one request gets sent after a suitable delay.
func (s *aggregateSuite) TestSingleRequest(c *gc.C) {
	// We set up a couple of variables here so that we can use them locally
	// without type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Add a new test instance.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Run the check in a goroutine and make sure we wait for it to finish.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		info, err := aggregator.instanceInfo("foo")
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.DeepEquals, "foobar")
	}()

	// Unwind the test clock.
	waitAlarms(c, clock, 1)
	clock.Advance(delay)

	wg.Wait()

	// Kill the worker before looking at our testInstanceGetter to ensure
	// there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	ids := testGetter.ids
	c.Assert(ids, gc.DeepEquals, []instance.Id{"foo"})
}

// Test that several requests made in a short space of time get batched.
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	// We set up a couple of variables here so that we can use them locally
	// without type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Set up multiple instances to batch.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a closure for checks we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these.
	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checks pass.
	wg.Wait()

	// Kill the worker before looking at our testInstanceGetter to ensure
	// there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the expected contents.
	c.Assert(testGetter.ids, jc.SameContents, []instance.Id{"foo2", "foo3"})

	// Ensure we called Instances once and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(1))
}
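
// advanceWhenReady is an illustrative helper (an assumption, not original
// code) bundling the wait-then-advance dance the tests perform: the fake
// clock is only advanced once the worker has actually set its alarm(s),
// since an earlier Advance would be a no-op and the test would then hang.
func advanceWhenReady(c *gc.C, clock *jujutesting.Clock, alarms int, d time.Duration) {
	waitAlarms(c, clock, alarms)
	clock.Advance(d)
}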

// Test that advancing the clock by delay-time.Nanosecond and then killing
// the worker causes all pending requests to fail.
func (s *aggregateSuite) TestKillingWorkerKillsPendingReqs(c *gc.C) {
	// Set up local variables.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	defer workertest.CleanKill(c, aggregator)

	// Set up a couple of checks we can launch.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err.Error(), gc.Equals, "instanceInfo call aborted")
		c.Check(info.status.Message, gc.Equals, "")
	}

	// Launch a couple of checks.
	wg.Add(2)
	go checkInfo("foo2")

	// Advance the clock and kill the worker.
	waitAlarms(c, clock, 1)
	clock.Advance(delay - time.Nanosecond)
	aggregator.Kill()

	go checkInfo("foo3")

	// Make sure we're dead.
	workertest.CheckKilled(c, aggregator)
	wg.Wait()

	// Make sure we have no ids, since we're dead.
	c.Assert(len(testGetter.ids), gc.DeepEquals, 0)

	// Ensure we never called Instances and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(0))
}
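
// Note on the test above: the clock stops one nanosecond short of the
// batching delay, so the timer never fires and the batch is still pending
// when Kill arrives. Both waiters then see the abort error, and the
// provider is never consulted (ids stays empty, counter stays at zero).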

// Test that having sent/advanced/received one batch, you can
// send/advance/receive again and that works, too.
func (s *aggregateSuite) TestMultipleBatches(c *gc.C) {
	// Set up some local variables.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Second
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a checker we can launch as goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these.
	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	// TODO(redir): These could block forever, we should make the effort to be
	// robust here per http://reviews.vapour.ws/r/4885/ (see waitGroupDone at
	// the end of this file for one sketch).
	wg.Wait()

	// Ensure we got our list back with the expected length.
	c.Assert(len(testGetter.ids), gc.DeepEquals, 2)

	// And then a second batch.
	testGetter.newTestInstance("foo4", "spam", []string{"192.168.1.4"})
	testGetter.newTestInstance("foo5", "eggs", []string{"192.168.1.5"})

	// Launch and wait for this second batch.
	wg.Add(2)
	go checkInfo("foo4", "spam")
	go checkInfo("foo5", "eggs")

	// Unwind again to let our next batch through, then advance the clock.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	wg.Wait()

	// Shut down the worker.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the correct length.
	c.Assert(len(testGetter.ids), gc.DeepEquals, 2)

	// Ensure we called Instances twice and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.Equals, int32(2))
}

// Test that things behave as expected when env.Instances errors.
func (s *aggregateSuite) TestInstancesErrors(c *gc.C) {
	// Set up local variables.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Millisecond
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.newTestInstance("foo", "foobar", []string{"192.168.1.2"})
	testGetter.err = environs.ErrNoInstances
	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	defer workertest.CleanKill(c, aggregator)

	// Launch the check in a goroutine and wait for it.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		_, err := aggregator.instanceInfo("foo")
		c.Assert(err, gc.Equals, environs.ErrNoInstances)
	}()

	// Unwind to let our request through.
	waitAlarms(c, clock, 1)
	clock.Advance(delay)

	wg.Wait()

	// Kill the worker so we know there is no race checking the testInstanceGetter.
	workertest.CleanKill(c, aggregator)

	c.Assert(testGetter.err, gc.Equals, environs.ErrNoInstances)
	c.Assert(testGetter.counter, gc.Equals, int32(1))
}

// Test that the worker reports per-instance results and errors when the
// provider returns ErrPartialInstances.
func (s *aggregateSuite) TestPartialInstanceErrors(c *gc.C) {
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Second

	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.err = environs.ErrPartialInstances
	testGetter.newTestInstance("foo", "not foobar", []string{"192.168.1.2"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a checker we can launch as goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string, expectedError error) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		if expectedError == nil {
			c.Check(err, jc.ErrorIsNil)
		} else {
			c.Check(err.Error(), gc.Equals, expectedError.Error())
		}
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these.
	wg.Add(2)
	go checkInfo("foo", "not foobar", nil)
	go checkInfo("foo2", "", errors.New("instance foo2 not found"))

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	wg.Wait()

	// Now kill the worker so we don't risk a race in the following assertions.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the correct length.
	c.Assert(len(testGetter.ids), gc.Equals, 2)

	// Ensure we called Instances once.
	// TODO(redir): all this stuff is really crying out to be, e.g.
	// testGetter.CheckOneCall(c, "foo", "foo2") per
	// http://reviews.vapour.ws/r/4885/ (a sketch follows this test).
	c.Assert(testGetter.counter, gc.Equals, int32(1))
}
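
// CheckOneCall is a sketch of the assertion helper the TODO above asks for
// (per http://reviews.vapour.ws/r/4885/); it is an assumption about the
// intended shape, not part of the original file: assert Instances was
// called exactly once, with exactly the expected ids.
func (tig *testInstanceGetter) CheckOneCall(c *gc.C, expectIds ...instance.Id) {
	c.Check(atomic.LoadInt32(&tig.counter), gc.Equals, int32(1))
	c.Check(tig.ids, jc.SameContents, expectIds)
}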

// waitAlarms waits until count alarms have been set on the given test
// clock, failing the test if testing.LongWait elapses first.
func waitAlarms(c *gc.C, clock *jujutesting.Clock, count int) {
	timeout := time.After(testing.LongWait)
	for i := 0; i < count; i++ {
		select {
		case <-clock.Alarms():
		case <-timeout:
			c.Fatalf("timed out waiting for alarm %d to be set", i)
		}
	}
}
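
// waitGroupDone is a sketch (an assumption, not original code) of the
// robustness the TODO in TestMultipleBatches asks for: wait on a WaitGroup
// with a timeout rather than letting a stuck checker block the suite forever.
func waitGroupDone(c *gc.C, wg *sync.WaitGroup) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		wg.Wait()
	}()
	select {
	case <-done:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for checkers to finish")
	}
}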