github.com/uber/kraken@v0.1.4/lib/torrent/scheduler/testutils_test.go

// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler

import (
	"flag"
	"io/ioutil"
	"net"
	"os"
	"reflect"
	"strconv"
	"testing"
	"time"

	"go.uber.org/zap"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"

	"github.com/uber/kraken/core"
	"github.com/uber/kraken/lib/hashring"
	"github.com/uber/kraken/lib/hostlist"
	"github.com/uber/kraken/lib/store"
	"github.com/uber/kraken/lib/torrent/networkevent"
	"github.com/uber/kraken/lib/torrent/scheduler/announcequeue"
	"github.com/uber/kraken/lib/torrent/scheduler/conn"
	"github.com/uber/kraken/lib/torrent/scheduler/connstate"
	"github.com/uber/kraken/lib/torrent/scheduler/dispatch"
	"github.com/uber/kraken/lib/torrent/storage"
	"github.com/uber/kraken/lib/torrent/storage/agentstorage"
	"github.com/uber/kraken/lib/torrent/storage/piecereader"
	mockmetainfoclient "github.com/uber/kraken/mocks/tracker/metainfoclient"
	"github.com/uber/kraken/tracker/announceclient"
	"github.com/uber/kraken/tracker/trackerserver"
	"github.com/uber/kraken/utils/log"
	"github.com/uber/kraken/utils/testutil"
)

const testTempDir = "/tmp/kraken_scheduler"

func Init() {
	os.Mkdir(testTempDir, 0775)

	debug := flag.Bool("scheduler.debug", false, "log all Scheduler debugging output")
	flag.Parse()

	zapConfig := zap.NewProductionConfig()
	zapConfig.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	zapConfig.Encoding = "console"

	if !*debug {
		zapConfig.OutputPaths = []string{}
	}

	log.ConfigureLogger(zapConfig)
}

func configFixture() Config {
	return Config{
		SeederTTI:          10 * time.Second,
		LeecherTTI:         time.Minute,
		PreemptionInterval: 500 * time.Millisecond,
		ConnTTI:            10 * time.Second,
		ConnTTL:            5 * time.Minute,
		ConnState:          connstate.Config{},
		Conn:               conn.ConfigFixture(),
		Dispatch:           dispatch.Config{},
		TorrentLog:         log.Config{Disable: true},
	}.applyDefaults()
}

type testMocks struct {
	ctrl           *gomock.Controller
	metaInfoClient *mockmetainfoclient.MockClient
	trackerAddr    string
	cleanup        *testutil.Cleanup
}

func newTestMocks(t gomock.TestReporter) (*testMocks, func()) {
	var cleanup testutil.Cleanup

	ctrl := gomock.NewController(t)
	cleanup.Add(ctrl.Finish)

	trackerAddr, stop := testutil.StartServer(trackerserver.Fixture().Handler())
	cleanup.Add(stop)

	return &testMocks{
		ctrl:           ctrl,
		metaInfoClient: mockmetainfoclient.NewMockClient(ctrl),
		trackerAddr:    trackerAddr,
		cleanup:        &cleanup,
	}, cleanup.Run
}
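// exampleTestMain is a hedged sketch, not part of the original file. Init
// registers the -scheduler.debug flag and calls flag.Parse, so it must run
// once before any test in this package; a TestMain along these lines is the
// assumed wiring, not the repository's actual one.
func exampleTestMain(m *testing.M) {
	Init()
	os.Exit(m.Run())
}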
type testPeer struct {
	pctx           core.PeerContext
	scheduler      *scheduler
	torrentArchive storage.TorrentArchive
	stats          tally.TestScope
	testProducer   *networkevent.TestProducer
	cads           *store.CADownloadStore
	cleanup        *testutil.Cleanup
}

func (m *testMocks) newPeer(config Config, options ...option) *testPeer {
	var cleanup testutil.Cleanup
	m.cleanup.Add(cleanup.Run)

	cads, c := store.CADownloadStoreFixture()
	cleanup.Add(c)

	stats := tally.NewTestScope("", nil)

	ta := agentstorage.NewTorrentArchive(stats, cads, m.metaInfoClient)

	pctx := core.PeerContext{
		PeerID: core.PeerIDFixture(),
		Zone:   "zone1",
		IP:     "localhost",
		Port:   findFreePort(),
	}
	ac := announceclient.New(pctx, hashring.NoopPassiveRing(hostlist.Fixture(m.trackerAddr)), nil)
	tp := networkevent.NewTestProducer()

	s, err := newScheduler(config, ta, stats, pctx, ac, tp, options...)
	if err != nil {
		panic(err)
	}
	if err := s.start(announcequeue.New()); err != nil {
		panic(err)
	}
	cleanup.Add(s.Stop)

	return &testPeer{pctx, s, ta, stats, tp, cads, &cleanup}
}

func (m *testMocks) newPeers(n int, config Config) []*testPeer {
	var peers []*testPeer
	for i := 0; i < n; i++ {
		peers = append(peers, m.newPeer(config))
	}
	return peers
}

// writeTorrent writes the given blob's content into a torrent in the peer's
// storage. Useful for populating a completed torrent before seeding it.
func (p *testPeer) writeTorrent(namespace string, blob *core.BlobFixture) {
	t, err := p.torrentArchive.CreateTorrent(namespace, blob.Digest)
	if err != nil {
		panic(err)
	}
	for i := 0; i < t.NumPieces(); i++ {
		start := int64(i) * blob.MetaInfo.PieceLength()
		end := start + t.PieceLength(i)
		if err := t.WritePiece(piecereader.NewBuffer(blob.Content[start:end]), i); err != nil {
			panic(err)
		}
	}
}

// checkTorrent asserts that the peer's torrent for blob is complete and that
// its content matches the blob byte-for-byte.
func (p *testPeer) checkTorrent(t *testing.T, namespace string, blob *core.BlobFixture) {
	require := require.New(t)

	tor, err := p.torrentArchive.GetTorrent(namespace, blob.Digest)
	require.NoError(err)

	require.True(tor.Complete())

	result := make([]byte, tor.Length())
	cursor := result
	for i := 0; i < tor.NumPieces(); i++ {
		pr, err := tor.GetPieceReader(i)
		require.NoError(err)
		pieceData, err := ioutil.ReadAll(pr)
		// Close each reader promptly rather than deferring inside the loop,
		// which would hold every piece reader open until the function returns.
		pr.Close()
		require.NoError(err)
		copy(cursor, pieceData)
		cursor = cursor[tor.PieceLength(i):]
	}
	require.Equal(blob.Content, result)
}

func findFreePort() int {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	_, portStr, err := net.SplitHostPort(l.Addr().String())
	if err != nil {
		panic(err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		panic(err)
	}
	return port
}
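// exampleDownload is a hedged sketch, not part of the original file, showing
// how the fixtures above compose into an end-to-end test: a seeder peer is
// populated via writeTorrent, a leecher downloads from it, and checkTorrent
// verifies the bytes. The core.SizedBlobFixture constructor, the mock's
// Download expectation, and the scheduler's Download signature are
// assumptions about APIs defined outside this file.
func exampleDownload(t *testing.T) {
	mocks, cleanup := newTestMocks(t)
	defer cleanup()

	config := configFixture()
	namespace := "test-namespace" // hypothetical namespace value

	// Assumed fixture constructor: a 256-byte blob split into 64-byte pieces.
	blob := core.SizedBlobFixture(256, 64)

	// Both peers resolve metainfo through the shared mock client; Download is
	// assumed to be called once per peer that opens the torrent.
	mocks.metaInfoClient.EXPECT().
		Download(namespace, blob.Digest).
		Return(blob.MetaInfo, nil).
		Times(2)

	seeder := mocks.newPeer(config)
	seeder.writeTorrent(namespace, blob)
	// Downloading an already-complete torrent is assumed to return
	// immediately and leave the peer seeding.
	require.NoError(t, seeder.scheduler.Download(namespace, blob.Digest))

	leecher := mocks.newPeer(config)
	require.NoError(t, leecher.scheduler.Download(namespace, blob.Digest))
	leecher.checkTorrent(t, namespace, blob)
}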
// hasConnEvent queries the scheduler state for an active conn to peerID for
// the torrent of infoHash, delivering the answer on result.
type hasConnEvent struct {
	peerID   core.PeerID
	infoHash core.InfoHash
	result   chan bool
}

func (e hasConnEvent) apply(s *state) {
	found := false
	for _, c := range s.conns.ActiveConns() {
		if c.PeerID() == e.peerID && c.InfoHash() == e.infoHash {
			found = true
			break
		}
	}
	e.result <- found
}

// waitForConnEstablished waits until s has established a connection to peerID
// for the torrent of infoHash.
func waitForConnEstablished(t *testing.T, s *scheduler, peerID core.PeerID, infoHash core.InfoHash) {
	err := testutil.PollUntilTrue(5*time.Second, func() bool {
		result := make(chan bool)
		s.eventLoop.send(hasConnEvent{peerID, infoHash, result})
		return <-result
	})
	if err != nil {
		t.Fatalf(
			"scheduler=%s did not establish conn to peer=%s hash=%s: %s",
			s.pctx.PeerID, peerID, infoHash, err)
	}
}

// waitForConnRemoved waits until s has closed the connection to peerID for the
// torrent of infoHash.
func waitForConnRemoved(t *testing.T, s *scheduler, peerID core.PeerID, infoHash core.InfoHash) {
	err := testutil.PollUntilTrue(5*time.Second, func() bool {
		result := make(chan bool)
		s.eventLoop.send(hasConnEvent{peerID, infoHash, result})
		return !<-result
	})
	if err != nil {
		t.Fatalf(
			"scheduler=%s did not remove conn to peer=%s hash=%s: %s",
			s.pctx.PeerID, peerID, infoHash, err)
	}
}

// hasConn checks whether s has an established connection to peerID for the
// torrent of infoHash.
func hasConn(s *scheduler, peerID core.PeerID, infoHash core.InfoHash) bool {
	result := make(chan bool)
	s.eventLoop.send(hasConnEvent{peerID, infoHash, result})
	return <-result
}

// hasTorrentEvent queries whether the scheduler holds torrent controls for
// infoHash, delivering the answer on result.
type hasTorrentEvent struct {
	infoHash core.InfoHash
	result   chan bool
}

func (e hasTorrentEvent) apply(s *state) {
	_, ok := s.torrentControls[e.infoHash]
	e.result <- ok
}

// waitForTorrentRemoved waits until s has removed the torrent of infoHash.
func waitForTorrentRemoved(t *testing.T, s *scheduler, infoHash core.InfoHash) {
	err := testutil.PollUntilTrue(5*time.Second, func() bool {
		result := make(chan bool)
		s.eventLoop.send(hasTorrentEvent{infoHash, result})
		return !<-result
	})
	if err != nil {
		t.Fatalf(
			"scheduler=%s did not remove torrent for hash=%s: %s",
			s.pctx.PeerID, infoHash, err)
	}
}

// waitForTorrentAdded waits until s has added the torrent of infoHash.
func waitForTorrentAdded(t *testing.T, s *scheduler, infoHash core.InfoHash) {
	err := testutil.PollUntilTrue(5*time.Second, func() bool {
		result := make(chan bool)
		s.eventLoop.send(hasTorrentEvent{infoHash, result})
		return <-result
	})
	if err != nil {
		t.Fatalf(
			"scheduler=%s did not add torrent for hash=%s: %s",
			s.pctx.PeerID, infoHash, err)
	}
}

// eventWatcher wraps an eventLoop and watches all events being sent. Note:
// clients must call waitFor, else all sends will block.
type eventWatcher struct {
	l      eventLoop
	events chan event
}

func newEventWatcher() *eventWatcher {
	return &eventWatcher{
		l:      newEventLoop(),
		events: make(chan event),
	}
}

// waitFor waits until an event of the same type as e has been sent on w.
func (w *eventWatcher) waitFor(t *testing.T, e event) {
	name := reflect.TypeOf(e).Name()
	timeout := time.After(5 * time.Second)
	for {
		select {
		case ee := <-w.events:
			if name == reflect.TypeOf(ee).Name() {
				return
			}
		case <-timeout:
			t.Fatalf("timed out waiting for %s to occur", name)
		}
	}
}

func (w *eventWatcher) send(e event) bool {
	if w.l.send(e) {
		go func() { w.events <- e }()
		return true
	}
	return false
}

// sendTimeout is unimplemented; tests only exercise send.
func (w *eventWatcher) sendTimeout(e event, timeout time.Duration) error {
	panic("unimplemented")
}

func (w *eventWatcher) run(s *state) {
	w.l.run(s)
}

func (w *eventWatcher) stop() {
	w.l.stop()
}
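// exampleEventWatcher is a hedged sketch, not part of the original file, of
// the intended eventWatcher flow. newPeer accepts scheduler options, and an
// option for swapping in a custom event loop is assumed to exist elsewhere in
// the package; the names in the comments below are hypothetical.
func exampleEventWatcher(t *testing.T, mocks *testMocks) {
	w := newEventWatcher()

	// Hypothetical wiring; substitute the package's real event-loop option:
	//
	//	p := mocks.newPeer(configFixture(), withEventLoop(w))
	//
	// Every event accepted by w.send is also forwarded on w.events, so the
	// test can block until the scheduler has processed a given event type:
	//
	//	w.waitFor(t, preemptionTickEvent{}) // hypothetical event type
	//
	// Without a matching waitFor, the goroutine spawned by w.send blocks
	// forever on the unbuffered events channel.
	_ = w
}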