// github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/unixfs/mod/dagmodifier_test.go

package mod

import (
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"testing"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
	"github.com/ipfs/go-ipfs/blocks/blockstore"
	key "github.com/ipfs/go-ipfs/blocks/key"
	bs "github.com/ipfs/go-ipfs/blockservice"
	"github.com/ipfs/go-ipfs/exchange/offline"
	imp "github.com/ipfs/go-ipfs/importer"
	"github.com/ipfs/go-ipfs/importer/chunk"
	h "github.com/ipfs/go-ipfs/importer/helpers"
	trickle "github.com/ipfs/go-ipfs/importer/trickle"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	pin "github.com/ipfs/go-ipfs/pin"
	ft "github.com/ipfs/go-ipfs/unixfs"
	uio "github.com/ipfs/go-ipfs/unixfs/io"
	u "github.com/ipfs/go-ipfs/util"

	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
)

// getMockDagServ builds an in-memory DAG service backed by a map datastore
// and an offline exchange, along with a manual pinner for it.
func getMockDagServ(t testing.TB) (mdag.DAGService, pin.ManualPinner) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, pin.NewPinner(tsds, dserv).GetManual()
}

// getMockDagServAndBstore is like getMockDagServ but also returns the
// underlying blockstore so tests can run garbage collection against it.
func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore, pin.ManualPinner) {
	dstore := ds.NewMapDatastore()
	tsds := sync.MutexWrap(dstore)
	bstore := blockstore.NewBlockstore(tsds)
	bserv := bs.New(bstore, offline.Exchange(bstore))
	dserv := mdag.NewDAGService(bserv)
	return dserv, bstore, pin.NewPinner(tsds, dserv).GetManual()
}

// getNode imports `size` bytes of random data into a trickle DAG and returns
// both the raw bytes and the resulting root node.
func getNode(t testing.TB, dserv mdag.DAGService, size int64, pinner pin.ManualPinner) ([]byte, *mdag.Node) {
	in := io.LimitReader(u.NewTimeSeededRand(), size)
	node, err := imp.BuildTrickleDagFromReader(dserv, sizeSplitterGen(500)(in), imp.BasicPinnerCB(pinner))
	if err != nil {
		t.Fatal(err)
	}

	dr, err := uio.NewDagReader(context.Background(), node, dserv)
	if err != nil {
		t.Fatal(err)
	}

	b, err := ioutil.ReadAll(dr)
	if err != nil {
		t.Fatal(err)
	}

	return b, node
}

// testModWrite writes `size` random bytes at offset `beg` through the
// DagModifier, verifies the resulting DAG structure and contents, and returns
// the updated expected file contents.
func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte {
	newdata := make([]byte, size)
	r := u.NewTimeSeededRand()
	r.Read(newdata)

	if size+beg > uint64(len(orig)) {
		orig = append(orig, make([]byte, (size+beg)-uint64(len(orig)))...)
	}
	copy(orig[beg:], newdata)

	nmod, err := dm.WriteAt(newdata, int64(beg))
	if err != nil {
		t.Fatal(err)
	}

	if nmod != int(size) {
		t.Fatalf("Mod length not correct! %d != %d", nmod, size)
	}

	nd, err := dm.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	err = trickle.VerifyTrickleDagStructure(nd, dm.dagserv, h.DefaultLinksPerBlock, 4)
	if err != nil {
		t.Fatal(err)
	}

	rd, err := uio.NewDagReader(context.Background(), nd, dm.dagserv)
	if err != nil {
		t.Fatal(err)
	}

	after, err := ioutil.ReadAll(rd)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(after, orig)
	if err != nil {
		t.Fatal(err)
	}
	return orig
}

// sizeSplitterGen returns a SplitterGen that chunks input into fixed-size pieces.
func sizeSplitterGen(size int64) chunk.SplitterGen {
	return func(r io.Reader) chunk.Splitter {
		return chunk.NewSizeSplitter(r, size)
	}
}

func TestDagModifierBasic(t *testing.T) {
	dserv, pin := getMockDagServ(t)
	b, n := getNode(t, dserv, 50000, pin)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pin, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	// Within zero block
	beg := uint64(15)
	length := uint64(60)

	t.Log("Testing mod within zero block")
	b = testModWrite(t, beg, length, b, dagmod)

	// Within bounds of existing file
	beg = 1000
	length = 4000
	t.Log("Testing mod within bounds of existing multiblock file.")
	b = testModWrite(t, beg, length, b, dagmod)

	// Extend bounds
	beg = 49500
	length = 4000

	t.Log("Testing mod that extends file.")
	b = testModWrite(t, beg, length, b, dagmod)

	// "Append"
	beg = uint64(len(b))
	length = 3000
	t.Log("Testing pure append")
	b = testModWrite(t, beg, length, b, dagmod)

	// Verify reported length
	node, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	size, err := ft.DataSize(node.Data)
	if err != nil {
		t.Fatal(err)
	}

	expected := uint64(50000 + 3500 + 3000)
	if size != expected {
		t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected)
	}
}

func TestMultiWrite(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 4000)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}

		size, err := dagmod.Size()
		if err != nil {
			t.Fatal(err)
		}

		if size != int64(i+1) {
			t.Fatal("Size was reported incorrectly")
		}
	}
	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}

func TestMultiWriteAndFlush(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 20)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		n, err := dagmod.WriteAt(data[i:i+1], int64(i))
		if err != nil {
			t.Fatal(err)
		}
		if n != 1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != 1)")
		}
		err = dagmod.Sync()
		if err != nil {
			t.Fatal(err)
		}
	}
	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}

func TestWriteNewFile(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	towrite := make([]byte, 2000)
	u.NewTimeSeededRand().Read(towrite)

	nw, err := dagmod.Write(towrite)
	if err != nil {
		t.Fatal(err)
	}
	if nw != len(towrite) {
		t.Fatal("Wrote wrong amount")
	}

	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(ctx, nd, dserv)
	if err != nil {
		t.Fatal(err)
	}

	data, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	if err := arrComp(data, towrite); err != nil {
		t.Fatal(err)
	}
}

func TestMultiWriteCoal(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, 1000)
	u.NewTimeSeededRand().Read(data)

	for i := 0; i < len(data); i++ {
		n, err := dagmod.WriteAt(data[:i+1], 0)
		if err != nil {
			fmt.Println("FAIL AT ", i)
			t.Fatal(err)
		}
		if n != i+1 {
			t.Fatal("Somehow wrote the wrong number of bytes! (n != i+1)")
		}

	}
	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}

	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}
	rbuf, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	err = arrComp(rbuf, data)
	if err != nil {
		t.Fatal(err)
	}
}

func TestLargeWriteChunks(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	wrsize := 1000
	datasize := 10000000
	data := make([]byte, datasize)

	u.NewTimeSeededRand().Read(data)

	for i := 0; i < datasize/wrsize; i++ {
		n, err := dagmod.WriteAt(data[i*wrsize:(i+1)*wrsize], int64(i*wrsize))
		if err != nil {
			t.Fatal(err)
		}
		if n != wrsize {
			t.Fatal("failed to write buffer")
		}
	}

	out, err := ioutil.ReadAll(dagmod)
	if err != nil {
		t.Fatal(err)
	}

	if err = arrComp(out, data); err != nil {
		t.Fatal(err)
	}

}

func TestDagTruncate(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	b, n := getNode(t, dserv, 50000, pins)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	err = dagmod.Truncate(12345)
	if err != nil {
		t.Fatal(err)
	}

	_, err = dagmod.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(dagmod)
	if err != nil {
		t.Fatal(err)
	}

	if err = arrComp(out, b[:12345]); err != nil {
		t.Fatal(err)
	}
}

func TestSparseWrite(t *testing.T) {
	dserv, pins := getMockDagServ(t)
	_, n := getNode(t, dserv, 0, pins)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 5000)
	u.NewTimeSeededRand().Read(buf[2500:])

	wrote, err := dagmod.WriteAt(buf[2500:], 2500)
	if err != nil {
		t.Fatal(err)
	}

	if wrote != 2500 {
		t.Fatal("incorrect write amount")
	}

	_, err = dagmod.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(dagmod)
	if err != nil {
		t.Fatal(err)
	}

	if err = arrComp(out, buf); err != nil {
		t.Fatal(err)
	}
}

// basicGC deletes every unpinned block from the blockstore, emulating a
// simple garbage collection pass.
func basicGC(t *testing.T, bs blockstore.Blockstore, pins pin.ManualPinner) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // in case error occurs during operation
	keychan, err := bs.AllKeysChan(ctx)
	if err != nil {
		t.Fatal(err)
	}
	for k := range keychan { // rely on AllKeysChan to close chan
		if !pins.IsPinned(k) {
			err := bs.DeleteBlock(k)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

func TestCorrectPinning(t *testing.T) {
	dserv, bstore, pins := getMockDagServAndBstore(t)
	b, n := getNode(t, dserv, 50000, pins)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 1024)
	for i := 0; i < 100; i++ {
		size, err := dagmod.Size()
		if err != nil {
			t.Fatal(err)
		}
		offset := rand.Intn(int(size))
		u.NewTimeSeededRand().Read(buf)

		if offset+len(buf) > int(size) {
			b = append(b[:offset], buf...)
		} else {
			copy(b[offset:], buf)
		}

		n, err := dagmod.WriteAt(buf, int64(offset))
		if err != nil {
			t.Fatal(err)
		}
		if n != len(buf) {
			t.Fatal("wrote incorrect number of bytes")
		}
	}

	fisize, err := dagmod.Size()
	if err != nil {
		t.Fatal(err)
	}

	if int(fisize) != len(b) {
		t.Fatal("reported filesize incorrect", fisize, len(b))
	}

	// Run a GC, then ensure we can still read the file correctly
	basicGC(t, bstore, pins)

	nd, err := dagmod.GetNode()
	if err != nil {
		t.Fatal(err)
	}
	read, err := uio.NewDagReader(context.Background(), nd, dserv)
	if err != nil {
		t.Fatal(err)
	}

	out, err := ioutil.ReadAll(read)
	if err != nil {
		t.Fatal(err)
	}

	if err = arrComp(out, b); err != nil {
		t.Fatal(err)
	}

	rootk, err := nd.Key()
	if err != nil {
		t.Fatal(err)
	}

	// Verify only one recursive pin
	recpins := pins.RecursiveKeys()
	if len(recpins) != 1 {
		t.Fatal("Incorrect number of pinned entries")
	}

	// verify the correct node is pinned
	if recpins[0] != rootk {
		t.Fatal("Incorrect node recursively pinned")
	}

	indirpins := pins.IndirectKeys()
	children := enumerateChildren(t, nd, dserv)
	if len(indirpins) != len(children) {
		t.Log(len(indirpins), len(children))
		t.Fatal("Incorrect number of indirectly pinned blocks")
	}

}

// enumerateChildren recursively collects the keys of all descendants of nd.
func enumerateChildren(t *testing.T, nd *mdag.Node, ds mdag.DAGService) []key.Key {
	var out []key.Key
	for _, lnk := range nd.Links {
		out = append(out, key.Key(lnk.Hash))
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			t.Fatal(err)
		}
		children := enumerateChildren(t, child, ds)
		out = append(out, children...)
	}
	return out
}

func BenchmarkDagmodWrite(b *testing.B) {
	b.StopTimer()
	dserv, pins := getMockDagServ(b)
	_, n := getNode(b, dserv, 0, pins)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wrsize := 4096

	dagmod, err := NewDagModifier(ctx, n, dserv, pins, sizeSplitterGen(512))
	if err != nil {
		b.Fatal(err)
	}

	buf := make([]byte, b.N*wrsize)
	u.NewTimeSeededRand().Read(buf)
	b.StartTimer()
	b.SetBytes(int64(wrsize))
	for i := 0; i < b.N; i++ {
		n, err := dagmod.Write(buf[i*wrsize : (i+1)*wrsize])
		if err != nil {
			b.Fatal(err)
		}
		if n != wrsize {
			b.Fatal("Wrote bad size")
		}
	}
}

// arrComp returns an error if the two byte slices differ in length or content.
func arrComp(a, b []byte) error {
	if len(a) != len(b) {
		return fmt.Errorf("Arrays differ in length. %d != %d", len(a), len(b))
	}
	for i, v := range a {
		if v != b[i] {
			return fmt.Errorf("Arrays differ at index: %d", i)
		}
	}
	return nil
}

// printDag prints an indented, human-readable view of the DAG rooted at nd,
// for debugging purposes.
func printDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
	pbd, err := ft.FromBytes(nd.Data)
	if err != nil {
		panic(err)
	}

	for i := 0; i < indent; i++ {
		fmt.Print(" ")
	}
	fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
	if len(nd.Links) > 0 {
		fmt.Println()
	}
	for _, lnk := range nd.Links {
		child, err := lnk.GetNode(context.Background(), ds)
		if err != nil {
			panic(err)
		}
		printDag(child, ds, indent+1)
	}
	if len(nd.Links) > 0 {
		for i := 0; i < indent; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Println("}")
}