github.com/janelia-flyem/dvid@v1.0.0/datatype/labelblk/sync.go

/*
	This file supports interactive syncing between data instances.  It is different
	from ingestion syncs that can more effectively batch changes.
*/

package labelblk

import (
	"encoding/binary"
	"fmt"
	"sync"

	"github.com/janelia-flyem/dvid/datastore"
	"github.com/janelia-flyem/dvid/datatype/common/labels"
	"github.com/janelia-flyem/dvid/datatype/imageblk"
	"github.com/janelia-flyem/dvid/dvid"
)

const (
	numBlockHandlers = 32

	DownsizeBlockEvent  = "LABELBLK_DOWNSIZE_ADD"
	DownsizeCommitEvent = "LABELBLK_DOWNSIZE_COMMIT"
)

type deltaBlock struct {
	mutID  uint64
	bcoord dvid.IZYXString // block coordinate of the originating labelblk resolution.
	data   []byte
}

type procMsg struct {
	v  dvid.VersionID
	op interface{}
}

type blockOp struct {
	delta interface{}
}

type mergeOp struct {
	mutID uint64
	labels.MergeOp
	bcoord dvid.IZYXString
}

type splitOp struct {
	mutID uint64
	labels.SplitOp
	bcoord dvid.IZYXString
}

type octant [8][]byte // octant has nil []byte if not modified.

// cache of all modified blocks, keyed by the lower-res (down-res by 2x) ZYX block coordinate.
type blockCache map[dvid.IZYXString]octant

// Returns the slice to which any down-resolution data should be written for the given higher-res block coord.
func (d *Data) getLoresCache(v dvid.VersionID, block dvid.IZYXString) ([]byte, error) {
	// Setup the octant buffer and block cache.
	downresBlock, err := block.Halfres()
	if err != nil {
		return nil, fmt.Errorf("unable to downres labelblk %q block: %v\n", d.DataName(), err)
	}

	var chunkPt dvid.ChunkPoint3d
	chunkPt, err = block.ToChunkPoint3d()
	if err != nil {
		return nil, err
	}

	// Determine which down-res sector (0-7, ordered by x, then y, then z) of the
	// 2x2x2 down-res block the given higher-res block occupies.
	nx := chunkPt[0] % 2
	ny := chunkPt[1] % 2
	nz := chunkPt[2] % 2
	sector := (nz * 4) + (ny * 2) + nx

	// Get the sector slice from the octant corresponding to the downres block coord.
	// Initialize blockCache if necessary.
	d.vcache_mu.Lock()
	defer d.vcache_mu.Unlock()

	var bc blockCache
	if d.vcache == nil {
		d.vcache = make(map[dvid.VersionID]blockCache)
	} else {
		var found bool
		bc, found = d.vcache[v]
		if !found {
			bc = nil
		}
	}
	if bc == nil {
		bc = make(blockCache)
		d.vcache[v] = bc
	}

	// Get the relevant slice.
	oct := bc[downresBlock]
	if oct[sector] == nil {
		nbytes := d.BlockSize().Prod() // voxels / 8 (2x down-res in each dimension) * 8 bytes per label
		oct[sector] = make([]byte, nbytes)
	}
	d.vcache[v][downresBlock] = oct
	return oct[sector], nil
}

func (d *Data) getReadOnlyBlockCache(v dvid.VersionID) (blockCache, error) {
	if d.vcache == nil {
		return nil, fmt.Errorf("downsize commit for %q attempted when no prior blocks were sent!\n", d.DataName())
	}

	bc, found := d.vcache[v]
	if !found {
		return nil, fmt.Errorf("downsize commit for %q sent when no cache for version %d was present!\n", d.DataName(), v)
	}
	return bc, nil
}
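
// Illustration (added commentary, not part of the original source): a worked example of the
// octant bookkeeping in getLoresCache above, assuming a hypothetical 32x32x32 block size.
// Each octant sector holds 1/8 of the voxels of a block, i.e. (32/2)^3 = 4096 voxels at
// 8 bytes per label = 32768 bytes, which is why BlockSize().Prod() can be used directly as
// the sector byte count.  A high-res block coordinate maps to its down-res block by integer
// halving, and to a sector by coordinate parity with x varying fastest:
//
//	chunkPt := dvid.ChunkPoint3d{5, 2, 7}                      // high-res block coord
//	// down-res block coord is (2, 1, 3)
//	sector := (chunkPt[2]%2)*4 + (chunkPt[1]%2)*2 + chunkPt[0]%2 // == 5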

// serializes octant contents.  Writes the octants into a preallocated block buffer that may
// hold old data, then returns a serialized data slice suitable for storage.
func (d *Data) serializeOctants(oct octant, blockBuf []byte) ([]byte, error) {
	blockSize := d.BlockSize()
	nx := blockSize.Value(0)
	nxy := blockSize.Value(1) * nx

	halfx := blockSize.Value(0) >> 1
	halfy := blockSize.Value(1) >> 1
	halfz := blockSize.Value(2) >> 1
	sectorbytes := int(halfx * halfy * halfz * 8)
	xbytes := halfx * 8

	for sector, data := range oct {
		if len(data) > 0 {
			if len(data) != sectorbytes {
				dvid.Criticalf("Expected %d bytes in octant for %s, instead got %d bytes.\n", sectorbytes, d.DataName(), len(data))
			}
			// Get the corner voxel (in block coordinates) for this sector.
			iz := sector >> 2
			sector -= iz * 4
			iy := sector >> 1
			ix := sector % 2

			ox := int32(ix) * halfx
			oy := int32(iy) * halfy
			oz := int32(iz) * halfz

			// Copy data from octant into larger block buffer.
			var oi int32
			for z := oz; z < oz+halfz; z++ {
				for y := oy; y < oy+halfy; y++ {
					di := (z*nxy + y*nx + ox) * 8
					copy(blockBuf[di:di+xbytes], data[oi:oi+xbytes])
					oi += xbytes
				}
			}
		}
	}

	return dvid.SerializeData(blockBuf, d.Compression(), d.Checksum())
}

// InitDataHandlers launches goroutines to handle each labelblk instance's syncs.
func (d *Data) InitDataHandlers() error {
	if d.syncCh != nil || d.syncDone != nil {
		return nil
	}
	d.syncCh = make(chan datastore.SyncMessage, 100)
	d.syncDone = make(chan *sync.WaitGroup)

	// Start N goroutines to process block mutations; each block is consistently
	// assigned to one of the N goroutines.
	for i := 0; i < numBlockHandlers; i++ {
		d.procCh[i] = make(chan procMsg, 100)
		go d.processBlock(d.procCh[i])
	}

	dvid.Infof("Launching sync event handler for data %q...\n", d.DataName())
	go d.processEvents()
	return nil
}

// Shutdown blocks until syncs are done, then terminates the background goroutines processing data.
func (d *Data) Shutdown(wg *sync.WaitGroup) {
	if d.syncDone != nil {
		dwg := new(sync.WaitGroup)
		dwg.Add(1)
		d.syncDone <- dwg
		dwg.Wait() // Block until we are done.
	}
	wg.Done()
}

// GetSyncSubs implements the datastore.Syncer interface
func (d *Data) GetSyncSubs(synced dvid.Data) (datastore.SyncSubs, error) {
	if d.syncCh == nil {
		if err := d.InitDataHandlers(); err != nil {
			return nil, fmt.Errorf("unable to initialize handlers for data %q: %v\n", d.DataName(), err)
		}
	}

	var evts []string
	switch synced.TypeName() {
	case "labelblk": // For down-res support
		evts = []string{
			DownsizeBlockEvent, DownsizeCommitEvent,
			labels.IngestBlockEvent, labels.MutateBlockEvent, labels.DeleteBlockEvent,
		}
	case "labelvol":
		evts = []string{labels.MergeBlockEvent, labels.SplitLabelEvent}
	default:
		return nil, fmt.Errorf("Unable to sync %s with %s since datatype %q is not supported.", d.DataName(), synced.DataName(), synced.TypeName())
	}

	subs := make(datastore.SyncSubs, len(evts))
	for i, evt := range evts {
		subs[i] = datastore.SyncSub{
			Event:  datastore.SyncEvent{Data: synced.DataUUID(), Event: evt},
			Notify: d.DataUUID(),
			Ch:     d.syncCh,
		}
	}
	return subs, nil
}
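
// Note (added commentary, not part of the original source): the down-res chain wired up by
// GetSyncSubs works roughly as follows.  An upstream (higher-res) labelblk publishes a
// DownsizeBlockEvent for each block it writes (see publishBlockChange) and a single
// DownsizeCommitEvent once a mutation is finished (see publishDownresCommit).  A labelblk
// synced at half resolution receives the per-block events, accumulates octants in its
// version-keyed blockCache via downsizeAdd, and only writes down-res blocks to storage when
// the commit event arrives (downsizeCommit), at which point it re-publishes the same two
// events for any further downstream instance.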

// Store the cache, then relay changes to any downstream instance.
// If we are getting these events, this particular data instance's goroutines
// should only be occupied processing the downsize events.
func (d *Data) downsizeCommit(v dvid.VersionID, mutID uint64) {
	// Block until all of the ops for this mutation have completed.
	d.MutWait(mutID)
	d.MutDelete(mutID)

	d.vcache_mu.RLock()
	defer d.vcache_mu.RUnlock()

	bc, err := d.getReadOnlyBlockCache(v)
	if err != nil {
		dvid.Criticalf("downsize commit for %q: %v\n", d.DataName(), err)
	}

	// Compute the buffer size for a full down-res block (8 bytes per label voxel).
	blockSize := d.BlockSize()
	blockBytes := blockSize.Prod() * 8

	// For each block, do a GET/PUT unless all 8 octants are set, in which case we can just PUT.
	// Each written block is then relayed to any downstream instance.
	store, err := datastore.GetKeyValueDB(d)
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}
	ctx := datastore.NewVersionedCtx(d, v)
	for downresBlock, oct := range bc {
		blockData := make([]byte, blockBytes)
		tk := NewTKeyByCoord(downresBlock)

		// Are all 8 octants set?
		partial := false
		for _, data := range oct {
			if data == nil {
				partial = true
				break
			}
		}

		// If not, GET the previous block data for reintegration and insert into nil octants.
		if partial {
			serialization, err := store.Get(ctx, tk)
			if err != nil {
				dvid.Errorf("unable to get data for %q, block %s: %v\n", d.DataName(), downresBlock, err)
				continue
			}
			uncompress := true
			deserialized, _, err := dvid.DeserializeData(serialization, uncompress)
			if err != nil {
				dvid.Criticalf("Unable to deserialize data for %q, block %s: %v", d.DataName(), downresBlock, err)
				continue
			}
			copy(blockData, deserialized)
		}

		// Write the data.
		serialization, err := d.serializeOctants(oct, blockData)
		if err != nil {
			dvid.Errorf("unable to serialize octant data in %q, block %s: %v\n", d.DataName(), downresBlock, err)
			continue
		}

		if err := store.Put(ctx, tk, serialization); err != nil {
			dvid.Errorf("unable to write downsized data in %q, block %s: %v\n", d.DataName(), downresBlock, err)
			continue
		}

		// Notify any downstream downres instance that we've just modified a block at this level.
		d.publishBlockChange(v, mutID, downresBlock, blockData)
	}

	// Notify any downstream downres instance that we're done and it can commit.
	d.publishDownresCommit(v, mutID)
}
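
// Illustration (added commentary, not part of the original source): worked sizes for the
// buffers used by downsizeCommit and serializeOctants above, assuming a hypothetical
// 32x32x32 block.  The full block buffer is blockBytes = 32*32*32*8 = 262144 bytes, while
// each octant sector carries sectorbytes = 16*16*16*8 = 32768 bytes.  Sector 5 decodes to
// corner (ix, iy, iz) = (1, 0, 1), so its first row is copied to destination byte offset
//
//	di := (int32(16)*1024 + int32(0)*32 + int32(16)) * 8 // == 131200
//
// and each copied row spans xbytes = 16*8 = 128 bytes.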

// Handle upstream mods on a labelblk we are downresing.
func (d *Data) downsizeAdd(v dvid.VersionID, delta deltaBlock) {
	defer d.MutDone(delta.mutID)

	lobuf, err := d.getLoresCache(v, delta.bcoord)
	if err != nil {
		dvid.Criticalf("unable to initialize block cache for labelblk %q: %v\n", d.DataName(), err)
		return
	}

	// Byte offsets from the corner voxel of a 2x2x2 neighborhood to each of its 8 voxels
	// within the high-res block.
	blockSize := d.BlockSize()
	bx := blockSize.Value(0) * 8
	bxy := blockSize.Value(1) * bx

	var off [8]int32
	off[0] = 0
	off[1] = 8
	off[2] = bx
	off[3] = bx + 8
	off[4] = bxy
	off[5] = bxy + off[1]
	off[6] = bxy + off[2]
	off[7] = bxy + off[3]

	var lo int32 // lores byte offset
	for z := int32(0); z < blockSize.Value(2); z += 2 {
		for y := int32(0); y < blockSize.Value(1); y += 2 {
			hi := z*bxy + y*bx // hires byte offset to 2^3 neighborhood corner
			for x := int32(0); x < blockSize.Value(0); x += 2 {
				counts := make(map[uint64]int)
				for n := 0; n < 8; n++ {
					i := hi + off[n]
					label := binary.LittleEndian.Uint64(delta.data[i : i+8])
					counts[label]++
				}

				// Get the most frequent label; if there's a tie, the larger label wins.
				var most int
				var best uint64
				for label, count := range counts {
					if count > most {
						best = label
						most = count
					} else if count == most && label > best {
						best = label
					}
				}

				// Store into the downres cache.
				//dvid.Infof("Data %q: best %d for (%d,%d,%d)\n", d.DataName(), best, x/2, y/2, z/2)
				binary.LittleEndian.PutUint64(lobuf[lo:lo+8], best)

				// Move to the next 2x2x2 neighborhood corner.
				lo += 8
				hi += 16 // 2 voxels * 8 bytes per label
			}
		}
	}
}
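
// Illustration (added commentary, not part of the original source): with a hypothetical
// 32x32x32 block, the byte strides in downsizeAdd above are bx = 32*8 = 256 and
// bxy = 32*256 = 8192, so the eight byte offsets into the high-res block for one 2x2x2
// neighborhood are
//
//	off := [8]int32{0, 8, 256, 264, 8192, 8200, 8448, 8456}
//
// Each down-res voxel is then assigned the most frequent of those 8 high-res labels.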

// gets all the changes relevant to labelblk, then breaks up any multi-block op into
// separate block ops and puts them onto channels to index-specific handlers.
func (d *Data) processEvents() {
	var stop bool
	var wg *sync.WaitGroup
	for {
		select {
		case wg = <-d.syncDone:
			queued := len(d.syncCh)
			if queued > 0 {
				dvid.Infof("Received shutdown signal for %q sync events (%d in queue)\n", d.DataName(), queued)
				stop = true
			} else {
				dvid.Infof("Shutting down sync event handler for instance %q...\n", d.DataName())
				wg.Done()
				return
			}
		case msg := <-d.syncCh:
			switch msg.Event {
			case DownsizeCommitEvent:
				mutID := msg.Delta.(uint64)
				go func(v dvid.VersionID, mutID uint64) {
					d.downsizeCommit(v, mutID) // async since we will wait on the waitgroup
					d.StopUpdate()
				}(msg.Version, mutID)

			default:
				d.handleEvent(msg)
			}

			if stop && len(d.syncCh) == 0 {
				dvid.Infof("Shutting down sync event handler for instance %q after draining sync events.\n", d.DataName())
				wg.Done()
				return
			}
		}
	}
}

func (d *Data) handleEvent(msg datastore.SyncMessage) {
	switch delta := msg.Delta.(type) {
	case labels.DeltaMerge:
		d.processMerge(msg.Version, delta)

	case labels.DeltaSplit:
		d.processSplit(msg.Version, delta)

	case deltaBlock: // received downres processing from upstream
		// NOTE: we need to add a wait here since the block data arrives through the channel
		// with some delay relative to the commit event.
		if d.MutAdd(delta.mutID) {
			d.StartUpdate() // stopped when the upstream instance issues a DownsizeCommitEvent: see processEvents()
		}
		n := delta.bcoord.Hash(numBlockHandlers)
		d.procCh[n] <- procMsg{op: delta, v: msg.Version}

	case imageblk.Block:
		if d.MutAdd(delta.MutID) {
			d.StartUpdate()
		}
		n := delta.Index.Hash(numBlockHandlers)
		block := delta.Index.ToIZYXString()
		d.procCh[n] <- procMsg{op: deltaBlock{delta.MutID, block, delta.Data}, v: msg.Version}

	case imageblk.MutatedBlock:
		if d.MutAdd(delta.MutID) {
			d.StartUpdate()
		}
		n := delta.Index.Hash(numBlockHandlers)
		block := delta.Index.ToIZYXString()
		d.procCh[n] <- procMsg{op: deltaBlock{delta.MutID, block, delta.Data}, v: msg.Version}

	default:
		dvid.Criticalf("Received unknown delta in labelblk.processEvents(): %v\n", msg)
	}
}

func (d *Data) publishDownresCommit(v dvid.VersionID, mutID uint64) {
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: DownsizeCommitEvent}
	msg := datastore.SyncMessage{Event: DownsizeCommitEvent, Version: v, Delta: mutID}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
}

// Notify any downstream downres instance of block change.
func (d *Data) publishBlockChange(v dvid.VersionID, mutID uint64, block dvid.IZYXString, blockData []byte) {
	evt := datastore.SyncEvent{Data: d.DataUUID(), Event: DownsizeBlockEvent}
	delta := deltaBlock{
		mutID:  mutID,
		bcoord: block,
		data:   blockData,
	}
	msg := datastore.SyncMessage{Event: DownsizeBlockEvent, Version: v, Delta: delta}
	if err := datastore.NotifySubscribers(evt, msg); err != nil {
		dvid.Criticalf("unable to notify subscribers of event %s: %v\n", evt, err)
	}
}

func (d *Data) processMerge(v dvid.VersionID, delta labels.DeltaMerge) {
	timedLog := dvid.NewTimeLog()
	d.StartUpdate()

	mutID := d.NewMutationID()
	for izyxStr := range delta.BlockMap {
		n := izyxStr.Hash(numBlockHandlers)
		d.MutAdd(mutID)
		op := mergeOp{mutID: mutID, MergeOp: delta.MergeOp, bcoord: izyxStr}
		d.procCh[n] <- procMsg{op: op, v: v}
	}
	// When we've processed all the delta blocks, we can remove this merge op
	// from the merge cache since all block relabeling will have completed.
	go func() {
		d.MutWait(mutID)
		d.MutDelete(mutID)
		timedLog.Debugf("labelblk sync complete for merge (%d blocks) of %s -> %d", len(delta.BlockMap), delta.MergeOp.Merged, delta.MergeOp.Target)
		d.StopUpdate()
		d.publishDownresCommit(v, mutID)
	}()
}

func (d *Data) processSplit(v dvid.VersionID, delta labels.DeltaSplit) {
	timedLog := dvid.NewTimeLog()
	d.StartUpdate()

	mutID := delta.MutID
	if delta.Split == nil {
		// Coarse Split
		for _, izyxStr := range delta.SortedBlocks {
			n := izyxStr.Hash(numBlockHandlers)
			d.MutAdd(mutID)
			op := splitOp{
				mutID: mutID,
				SplitOp: labels.SplitOp{
					Target:   delta.OldLabel,
					NewLabel: delta.NewLabel,
				},
				bcoord: izyxStr,
			}
			d.procCh[n] <- procMsg{op: op, v: v}
		}
	} else {
		// Fine Split
		for izyxStr, blockRLEs := range delta.Split {
			n := izyxStr.Hash(numBlockHandlers)
			d.MutAdd(mutID)
			op := splitOp{
				mutID: mutID,
				SplitOp: labels.SplitOp{
					Target:   delta.OldLabel,
					NewLabel: delta.NewLabel,
					RLEs:     blockRLEs,
				},
				bcoord: izyxStr,
			}
			d.procCh[n] <- procMsg{op: op, v: v}
		}
	}
	// Wait for all blocks to be split then mark end of split op.
	go func() {
		d.MutWait(mutID)
		d.MutDelete(mutID)
		timedLog.Debugf("labelblk sync complete for split (%d blocks) of %d -> %d", len(delta.Split), delta.OldLabel, delta.NewLabel)
		d.StopUpdate()
		d.publishDownresCommit(v, mutID)
	}()
}

// Handles a stream of block operations for a unique shard of block coordinates.
// Since the same block coordinate always gets mapped to the same goroutine we can
// do a GET/PUT without worrying about interleaving PUT from other goroutines, as
// long as there is only one DVID server.
func (d *Data) processBlock(ch <-chan procMsg) {
	for msg := range ch {
		ctx := datastore.NewVersionedCtx(d, msg.v)
		switch op := msg.op.(type) {
		case mergeOp:
			d.mergeBlock(ctx, op)

		case splitOp:
			d.splitBlock(ctx, op)

		case deltaBlock:
			d.downsizeAdd(msg.v, op)

		default:
			dvid.Criticalf("Received unknown processing msg in processBlock: %v\n", msg)
		}
	}
}
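
// Note (added commentary, not part of the original source): the handlers above rely on two
// conventions.  First, every block op is routed by bcoord.Hash(numBlockHandlers) to a fixed
// procCh goroutine, so the read-modify-write in mergeBlock and splitBlock below is never
// interleaved with another write to the same block, as long as only one DVID server is
// running.  Second, each fan-out (processMerge, processSplit, and the downres path in
// handleEvent) calls MutAdd per block while the per-block handlers call MutDone, so a
// finishing goroutine can MutWait/MutDelete to learn when every block of a mutation has
// been processed before publishing completion downstream.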

// handles relabeling of blocks during a merge operation.
func (d *Data) mergeBlock(ctx *datastore.VersionedCtx, op mergeOp) {
	defer d.MutDone(op.mutID)

	store, err := datastore.GetKeyValueDB(d)
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}

	tk := NewTKeyByCoord(op.bcoord)
	data, err := store.Get(ctx, tk)
	if err != nil {
		dvid.Errorf("Error on GET of labelblk with coord string %s\n", op.bcoord)
		return
	}
	if data == nil {
		dvid.Errorf("nil label block where merge was done!\n")
		return
	}

	blockData, _, err := dvid.DeserializeData(data, true)
	if err != nil {
		dvid.Criticalf("unable to deserialize label block in '%s': %v\n", d.DataName(), err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)
	if len(blockData) != blockBytes {
		dvid.Criticalf("After labelblk deserialization got back %d bytes, expected %d bytes\n", len(blockData), blockBytes)
		return
	}

	// Iterate through this block of labels and relabel any label that is in the merge set.
	for i := 0; i < blockBytes; i += 8 {
		label := binary.LittleEndian.Uint64(blockData[i : i+8])
		if _, merged := op.Merged[label]; merged {
			binary.LittleEndian.PutUint64(blockData[i:i+8], op.Target)
		}
	}

	// Store this block.
	serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Criticalf("Unable to serialize block in %q: %v\n", d.DataName(), err)
		return
	}
	if err := store.Put(ctx, tk, serialization); err != nil {
		dvid.Errorf("Error in putting key %v: %v\n", tk, err)
	}

	// Notify any downstream downres instance.
	d.publishBlockChange(ctx.VersionID(), op.mutID, op.bcoord, blockData)
}

// handles splitting of a label within a single block during a split operation.
func (d *Data) splitBlock(ctx *datastore.VersionedCtx, op splitOp) {
	defer d.MutDone(op.mutID)

	store, err := datastore.GetOrderedKeyValueDB(d)
	if err != nil {
		dvid.Errorf("Data type labelblk had error initializing store: %v\n", err)
		return
	}

	// Read the block.
	tk := NewTKeyByCoord(op.bcoord)
	data, err := store.Get(ctx, tk)
	if err != nil {
		dvid.Errorf("Error on GET of labelblk with coord string %s\n", op.bcoord)
		return
	}
	if data == nil {
		dvid.Errorf("nil label block where split was done, coord %s\n", op.bcoord)
		return
	}
	blockData, _, err := dvid.DeserializeData(data, true)
	if err != nil {
		dvid.Criticalf("unable to deserialize label block in %q key %v: %v\n", d.DataName(), op.bcoord, err)
		return
	}
	blockBytes := int(d.BlockSize().Prod() * 8)
	if len(blockData) != blockBytes {
		dvid.Criticalf("splitBlock: coord %s got back %d bytes, expected %d bytes\n", op.bcoord, len(blockData), blockBytes)
		return
	}

	// Modify the block using either voxel-level changes or coarser block-level mods.
	if op.RLEs != nil {
		if err := d.storeRLEs(blockData, op.NewLabel, op.bcoord, op.RLEs); err != nil {
			dvid.Errorf("can't store label %d RLEs into block %s: %v\n", op.NewLabel, op.bcoord, err)
			return
		}
	} else {
		// We are doing a coarse split, so replace all voxels of the target label with the new label.
		if err := d.replaceLabel(blockData, op.Target, op.NewLabel); err != nil {
			dvid.Errorf("can't replace label %d with %d in block %s: %v\n", op.Target, op.NewLabel, op.bcoord, err)
			return
		}
	}

	// Write the modified block.
	serialization, err := dvid.SerializeData(blockData, d.Compression(), d.Checksum())
	if err != nil {
		dvid.Criticalf("Unable to serialize block %s in %q: %v\n", op.bcoord, d.DataName(), err)
		return
	}
	if err := store.Put(ctx, tk, serialization); err != nil {
		dvid.Errorf("Error in putting key %v: %v\n", tk, err)
	}

	// Notify any downstream downres instance.
	d.publishBlockChange(ctx.VersionID(), op.mutID, op.bcoord, blockData)
}

// Replace a label in a block.
func (d *Data) replaceLabel(data []byte, fromLabel, toLabel uint64) error {
	n := len(data)
	if n%8 != 0 {
		return fmt.Errorf("label data in block not aligned to uint64: %d bytes", n)
	}
	for i := 0; i < n; i += 8 {
		label := binary.LittleEndian.Uint64(data[i : i+8])
		if label == fromLabel {
			binary.LittleEndian.PutUint64(data[i:i+8], toLabel)
		}
	}
	return nil
}

// Store a label into a block using RLEs.
func (d *Data) storeRLEs(data []byte, toLabel uint64, zyxStr dvid.IZYXString, rles dvid.RLEs) error {
	// Get the block coordinate
	bcoord, err := zyxStr.ToChunkPoint3d()
	if err != nil {
		return err
	}

	// Get the first voxel offset
	blockSize := d.BlockSize()
	offset := bcoord.MinPoint(blockSize)

	// Iterate through the RLEs, writing the label across each run's span of bytes within this block.
	nx := blockSize.Value(0) * 8
	nxy := nx * blockSize.Value(1)
	for _, rle := range rles {
		p := rle.StartPt().Sub(offset)
		i := p.Value(2)*nxy + p.Value(1)*nx + p.Value(0)*8
		for n := int32(0); n < rle.Length(); n++ {
			binary.LittleEndian.PutUint64(data[i:i+8], toLabel)
			i += 8
		}
	}
	return nil
}
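
// Illustration (added commentary, not part of the original source): a worked example of the
// span math in storeRLEs above, assuming a hypothetical 32x32x32 block whose minimum voxel
// is at (64, 96, 32).  The strides are nx = 32*8 = 256 bytes per row and nxy = 256*32 = 8192
// bytes per z-slice, so an RLE starting at voxel (70, 100, 33) with length 5 maps to the
// block-local point (6, 4, 1) and its first label is written at byte offset
//
//	i := int32(1)*8192 + int32(4)*256 + int32(6)*8 // == 9264
//
// with the remaining 4 labels written at 8-byte steps after it.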