github.com/janelia-flyem/dvid@v1.0.0/datatype/common/labels/compressed.go (about) 1 package labels 2 3 import ( 4 "bytes" 5 "compress/gzip" 6 "encoding/binary" 7 "fmt" 8 "io" 9 "math" 10 "runtime" 11 "sync" 12 13 "github.com/janelia-flyem/dvid/dvid" 14 ) 15 16 const SubBlockSize = 8 17 const subBlockShift = 3 // 2^3 == SubBlockSize 18 const DefaultSubBlocksPerBlock = 8 19 const DefaultBlockSize = DefaultSubBlocksPerBlock * SubBlockSize 20 21 const MaxBlockSize = 1024 // N^3 < max uint32, so N <= 2^10 22 const MaxSubBlockSize = MaxBlockSize / SubBlockSize 23 24 // PositionedBlock is a Block that also knows its position in DVID space via a chunk coordinate. 25 type PositionedBlock struct { 26 Block 27 BCoord dvid.IZYXString 28 } 29 30 func (pb PositionedBlock) String() string { 31 return pb.BCoord.String() 32 } 33 34 // OffsetDVID returns the DVID voxel coordinate corresponding to the first voxel of the Block, 35 // i.e., the lowest (x,y,z). 36 func (pb PositionedBlock) OffsetDVID() (dvid.Point3d, error) { 37 return pb.BCoord.VoxelOffset(pb.Size) 38 } 39 40 // DoSplitWithStats writes a new label into the RLEs defined by the split and returns how each supervoxel 41 // (counts key) was split. This is done by doing full expansion of block into uint64 array. 42 func (pb PositionedBlock) DoSplitWithStats(op SplitOp, m *SVSplitMap, newLabelFunc func() (uint64, error)) (split *Block, counts map[uint64]SVSplitCount, err error) { 43 var offset dvid.Point3d 44 if offset, err = pb.OffsetDVID(); err != nil { 45 return 46 } 47 lblarrayBytes, _ := pb.MakeLabelVolume() 48 lblarray, err := dvid.AliasByteToUint64(lblarrayBytes) 49 if err != nil { 50 return 51 } 52 counts = make(map[uint64]SVSplitCount) 53 54 replacements := make(map[uint64]uint64) 55 rles := op.RLEs.Offset(offset) 56 for _, rle := range rles { 57 pt := rle.StartPt() 58 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 59 for x := int32(0); x < rle.Length(); x++ { 60 lbl := lblarray[i] 61 if lbl != 0 { 62 svc := counts[lbl] 63 64 var relabel SVSplit 65 relabel, _, err = m.getMapping(lbl, newLabelFunc) 66 if err != nil { 67 return 68 } 69 lblarray[i] = relabel.Split 70 replacements[lbl] = relabel.Remain 71 svc.SVSplit = relabel 72 svc.Voxels++ 73 counts[lbl] = svc 74 } 75 i++ 76 } 77 } 78 79 if split, err = MakeBlock(lblarrayBytes, pb.Size); err != nil { 80 return 81 } 82 if len(replacements) > 0 { 83 split, _, err = split.ReplaceLabels(replacements) 84 } 85 return 86 } 87 88 // SplitStats checks voxels under the split RLEs and returns how each supervoxel 89 // should be split without doing a split. This is done by doing full expansion 90 // of block into uint64 array. 
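//
// Illustrative note (not part of the original source): like DoSplitWithStats above,
// this walks each split RLE over the expanded uint64 label array using ZYX linear
// indexing, where a block-local voxel (x,y,z) maps to
//
//	i := z*size[1]*size[0] + y*size[0] + x
//
// For example, in a default 64x64x64 block the voxel (3,2,1) lands at
// i = 1*64*64 + 2*64 + 3 = 4227, and a run of length L starting there covers
// indices 4227 through 4227+L-1.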
91 func (pb PositionedBlock) SplitStats(rles dvid.RLEs, m *SVSplitMap, newLabelFunc func() (uint64, error)) (counts map[uint64]SVSplitCount, err error) { 92 var offset dvid.Point3d 93 if offset, err = pb.OffsetDVID(); err != nil { 94 return 95 } 96 lblarrayBytes, _ := pb.MakeLabelVolume() 97 lblarray, err := dvid.AliasByteToUint64(lblarrayBytes) 98 if err != nil { 99 return 100 } 101 102 var relabel SVSplit 103 counts = make(map[uint64]SVSplitCount) 104 for _, rle := range rles.Offset(offset) { 105 pt := rle.StartPt() 106 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 107 for x := int32(0); x < rle.Length(); x++ { 108 lbl := lblarray[i] 109 if lbl != 0 { 110 svc := counts[lbl] 111 relabel, _, err = m.getMapping(lbl, newLabelFunc) 112 if err != nil { 113 return 114 } 115 svc.SVSplit = relabel 116 svc.Voxels++ 117 counts[lbl] = svc 118 } 119 i++ 120 } 121 } 122 runtime.KeepAlive(&lblarrayBytes) 123 return 124 } 125 126 // Split a target label using RLEs within a block. Only the target label is split. 127 // A nil split block is returned if target label is not within block. 128 // TODO: If RLEs continue to be used for splits, refactor / split up to make this more readable. 129 func (pb PositionedBlock) Split(op SplitOp) (split *Block, keptSize, splitSize uint64, err error) { 130 return pb.splitSlow(op) 131 // return pb.splitFast(op) 132 } 133 134 // splitSlow splits a target label using RLEs within a block by doing full expansion of block into 135 // uint64 array. Only the target label is split. A nil split block is returned if target label 136 // is not within block. 137 func (pb PositionedBlock) splitSlow(op SplitOp) (split *Block, keptSize, splitSize uint64, err error) { 138 var offset dvid.Point3d 139 if offset, err = pb.OffsetDVID(); err != nil { 140 return 141 } 142 lblarrayBytes, _ := pb.MakeLabelVolume() 143 lblarray, err := dvid.AliasByteToUint64(lblarrayBytes) 144 if err != nil { 145 return 146 } 147 for i := 0; i < len(lblarray); i++ { 148 if lblarray[i] == op.Target { 149 keptSize++ 150 } 151 } 152 if keptSize == 0 { 153 return 154 } 155 156 rles := op.RLEs.Offset(offset) 157 for _, rle := range rles { 158 pt := rle.StartPt() 159 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 160 for x := int32(0); x < rle.Length(); x++ { 161 lbl := lblarray[i] 162 if lbl == op.Target { 163 lblarray[i] = op.NewLabel 164 splitSize++ 165 keptSize-- 166 } 167 i++ 168 } 169 } 170 171 split, err = MakeBlock(lblarrayBytes, pb.Size) 172 return 173 } 174 175 // not working at this time. 176 func (pb PositionedBlock) splitFast(op SplitOp) (split *Block, keptSize, splitSize uint64, err error) { 177 var offset dvid.Point3d 178 if offset, err = pb.OffsetDVID(); err != nil { 179 return 180 } 181 182 gx, gy, gz := pb.Size[0]/SubBlockSize, pb.Size[1]/SubBlockSize, pb.Size[2]/SubBlockSize 183 numSubBlocks := uint32(gx * gy * gz) 184 185 // Create a bitmask for all split voxels of the Block. 
186 rles := op.RLEs.Offset(offset) 187 splitVoxels := make([]bool, pb.Size[0]*pb.Size[1]*pb.Size[2]) 188 for _, rle := range rles { 189 pt := rle.StartPt() 190 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 191 for x := int32(0); x < rle.Length(); x++ { 192 if i >= int32(len(splitVoxels)) { 193 err = fmt.Errorf("bad RLE / block size: rle %s, block size %s, offset %s", rle, pb.Size, offset) 194 return 195 } 196 // fmt.Printf("Added split voxel @ (%d, %d, %d)\n", pt[0]+x+offset[0], pt[1]+offset[1], pt[2]+offset[2]) 197 splitVoxels[i] = true 198 i++ 199 } 200 } 201 202 // Check if the target and split label is present. 203 var splitIndex, targetIndex uint32 204 var splitPresent, targetPresent bool 205 for i, label := range pb.Labels { 206 if label == op.NewLabel { 207 splitPresent = true 208 splitIndex = uint32(i) 209 } 210 if label == op.Target { 211 targetPresent = true 212 targetIndex = uint32(i) 213 } 214 } 215 if !targetPresent { 216 return 217 } 218 numLabels := uint32(len(pb.Labels)) 219 numNewLabels := numLabels 220 if !splitPresent { 221 splitIndex = numLabels 222 numNewLabels++ 223 } 224 225 // Iterate through all the sub-blocks, determining if the split label adds to that sub-block's indices 226 // and therefore changes the # of encoding bits necessary for the values. 227 subBlockNumVoxels := uint32(SubBlockSize * SubBlockSize * SubBlockSize) 228 indexAdded := make([]bool, numSubBlocks) // true if we added index to split label for the sub-block 229 svalues := make([]byte, numSubBlocks*512*2) // max size allocation for sub-blocks' encoded values 230 var sbNum, indexPos uint32 231 var bitpos, bitposNew uint32 232 var numNewSubBlockIndices uint32 233 for sz := int32(0); sz < gz; sz++ { 234 for sy := int32(0); sy < gy; sy++ { 235 for sx := int32(0); sx < gx; sx, sbNum = sx+1, sbNum+1 { 236 numSBLabels := pb.NumSBLabels[sbNum] 237 bits := bitsFor(numSBLabels) 238 numSBLabelsNew := numSBLabels 239 numNewSubBlockIndices += uint32(numSBLabels) 240 241 // is the target or split label in index? 242 var sbSplitFound, sbTargetFound bool 243 var sbSplitIndex, sbTargetIndex uint16 244 for i := uint16(0); i < numSBLabels; i++ { 245 index := pb.SBIndices[indexPos] 246 if index == splitIndex { 247 sbSplitFound = true 248 sbSplitIndex = i 249 } 250 if index == targetIndex { 251 sbTargetFound = true 252 sbTargetIndex = i 253 } 254 indexPos++ 255 } 256 if !sbTargetFound { 257 // We can skip this sub-block. 258 bytepos := bitpos >> 3 259 byteposNew := bitposNew >> 3 260 sbBits := bits * subBlockNumVoxels 261 if sbBits%8 != 0 { 262 sbBits += 8 - (sbBits % 8) 263 } 264 sbBytes := sbBits >> 3 265 copy(svalues[byteposNew:byteposNew+sbBytes], pb.SBValues[bytepos:bytepos+sbBytes]) 266 bitpos += sbBits 267 bitposNew += sbBits 268 continue 269 } 270 if !sbSplitFound { 271 indexAdded[sbNum] = true 272 sbSplitIndex = numSBLabels 273 numSBLabelsNew++ 274 numNewSubBlockIndices++ 275 } 276 bitsNew := bitsFor(numSBLabelsNew) 277 278 if bitsNew > 0 { 279 // Transfer the data from old to new with possible added index size. 
280 for z := int32(0); z < SubBlockSize; z++ { 281 blockZ := sz*SubBlockSize + z 282 for y := int32(0); y < SubBlockSize; y++ { 283 blockY := sy*SubBlockSize + y 284 for x := int32(0); x < SubBlockSize; x++ { 285 var oldIndex, newIndex uint16 286 bithead := bitpos % 8 287 bytepos := bitpos >> 3 288 if bithead+bits <= 8 { 289 // index totally within this byte 290 rightshift := uint(8 - bithead - bits) 291 oldIndex = uint16((pb.SBValues[bytepos] & leftBitMask[bithead]) >> rightshift) 292 } else { 293 // index spans byte boundaries 294 oldIndex = uint16(pb.SBValues[bytepos]&leftBitMask[bithead]) << 8 295 oldIndex |= uint16(pb.SBValues[bytepos+1]) 296 oldIndex >>= uint(16 - bithead - bits) 297 } 298 299 newIndex = oldIndex 300 if oldIndex == sbTargetIndex { 301 blockPos := blockZ*pb.Size[1]*pb.Size[0] + blockY*pb.Size[0] + sx*SubBlockSize + x 302 if splitVoxels[blockPos] { 303 newIndex = sbSplitIndex 304 splitSize++ 305 } else { 306 keptSize++ 307 } 308 } 309 310 bitheadNew := bitposNew % 8 311 byteposNew := bitposNew >> 3 312 if bithead+bits <= 8 { 313 // index totally within this byte 314 leftshift := uint(8 - bitsNew - bitheadNew) 315 svalues[byteposNew] |= byte(newIndex << leftshift) 316 } else { 317 // this straddles a byte boundary 318 leftshift := uint(16 - bitsNew - bitheadNew) 319 newIndex <<= leftshift 320 svalues[byteposNew] |= byte((newIndex & 0xFF00) >> 8) 321 svalues[byteposNew+1] = byte(newIndex & 0x00FF) 322 } 323 324 bitpos += bits 325 bitposNew += bitsNew 326 } 327 } 328 } 329 // make sure a byte doesn't have two sub-blocks' encoded values 330 if bitpos%8 != 0 { 331 bitpos += 8 - (bitpos % 8) 332 } 333 if bitposNew%8 != 0 { 334 bitposNew += 8 - (bitposNew % 8) 335 } 336 } 337 } 338 } 339 } 340 341 // Write all the labels, num sb labels, sb indices, and values to final buffer. 
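// The buffer size computed below follows the layout documented on the Block type:
// 16 header bytes, 8 bytes per label, 2 bytes per sub-block label count, 4 bytes
// per sub-block index, plus the padded per-voxel values. As an illustrative
// (hypothetical) example, a 64x64x64 block with 2 labels in which every one of the
// 512 sub-blocks references both labels needs
//
//	16 + 2*8 + 512*2 + 512*2*4 + 512*64 = 37920 bytes
//
// since 1 bit per voxel packs a sub-block's 512 values into 64 bytes, versus
// 64*64*64*8 = 2097152 bytes for the uncompressed uint64 volume.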
342 subBlockIndexBytes := numNewSubBlockIndices * 4 343 subBlockValueBytes := uint32(bitposNew >> 3) 344 blockBytes := 16 + numNewLabels*8 + numSubBlocks*2 + subBlockIndexBytes + subBlockValueBytes 345 346 split = new(Block) 347 split.Size = pb.Size 348 split.data, split.dataSrc = dvid.New8ByteAlignBytes(blockBytes) 349 pos := uint32(16) 350 nbytes := numLabels * 8 351 copy(split.data[:pos+nbytes], pb.data[:pos+nbytes]) 352 if !splitPresent { 353 binary.LittleEndian.PutUint32(split.data[12:16], numNewLabels) 354 binary.LittleEndian.PutUint64(split.data[pos+nbytes:pos+nbytes+8], op.NewLabel) 355 nbytes += 8 356 } 357 if split.Labels, err = dvid.AliasByteToUint64(split.data[pos : pos+nbytes]); err != nil { 358 return 359 } 360 361 pos += nbytes 362 nbytes = numSubBlocks * 2 363 if split.NumSBLabels, err = dvid.AliasByteToUint16(split.data[pos : pos+nbytes]); err != nil { 364 return 365 } 366 for i, num := range pb.NumSBLabels { 367 if indexAdded[i] { 368 split.NumSBLabels[i] = num + 1 369 } else { 370 split.NumSBLabels[i] = num 371 } 372 } 373 374 pos += nbytes 375 if split.SBIndices, err = dvid.AliasByteToUint32(split.data[pos : pos+subBlockIndexBytes]); err != nil { 376 return 377 } 378 indexPos = 0 379 var newIndexPos uint32 380 for sbNum := uint32(0); sbNum < numSubBlocks; sbNum++ { 381 for i := uint16(0); i < pb.NumSBLabels[sbNum]; i++ { 382 split.SBIndices[newIndexPos] = pb.SBIndices[indexPos] 383 newIndexPos++ 384 indexPos++ 385 } 386 if indexAdded[sbNum] { 387 split.SBIndices[newIndexPos] = splitIndex 388 newIndexPos++ 389 } 390 } 391 392 pos += subBlockIndexBytes 393 split.SBValues = split.data[pos:] 394 copy(split.SBValues, svalues[:subBlockValueBytes]) 395 return 396 } 397 398 // SplitSupervoxel splits a target supervoxel using RLEs within a block. 399 func (pb PositionedBlock) SplitSupervoxel(op SplitSupervoxelOp) (split *Block, keptSize, splitSize uint64, err error) { 400 var offset dvid.Point3d 401 if offset, err = pb.OffsetDVID(); err != nil { 402 return 403 } 404 lblarrayBytes, _ := pb.MakeLabelVolume() 405 lblarray, err := dvid.AliasByteToUint64(lblarrayBytes) 406 if err != nil { 407 return 408 } 409 410 brles, found := op.Split[pb.BCoord] 411 if found { 412 rles := brles.Offset(offset) 413 for _, rle := range rles { 414 pt := rle.StartPt() 415 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 416 for x := int32(0); x < rle.Length(); x++ { 417 if lblarray[i] == op.Supervoxel { 418 lblarray[i] = op.SplitSupervoxel 419 splitSize++ 420 } 421 i++ 422 } 423 } 424 } 425 426 for i := 0; i < len(lblarray); i++ { 427 if lblarray[i] == op.Supervoxel { 428 lblarray[i] = op.RemainSupervoxel 429 keptSize++ 430 } 431 } 432 433 split, err = MakeBlock(lblarrayBytes, pb.Size) 434 return 435 } 436 437 // SplitSupervoxels replaces all split supervoxels in a block with either a split label or a 438 // remain label depending on whether it falls under the split RLEs. 
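//
// A minimal usage sketch (not part of the original source; label values are
// hypothetical): supervoxel 10 becomes a new "split" id for voxels under the RLEs
// and a new "remain" id elsewhere in the block:
//
//	svsplits := map[uint64]SVSplit{
//		10: {Split: 1001, Remain: 1002},
//	}
//	newBlock, err := pb.SplitSupervoxels(rles, svsplits)
//	if err != nil {
//		// handle error
//	}
//
// where rles holds the split runs in DVID global coordinates; they are shifted to
// block-local coordinates internally via rles.Offset(offset).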
439 func (pb PositionedBlock) SplitSupervoxels(rles dvid.RLEs, svsplits map[uint64]SVSplit) (split *Block, err error) { 440 var offset dvid.Point3d 441 if offset, err = pb.OffsetDVID(); err != nil { 442 return 443 } 444 lblarrayBytes, _ := pb.MakeLabelVolume() 445 lblarray, err := dvid.AliasByteToUint64(lblarrayBytes) 446 if err != nil { 447 return 448 } 449 // assign voxels under split first 450 for _, rle := range rles.Offset(offset) { 451 pt := rle.StartPt() 452 i := pt[2]*pb.Size[1]*pb.Size[0] + pt[1]*pb.Size[0] + pt[0] 453 for x := int32(0); x < rle.Length(); x++ { 454 lbl := lblarray[i] 455 svsplit, found := svsplits[lbl] 456 if found { 457 lblarray[i] = svsplit.Split 458 } 459 i++ 460 } 461 } 462 463 // iterate over rest of block and if an affected supervoxel still exists, it was in 464 // outside of split. 465 numVoxels := int(pb.Size[0] * pb.Size[1] * pb.Size[2]) 466 for i := 0; i < numVoxels; i++ { 467 lbl := lblarray[i] 468 svsplit, found := svsplits[lbl] 469 if found { 470 lblarray[i] = svsplit.Remain 471 } 472 } 473 474 return MakeBlock(lblarrayBytes, pb.Size) 475 } 476 477 // MakeSolidBlock returns a Block that represents a single label of the given block size. 478 func MakeSolidBlock(label uint64, blockSize dvid.Point3d) *Block { 479 b := new(Block) 480 b.data = make([]byte, 24) 481 482 b.Labels = []uint64{label} 483 b.Size = blockSize 484 485 gx := uint32(blockSize[0] / SubBlockSize) 486 gy := uint32(blockSize[1] / SubBlockSize) 487 gz := uint32(blockSize[2] / SubBlockSize) 488 489 binary.LittleEndian.PutUint32(b.data[0:4], gx) 490 binary.LittleEndian.PutUint32(b.data[4:8], gy) 491 binary.LittleEndian.PutUint32(b.data[8:12], gz) 492 binary.LittleEndian.PutUint32(b.data[12:16], 1) 493 binary.LittleEndian.PutUint64(b.data[16:24], label) 494 495 return b 496 } 497 498 // SubvolumeToBlock converts a portion of the given label array into a compressed Block. 499 // It accepts a packed little-endian uint64 label array and a description of its subvolume, 500 // i.e., its extents in dvid space, and returns a compressed Block for the given chunk when 501 // tiling dvid space with the given chunk size. 502 func SubvolumeToBlock(sv *dvid.Subvolume, lbls []byte, idx dvid.IndexZYX, bsize dvid.Point3d) (*Block, error) { 503 dvidOff := idx.ToVoxelOffset(bsize) // offset to block in dvid space 504 blockOff := dvidOff.Sub(sv.StartPoint()) 505 s, err := setSubvolume(lbls, sv.Size(), blockOff, bsize) 506 if err != nil { 507 return nil, err 508 } 509 return s.encodeBlock() 510 } 511 512 // MakeBlock returns a compressed label Block given a packed little-endian uint64 513 // label array. It is the inverse of MakeLabelVolume(). There is no sharing of 514 // underlying memory between the returned Block and the given byte slice. 515 func MakeBlock(uint64array []byte, bsize dvid.Point3d) (*Block, error) { 516 // iterate through the subvolume corresponding to the Block and do encoding 517 s, err := setSubvolume(uint64array, bsize, dvid.Point3d{0, 0, 0}, bsize) 518 if err != nil { 519 return nil, err 520 } 521 return s.encodeBlock() 522 } 523 524 // Block is the unit of storage for compressed DVID labels. It is inspired by the 525 // Neuroglancer compression scheme and makes the following changes: (1) a 526 // block-level label list with sub-block indices into the list (minimal required 527 // bits vs 64 bits in original Neuroglancer scheme), (2) the number of bits for 528 // encoding values is not required to be a power of two. 
A block-level label list 529 // allows easy sharing of labels between sub-blocks, and sub-block storage can be 530 // more efficient due to the smaller index (at the cost of an indirection) and 531 // better encoded value packing (at the cost of byte alignment). In both cases 532 // memory is gained for increased computation. 533 // 534 // Blocks cover nx * ny * nz voxels. This implementation allows any choice of nx, 535 // ny, and nz with two restrictions: (1) nx, ny, and nz must be a multiple of 8 536 // greater than 16, and (2) the total number of labels cannot exceed the capacity 537 // of a uint32. 538 // 539 // Internally, labels are stored in 8x8x8 sub-blocks. There are gx * gy * gz 540 // sub-blocks where gx = nx / 8; gy = ny / 8; gz = nz / 8. 541 // 542 // The byte layout will be the following if there are N labels in the Block: 543 // 544 // 3 * uint32 values of gx, gy, and gz 545 // uint32 # of labels (N), cannot exceed uint32. 546 // N * uint64 packed labels in little-endian format. Label 0 can be 547 // used to represent deleted labels, e.g., after a merge 548 // operation to avoid changing all sub-block indices. 549 // 550 // ----- Data below is only included if N > 1, otherwise it is a solid block. 551 // Nsb = # sub-blocks = gx * gy * gz 552 // 553 // Nsb * uint16 # of labels for sub-blocks (Ns[i]). 554 // Each uint16 Ns[i] = # labels for sub-block i. 555 // If Ns[i] == 0, the sub-block has no data 556 // (uninitialized), which is useful for constructing 557 // Blocks with sparse data. 558 // 559 // Nsb * Ns * uint32 label indices for sub-blocks where Ns = sum of Ns[i] 560 // over all sub-blocks. For each sub-block i, we have 561 // Ns[i] label indices of lBits. 562 // 563 // Nsb * values sub-block indices for each voxel. 564 // Data encompasses 512 * ceil(log2(Ns[i])) bits, 565 // padded so no two sub-blocks have indices in the 566 // same byte. At most we use 9 bits per voxel for up 567 // to the 512 labels in sub-block. A value gives the 568 // sub-block index which points to the index into 569 // the N labels. If Ns[i] <= 1, there are no values. 570 // If Ns[i] = 0, the 8x8x8 voxels are set to label 0. 571 // If Ns[i] = 1, all voxels are the given label index. 572 type Block struct { 573 Labels []uint64 // labels in Block. 574 Size dvid.Point3d // # voxels in each dimension for this block 575 576 // The following exported properties are only non-nil if len(Labels) > 1 577 578 NumSBLabels []uint16 // # of labels for each sub-block 579 SBIndices []uint32 // indices into Labels array 580 SBValues []byte // compressed voxel values giving index into SBIndices. 581 582 data []byte // serialized format as described above 583 dataSrc []uint64 // slice that provides the 8-byte aligned memory used for data. 584 } 585 586 // CompressGZIP returns a gzip compressed encoding of the serialized block data. 587 func (b Block) CompressGZIP() ([]byte, error) { 588 var gzipOut bytes.Buffer 589 zw := gzip.NewWriter(&gzipOut) 590 if _, err := zw.Write(b.data); err != nil { 591 return nil, err 592 } 593 zw.Flush() 594 zw.Close() 595 return gzipOut.Bytes(), nil 596 } 597 598 // CalcNumLabels calculates the change in the number of voxels under each non-zero label. 599 // If a previous Block is given, the change is calculated from the previous numbers.
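//
// Usage sketch (not part of the original source; variable names are hypothetical):
// computing how per-label voxel counts change when curBlock replaces prevBlock at
// the same block coordinate:
//
//	delta := curBlock.CalcNumLabels(prevBlock)
//	for label, change := range delta {
//		// change < 0 means the label lost voxels; change > 0 means it gained them
//		_ = label
//	}
//
// Passing a nil previous Block yields the absolute per-label voxel counts of the
// receiver.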
600 func (b Block) CalcNumLabels(prev *Block) map[uint64]int32 { 601 delta := make(map[uint64]int32) 602 603 // if previous block given, init those counts as negative 604 if prev != nil { 605 prev.calcNumLabels(delta, false) 606 } 607 b.calcNumLabels(delta, true) 608 609 return delta 610 } 611 612 func (b Block) calcNumLabels(delta map[uint64]int32, add bool) { 613 numVoxels := int32(b.Size.Prod()) 614 615 switch len(b.Labels) { 616 case 0: 617 dvid.Infof("Block has 0 labels!\n") 618 return 619 case 1: 620 if b.Labels[0] == 0 { 621 return 622 } 623 if add { 624 delta[b.Labels[0]] += numVoxels 625 } else { 626 delta[b.Labels[0]] -= numVoxels 627 } 628 return 629 default: 630 } 631 632 gx, gy, gz := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize, b.Size[2]/SubBlockSize 633 subBlockNumVoxels := int32(SubBlockSize * SubBlockSize * SubBlockSize) 634 sbLabels := make([]uint64, subBlockNumVoxels) // preallocate max # of labels for sub-block 635 636 var indexPos uint32 637 var bitpos, subBlockNum int 638 var sx, sy, sz int32 639 for sz = 0; sz < gz; sz++ { 640 for sy = 0; sy < gy; sy++ { 641 for sx = 0; sx < gx; sx, subBlockNum = sx+1, subBlockNum+1 { 642 643 numSBLabels := b.NumSBLabels[subBlockNum] 644 645 switch numSBLabels { 646 case 0: 647 continue 648 case 1: 649 label := b.Labels[b.SBIndices[indexPos]] 650 indexPos++ 651 if label == 0 { 652 continue 653 } 654 if add { 655 delta[label] += subBlockNumVoxels 656 } else { 657 delta[label] -= subBlockNumVoxels 658 } 659 continue 660 default: 661 } 662 bits := int(bitsFor(numSBLabels)) 663 for i := uint16(0); i < numSBLabels; i++ { 664 sbLabels[i] = b.Labels[b.SBIndices[indexPos]] 665 indexPos++ 666 } 667 668 var x, y, z int32 669 for z = 0; z < SubBlockSize; z++ { 670 for y = 0; y < SubBlockSize; y++ { 671 for x = 0; x < SubBlockSize; x++ { 672 var index uint16 673 bithead := bitpos % 8 674 bytepos := bitpos >> 3 675 if bithead+bits <= 8 { 676 // index totally within this byte 677 rightshift := uint(8 - bithead - bits) 678 index = uint16((b.SBValues[bytepos] & leftBitMask[bithead]) >> rightshift) 679 } else { 680 // index spans byte boundaries 681 index = uint16(b.SBValues[bytepos]&leftBitMask[bithead]) << 8 682 index |= uint16(b.SBValues[bytepos+1]) 683 index >>= uint(16 - bithead - bits) 684 } 685 label := sbLabels[index] 686 if label != 0 { 687 if add { 688 delta[label]++ 689 } else { 690 delta[label]-- 691 } 692 } 693 bitpos += bits 694 } 695 } 696 } 697 if bitpos%8 != 0 { 698 bitpos += 8 - (bitpos % 8) 699 } 700 } 701 } 702 } 703 } 704 705 // get the # voxels corresponding to a particular label index. 
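// Like calcNumLabels above, it decodes the packed per-voxel indices in place. As an
// illustrative example of that decoding (not from the original source; values
// stands for b.SBValues): a sub-block with 5 labels uses ceil(log2(5)) = 3 bits per
// voxel, so its 512 values occupy 1536 bits = 192 bytes. A value whose 3 bits start
// at bit position 30 straddles a byte boundary and is recovered as in the code
// below:
//
//	bithead := bitpos % 8               // 6
//	bytepos := bitpos >> 3              // 3
//	index := uint16(values[bytepos]&leftBitMask[bithead]) << 8
//	index |= uint16(values[bytepos+1])
//	index >>= uint(16 - bithead - bits) // keep the 3 bits that follow bithead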
706 func (b *Block) getNumVoxels(labelIndex uint32) (labelVoxels uint64) { 707 gx, gy, gz := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize, b.Size[2]/SubBlockSize 708 subBlockNumVoxels := uint64(SubBlockSize * SubBlockSize * SubBlockSize) 709 710 if len(b.Labels) == 0 { 711 return 0 712 } else if len(b.Labels) == 1 { 713 if labelIndex == 0 { 714 return uint64(b.Size.Prod()) 715 } 716 return 0 717 } 718 719 var indexPos uint32 720 var bitpos, subBlockNum int 721 var sx, sy, sz int32 722 for sz = 0; sz < gz; sz++ { 723 for sy = 0; sy < gy; sy++ { 724 for sx = 0; sx < gx; sx, subBlockNum = sx+1, subBlockNum+1 { 725 726 numSBLabels := b.NumSBLabels[subBlockNum] 727 728 switch numSBLabels { 729 case 0: 730 continue 731 case 1: 732 if b.SBIndices[indexPos] == labelIndex { 733 labelVoxels += subBlockNumVoxels 734 } 735 indexPos++ 736 continue 737 default: 738 } 739 740 var found bool 741 var targetIndex uint16 742 for i := uint16(0); i < numSBLabels; i++ { 743 if b.SBIndices[indexPos] == labelIndex { 744 found = true 745 targetIndex = i 746 } 747 indexPos++ 748 } 749 if !found { 750 continue 751 } 752 bits := int(bitsFor(numSBLabels)) 753 754 var x, y, z int32 755 for z = 0; z < SubBlockSize; z++ { 756 for y = 0; y < SubBlockSize; y++ { 757 for x = 0; x < SubBlockSize; x++ { 758 var index uint16 759 bithead := bitpos % 8 760 bytepos := bitpos >> 3 761 if bithead+bits <= 8 { 762 // index totally within this byte 763 rightshift := uint(8 - bithead - bits) 764 index = uint16((b.SBValues[bytepos] & leftBitMask[bithead]) >> rightshift) 765 } else { 766 // index spans byte boundaries 767 index = uint16(b.SBValues[bytepos]&leftBitMask[bithead]) << 8 768 index |= uint16(b.SBValues[bytepos+1]) 769 index >>= uint(16 - bithead - bits) 770 } 771 if index == targetIndex { 772 labelVoxels++ 773 } 774 bitpos += bits 775 } 776 } 777 } 778 if bitpos%8 != 0 { 779 bitpos += 8 - (bitpos % 8) 780 } 781 } 782 } 783 } 784 return 785 } 786 787 type ptIndex struct { 788 pos dvid.Point3d 789 index int 790 } 791 792 // GetPointLabels returns the labels associated with each point by traversing the compressed data. 793 // If the point is outside the block, a zero is returned. 
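//
// Usage sketch (not part of the original source; the coordinates are hypothetical
// and relative to the block's first voxel):
//
//	pts := []dvid.Point3d{{0, 0, 0}, {31, 15, 7}}
//	labels := b.GetPointLabels(pts)
//	// labels[i] is the label at pts[i], or 0 if pts[i] falls outside the block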
794 func (b *Block) GetPointLabels(pts []dvid.Point3d) []uint64 { 795 if len(pts) == 0 { 796 return []uint64{} 797 } 798 results := make([]uint64, len(pts)) 799 800 var label uint64 801 singleValue := false 802 if len(b.Labels) == 0 { 803 singleValue = true 804 label = 0 805 } else if len(b.Labels) == 1 { 806 singleValue = true 807 label = b.Labels[0] 808 } 809 if singleValue { 810 for i := 0; i < len(pts); i++ { 811 results[i] = label 812 } 813 return results 814 } 815 816 gx, gy, gz := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize, b.Size[2]/SubBlockSize 817 maxSubBlocks := int(gx * gy * gz) 818 subBlockPts := make(map[int][]ptIndex) 819 for i, pt := range pts { 820 sx := pt[0] >> subBlockShift 821 sy := pt[1] >> subBlockShift 822 sz := pt[2] >> subBlockShift 823 sbx := pt[0] % SubBlockSize 824 sby := pt[1] % SubBlockSize 825 sbz := pt[2] % SubBlockSize 826 subBlockNum := int(sz*gy*gx + sy*gx + sx) 827 if subBlockNum >= maxSubBlocks { 828 dvid.Errorf("attempted to get label for point %s that was out of block, skipping point\n", pt) 829 continue 830 } 831 pti := ptIndex{pos: dvid.Point3d{sbx, sby, sbz}, index: i} 832 sbPts, found := subBlockPts[subBlockNum] 833 if found { 834 sbPts = append(sbPts, pti) 835 } else { 836 sbPts = []ptIndex{pti} 837 } 838 subBlockPts[subBlockNum] = sbPts 839 } 840 if len(subBlockPts) == 0 { 841 // all the points are outside the block! 842 return results 843 } 844 845 subBlockNumVoxels := SubBlockSize * SubBlockSize * SubBlockSize 846 847 var indexPos uint32 848 var bitpos int 849 for subBlockNum := 0; subBlockNum < maxSubBlocks; subBlockNum++ { 850 sbPts := subBlockPts[subBlockNum] 851 852 numSBLabels := b.NumSBLabels[subBlockNum] 853 switch numSBLabels { 854 case 0: 855 continue 856 case 1: 857 // make all points in this sub-block point to the single label 858 labelIndex := b.SBIndices[indexPos] 859 label := b.Labels[labelIndex] 860 for _, pti := range sbPts { 861 results[pti.index] = label 862 } 863 indexPos++ 864 continue 865 default: 866 } 867 868 bits := int(bitsFor(numSBLabels)) 869 for _, pti := range sbPts { 870 bitposPt := int(pti.pos[2]*SubBlockSize*SubBlockSize+pti.pos[1]*SubBlockSize+pti.pos[0])*bits + bitpos 871 var index uint16 872 bithead := bitposPt % 8 873 bytepos := bitposPt >> 3 874 if bithead+bits <= 8 { 875 // index totally within this byte 876 rightshift := uint(8 - bithead - bits) 877 index = uint16((b.SBValues[bytepos] & leftBitMask[bithead]) >> rightshift) 878 } else { 879 // index spans byte boundaries 880 index = uint16(b.SBValues[bytepos]&leftBitMask[bithead]) << 8 881 index |= uint16(b.SBValues[bytepos+1]) 882 index >>= uint(16 - bithead - bits) 883 } 884 label := b.Labels[b.SBIndices[indexPos+uint32(index)]] 885 results[pti.index] = label 886 } 887 888 bitpos += subBlockNumVoxels * bits 889 if bitpos%8 != 0 { 890 bitpos += 8 - (bitpos % 8) 891 } 892 indexPos += uint32(numSBLabels) // advance to next sub-block indices 893 } 894 return results 895 } 896 897 // MergeLabels returns a new block that has computed the given MergeOp. 
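//
// Usage sketch (not part of the original source; label values are hypothetical and
// op.Merged is assumed to be this package's Set of labels):
//
//	op := MergeOp{
//		Target: 23,
//		Merged: Set{41: struct{}{}, 57: struct{}{}},
//	}
//	merged, err := b.MergeLabels(op)
//	if err != nil {
//		// handle error
//	}
//	// merged references labels 41 and 57 as label 23; the receiver b is unchanged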
898 func (b *Block) MergeLabels(op MergeOp) (merged *Block, err error) { 899 merged = new(Block) 900 merged.data, merged.dataSrc = dvid.New8ByteAlignBytes(uint32(len(b.data))) // at most the length of the unmerged Block 901 copy(merged.data, b.data) 902 if err = merged.setExportedVars(); err != nil { 903 merged = nil 904 return 905 } 906 907 var targetFound bool 908 var targetIndex uint32 909 mergedIndices := make(map[uint32]struct{}, len(op.Merged)) 910 var numMerged uint32 911 for i, label := range b.Labels { 912 _, found := op.Merged[label] 913 if found { 914 numMerged++ 915 mergedIndices[uint32(i)] = struct{}{} 916 merged.Labels[i] = 0 917 } 918 if label == op.Target { 919 targetFound = true 920 targetIndex = uint32(i) 921 } 922 } 923 924 if numMerged == 0 { 925 return 926 } 927 928 if !targetFound { 929 var mergedIndex uint32 930 for mergedIndex = range mergedIndices { 931 break 932 } 933 targetIndex = mergedIndex // use the first of the merged labels as target label 934 merged.Labels[targetIndex] = op.Target 935 } 936 937 for i, index := range merged.SBIndices { 938 if _, found := mergedIndices[index]; found { 939 merged.SBIndices[i] = targetIndex 940 } 941 } 942 return 943 } 944 945 // ReplaceLabel replaces references to the target label with newLabel. 946 func (b *Block) ReplaceLabel(target, newLabel uint64) (replace *Block, replaceSize uint64, err error) { 947 replace = new(Block) 948 replace.data, replace.dataSrc = dvid.New8ByteAlignBytes(uint32(len(b.data))) 949 copy(replace.data, b.data) 950 if err = replace.setExportedVars(); err != nil { 951 replace = nil 952 return 953 } 954 955 for i, label := range replace.Labels { 956 if label == target { 957 replaceSize += replace.getNumVoxels(uint32(i)) 958 replace.Labels[i] = newLabel 959 } 960 } 961 return 962 } 963 964 // ReplaceLabels replaces labels according to mapping and doesn't compute sizes. 965 func (b *Block) ReplaceLabels(mapping map[uint64]uint64) (replace *Block, replaced bool, err error) { 966 replace = new(Block) 967 replace.data, replace.dataSrc = dvid.New8ByteAlignBytes(uint32(len(b.data))) 968 copy(replace.data, b.data) 969 if err = replace.setExportedVars(); err != nil { 970 replace = nil 971 return 972 } 973 974 for i, label := range replace.Labels { 975 replacement, found := mapping[label] 976 if found { 977 replaced = true 978 replace.Labels[i] = replacement 979 } 980 } 981 return 982 } 983 984 // sub-block data after downres. 985 type sbData struct { 986 indices []uint32 987 values []byte 988 } 989 990 // returns true if we set Block to blank 0 or some solid label 991 func (b *Block) setBlank(octants [8]*Block) bool { 992 var ok bool 993 var lbl uint64 994 if octants[0] == nil { 995 ok = true // nil octants are solid label 0 block 996 } else if len(octants[0].Labels) == 1 { 997 lbl = octants[0].Labels[0] 998 ok = true 999 } 1000 if ok { 1001 for i := 1; i < 8; i++ { 1002 if octants[i] == nil { 1003 if lbl != 0 { 1004 ok = false 1005 break 1006 } 1007 } else if len(octants[i].Labels) != 1 || lbl != octants[i].Labels[0] { 1008 ok = false 1009 break 1010 } 1011 } 1012 if ok { 1013 *b = *MakeSolidBlock(lbl, b.Size) 1014 return true 1015 } 1016 } 1017 return false 1018 } 1019 1020 // DownresSlow is same as Downres() but uses simpler and more memory/compute-intensive 1021 // approach to computing down-res block. 
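//
// Octant ordering sketch (not part of the original source): the eight octants are
// indexed so that i = oz*4 + oy*2 + ox and are decoded as
//
//	oz := i >> 2
//	oy := (i - oz*4) >> 1
//	ox := i % 2
//
// so octant 5 is (ox,oy,oz) = (1,0,1), and its down-sampled voxels land at offset
// (ox*nx/2, oy*ny/2, oz*nz/2) in the receiving block, e.g. (32,0,32) for a
// 64x64x64 block.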
1022 func (b *Block) DownresSlow(octants [8]*Block) error { 1023 filled := true 1024 for i := 0; i < 8; i++ { 1025 if octants[i] == nil { 1026 filled = false 1027 break 1028 } 1029 } 1030 var result []byte 1031 if filled { 1032 numArrayBytes := b.Size.Prod() * 8 1033 result = make([]byte, numArrayBytes) 1034 } else { 1035 result, _ = b.MakeLabelVolume() 1036 } 1037 1038 for i := 0; i < 8; i++ { 1039 if octants[i] == nil { 1040 continue 1041 } 1042 uint64array, size := octants[i].MakeLabelVolume() 1043 if !size.Equals(b.Size) { 1044 return fmt.Errorf("octant block size %s not equal to original size %s", size, b.Size) 1045 } 1046 1047 oz := int32(i) >> 2 1048 oy := (int32(i) - oz*4) >> 1 1049 ox := int32(i) % 2 1050 vz := oz * b.Size[2] >> 1 // voxel offset into result array 1051 vy := oy * b.Size[1] >> 1 1052 vx := ox * b.Size[0] >> 1 1053 1054 downresArray(uint64array, result, vx, vy, vz, size) 1055 } 1056 1057 resultBlock, err := MakeBlock(result, b.Size) 1058 if err != nil { 1059 return err 1060 } 1061 *b = *resultBlock 1062 return nil 1063 } 1064 1065 // TODO -- Not completely working, so stick with DownresSlow() until this becomes rises in priority. 1066 func (b *Block) DownresFast(octants [8]*Block) error { 1067 // check consistency of octants 1068 var gx, gy, gz int32 // # sub-blocks in each dimension 1069 var filled, maxLabels, maxSBIndices int 1070 for i, oct := range octants { 1071 if oct != nil { 1072 filled++ 1073 maxLabels += len(oct.Labels) 1074 x, y, z := oct.Size[0]/SubBlockSize, oct.Size[1]/SubBlockSize, oct.Size[2]/SubBlockSize 1075 if filled > 1 && (x != gx || y != gy || z != gz) { 1076 return fmt.Errorf("can't downres with octants of different size: (%d,%d,%d) vs (%d,%d,%d)", x, y, z, gx, gy, gz) 1077 } 1078 gx, gy, gz = x, y, z 1079 maxSBIndices += len(oct.SBIndices) 1080 fmt.Printf("Octant %d: %d labels, %d indices in %d x %d x %d sub-blocks\n", i, len(oct.Labels), len(oct.SBIndices), gx, gy, gz) 1081 // for n, lbl := range oct.Labels { 1082 // fmt.Printf("Label %d: %d\n", n, lbl) 1083 // } 1084 // for n, idx := range oct.SBIndices { 1085 // fmt.Printf("Index %d: %d\n", n, idx) 1086 // } 1087 } else { 1088 fmt.Printf("Octant %d is nil\n", i) 1089 } 1090 } 1091 // fmt.Printf("Complete\n") 1092 if filled == 0 { 1093 return nil 1094 } 1095 if gx%2 != 0 || gy%2 != 0 || gz%2 != 0 { 1096 return fmt.Errorf("downres of octants only works for block-sizes with even # of sub-blocks: not %d x %d x %d", gx, gy, gz) 1097 } 1098 1099 // Allocate temp buffers for lores block, which we need since we'll be visiting them out of order. 1100 numSubBlocks := gx * gy * gz 1101 var loNumLabels uint32 1102 loLabels := make(map[uint64]uint32, maxLabels) 1103 loSBIndices := make([][]uint32, numSubBlocks) 1104 loSBValues := make([][]byte, numSubBlocks) 1105 1106 // Iterate through all octants, doing downres and filling in corresponding sub-blocks. 1107 octIndices := make([][]uint32, numSubBlocks) 1108 octValues := make([][]byte, numSubBlocks) 1109 var hires [8]sbData 1110 for i, oct := range octants { 1111 oz := int32(i) >> 2 1112 oy := (int32(i) - oz*4) >> 1 1113 ox := int32(i) % 2 1114 osbz := oz * gz >> 1 // octant sub-block offset 1115 osby := oy * gy >> 1 1116 osbx := ox * gx >> 1 1117 fmt.Printf("octant (%d,%d,%d)\n", ox, oy, oz) 1118 if oct == nil || len(oct.Labels) <= 1 { 1119 // blank out all sub-blocks for this octant. 
1120 var indices []uint32 1121 var lbl uint64 1122 if oct != nil && len(oct.Labels) == 1 { 1123 lbl = oct.Labels[0] 1124 } 1125 _, found := loLabels[lbl] 1126 if !found { 1127 loLabels[lbl] = loNumLabels 1128 indices = []uint32{loNumLabels} 1129 loNumLabels++ 1130 } 1131 var sbx, sby, sbz int32 1132 for sbz = 0; sbz < gz; sbz += 2 { 1133 for sby = 0; sby < gy; sby += 2 { 1134 for sbx = 0; sbx < gx; sbx += 2 { 1135 loSBNum := (osbz+(sbz>>1))*gx*gy + (osby+(sby>>1))*gx + osbx + (sbx >> 1) 1136 loSBIndices[loSBNum] = indices 1137 loSBValues[loSBNum] = nil 1138 } 1139 } 1140 } 1141 } else { 1142 // get the index and value slices for each sub-block. 1143 var indexPos, valuePos int 1144 for sb := int32(0); sb < numSubBlocks; sb++ { 1145 numLabels := int(oct.NumSBLabels[sb]) 1146 valueBits := SubBlockSize * SubBlockSize * SubBlockSize * int(bitsFor(uint16(numLabels))) 1147 valueBytes := valueBits >> 3 1148 if valueBits%8 != 0 { 1149 valueBytes++ 1150 } 1151 octIndices[sb] = oct.SBIndices[indexPos : indexPos+numLabels] 1152 octValues[sb] = oct.SBValues[valuePos : valuePos+valueBytes] 1153 indexPos += numLabels 1154 valuePos += valueBytes 1155 // fmt.Printf(" sub-block %d: %d labels, %d value bits, %d value bytes / indexpos %d, valuepos %d\n", sb, numLabels, valueBits, valueBytes, indexPos, valuePos) 1156 } 1157 1158 // iterate through sub-blocks for each octant, down-res them, and integrate them into the low-res block. 1159 var x, y, z, sbx, sby, sbz int32 1160 for sbz = 0; sbz < gz; sbz += 2 { 1161 for sby = 0; sby < gy; sby += 2 { 1162 for sbx = 0; sbx < gx; sbx += 2 { 1163 var i int 1164 for z = 0; z < 2; z++ { 1165 for y = 0; y < 2; y++ { 1166 for x = 0; x < 2; x++ { 1167 sbNum := (sbz+z)*gx*gy + (sby+y)*gx + sbx + x 1168 hires[i].indices = octIndices[sbNum] 1169 hires[i].values = octValues[sbNum] 1170 i++ 1171 } 1172 } 1173 } 1174 1175 // fmt.Printf("sub-block(%d,%d,%d) hires: %v\n", sbx, sby, sbz, hires) 1176 lores, err := downresSubBlock(hires) 1177 if err != nil { 1178 return err 1179 } 1180 1181 // set this low-res sub-block where we use index into lo-res block 1182 loSBNum := (osbz+(sbz>>1))*gx*gy + (osby+(sby>>1))*gx + osbx + (sbx >> 1) 1183 loSBIndices[loSBNum] = make([]uint32, len(lores.indices)) 1184 loSBValues[loSBNum] = lores.values 1185 for i, idx := range lores.indices { 1186 lbl := oct.Labels[idx] 1187 bindex, found := loLabels[lbl] 1188 if !found { 1189 bindex = loNumLabels 1190 loLabels[lbl] = loNumLabels 1191 loNumLabels++ 1192 } 1193 loSBIndices[loSBNum][i] = bindex 1194 } 1195 } 1196 } 1197 } 1198 } 1199 } 1200 1201 // Reconstitute the Block using the temporary buffers for the lores block. 
1202 return b.setData(uint32(gx), uint32(gy), uint32(gz), loLabels, loSBIndices, loSBValues) 1203 } 1204 1205 // downres hires array by 2x and store in portion of lores array with offset voxel (vx, vy, vz) 1206 func downresArray(hires, lores []byte, vx, vy, vz int32, blockSize dvid.Point3d) { 1207 votemap := make(map[uint64]int, 8) 1208 nyx := blockSize[0] * blockSize[1] 1209 var lx, ly, lz, x, y, z int32 1210 for z = 0; z < blockSize[2]; z += 2 { 1211 lz = z >> 1 1212 for y = 0; y < blockSize[1]; y += 2 { 1213 ly = y >> 1 1214 for x = 0; x < blockSize[0]; x += 2 { 1215 lx = x >> 1 1216 1217 var ix, iy, iz int32 1218 for iz = 0; iz < 2; iz++ { 1219 for iy = 0; iy < 2; iy++ { 1220 for ix = 0; ix < 2; ix++ { 1221 i := (z+iz)*nyx + (y+iy)*blockSize[0] + x + ix 1222 lbl := binary.LittleEndian.Uint64(hires[i*8 : i*8+8]) 1223 if lbl != 0 { 1224 votemap[lbl]++ 1225 } 1226 } 1227 } 1228 } 1229 1230 var winner uint64 1231 var winnerVotes int 1232 for lbl, votes := range votemap { 1233 if winnerVotes < votes { 1234 winnerVotes = votes 1235 winner = lbl 1236 } else if winnerVotes == votes && lbl < winner { 1237 winnerVotes = votes 1238 winner = lbl 1239 } 1240 delete(votemap, lbl) 1241 } 1242 li := (lz+vz)*nyx + (ly+vy)*blockSize[0] + lx + vx 1243 binary.LittleEndian.PutUint64(lores[li*8:li*8+8], winner) 1244 } 1245 } 1246 } 1247 } 1248 1249 // DownresLabels takes an array of uint64 in []byte format and returns a 2x down-sampled 1250 // array of uint64. An error is returned if size is not multiple of two or passed array 1251 // is incorrect size. 1252 func DownresLabels(hires []byte, hisize dvid.Point3d) (lores []byte, err error) { 1253 loresBytes := int(hisize[0] * hisize[1] * hisize[2]) 1254 hiresBytes := loresBytes * 8 1255 if len(hires) != hiresBytes { 1256 err = fmt.Errorf("hi-res array had %d bytes, expected %d bytes for %s uint64 volume", len(hires), hiresBytes, hisize) 1257 return 1258 } 1259 if hisize[0]%2 != 0 || hisize[1]%2 != 0 || hisize[2]%2 != 0 { 1260 err = fmt.Errorf("hi-res array to be down-sampled must have size equal to power of two") 1261 return 1262 } 1263 var losize dvid.Point3d 1264 losize[0] = hisize[0] >> 1 1265 losize[1] = hisize[1] >> 1 1266 losize[2] = hisize[2] >> 1 1267 lores = make([]byte, loresBytes) 1268 1269 votemap := make(map[uint64]int, 8) 1270 var lx, ly, lz, x, y, z int32 1271 for z = 0; z < hisize[2]; z += 2 { 1272 lz = z >> 1 1273 for y = 0; y < hisize[1]; y += 2 { 1274 ly = y >> 1 1275 for x = 0; x < hisize[0]; x += 2 { 1276 lx = x >> 1 1277 1278 var ix, iy, iz int32 1279 for iz = 0; iz < 2; iz++ { 1280 for iy = 0; iy < 2; iy++ { 1281 for ix = 0; ix < 2; ix++ { 1282 i := (z+iz)*hisize[0]*hisize[1] + (y+iy)*hisize[0] + x + ix 1283 lbl := binary.LittleEndian.Uint64(hires[i*8 : i*8+8]) 1284 if lbl != 0 { 1285 votemap[lbl]++ 1286 } 1287 } 1288 } 1289 } 1290 1291 var winner uint64 1292 var winnerVotes int 1293 for lbl, votes := range votemap { 1294 if winnerVotes < votes { 1295 winnerVotes = votes 1296 winner = lbl 1297 } else if winnerVotes == votes && lbl < winner { 1298 winnerVotes = votes 1299 winner = lbl 1300 } 1301 delete(votemap, lbl) 1302 } 1303 li := lz*losize[1]*losize[0] + ly*losize[0] + lx 1304 binary.LittleEndian.PutUint64(lores[li*8:li*8+8], winner) 1305 } 1306 } 1307 } 1308 return 1309 } 1310 1311 // Downres takes eight Blocks that represent higher-resolution octants (by 2x) of 1312 // the receiving block, and modifies the receiving Block to be a half-resolution 1313 // representation. 
If a given octant is a nil Block, the receiving Block is not modified 1314 // for that portion of the higher-resolution octant. 1315 func (b *Block) Downres(octants [8]*Block) error { 1316 if b == nil { 1317 return fmt.Errorf("block cannot be nil but can be an empty block") 1318 } 1319 1320 // if all octants are solid and of same label, produce a solid block as well. 1321 if b.setBlank(octants) { 1322 return nil 1323 } 1324 1325 err := b.DownresSlow(octants) 1326 return err 1327 } 1328 1329 func (b *Block) setData(gx, gy, gz uint32, sbLabels map[uint64]uint32, sbIndices [][]uint32, sbValues [][]byte) error { 1330 if len(sbIndices) != len(sbValues) { 1331 return fmt.Errorf("indices and values slices must have same # of sub-blocks: %d != %d", len(sbIndices), len(sbValues)) 1332 } 1333 numLabels := uint32(len(sbLabels)) 1334 numSubBlocks := gx * gy * gz 1335 1336 var numSBIndices, sbIndexBytes, numSBValueBytes uint32 1337 if numLabels <= 1 { 1338 b.data, b.dataSrc = dvid.New8ByteAlignBytes(16 + numLabels*8) 1339 } else { 1340 for _, indices := range sbIndices { 1341 numSBIndices += uint32(len(indices)) 1342 } 1343 sbIndexBytes = numSBIndices * 4 1344 for _, valueBytes := range sbValues { 1345 numSBValueBytes += uint32(len(valueBytes)) 1346 } 1347 blockBytes := 16 + numLabels*8 + numSubBlocks*2 + sbIndexBytes + numSBValueBytes 1348 b.data, b.dataSrc = dvid.New8ByteAlignBytes(blockBytes) 1349 } 1350 1351 binary.LittleEndian.PutUint32(b.data[0:4], gx) 1352 binary.LittleEndian.PutUint32(b.data[4:8], gy) 1353 binary.LittleEndian.PutUint32(b.data[8:12], gz) 1354 binary.LittleEndian.PutUint32(b.data[12:16], numLabels) 1355 1356 pos := uint32(16) 1357 var err error 1358 b.Labels, err = dvid.AliasByteToUint64(b.data[pos : pos+numLabels*8]) 1359 if err != nil { 1360 return err 1361 } 1362 for label, index := range sbLabels { 1363 b.Labels[index] = label 1364 } 1365 1366 if numLabels <= 1 { 1367 return nil 1368 } 1369 1370 if gx*gy*gz != uint32(len(sbIndices)) { 1371 return fmt.Errorf("number of sub-blocks from gx * gy * gz (%d) != sub-block indices (%d)", gx*gy*gz, len(sbIndices)) 1372 } 1373 1374 pos += numLabels * 8 1375 nbytes := numSubBlocks * 2 1376 b.NumSBLabels, err = dvid.AliasByteToUint16(b.data[pos : pos+nbytes]) 1377 if err != nil { 1378 return err 1379 } 1380 pos += nbytes 1381 b.SBIndices, err = dvid.AliasByteToUint32(b.data[pos : pos+sbIndexBytes]) 1382 if err != nil { 1383 return err 1384 } 1385 pos += sbIndexBytes 1386 b.SBValues = b.data[pos:] 1387 1388 idxpos := 0 1389 valpos := 0 1390 for sbNum, indices := range sbIndices { 1391 numIndices := len(indices) 1392 b.NumSBLabels[sbNum] = uint16(numIndices) 1393 copy(b.SBIndices[idxpos:idxpos+numIndices], indices) 1394 idxpos += numIndices 1395 1396 valbytes := len(sbValues[sbNum]) 1397 copy(b.SBValues[valpos:valpos+valbytes], sbValues[sbNum]) 1398 valpos += valbytes 1399 } 1400 return nil 1401 } 1402 1403 // take eight 3d neighboring sub-blocks and make one 2x downres sub-block. 1404 // allow missing sub-blocks. 
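// Each lo-res voxel is decided by a vote over its 2x2x2 hi-res children, mirroring
// downresArray and DownresLabels above: label 0 never votes, the label with the
// most votes wins, and in those uint64-array versions ties go to the smaller label.
// Illustrative example (not from the original source): children
// {7, 7, 12, 12, 0, 0, 0, 0} give labels 7 and 12 two votes each, so the lo-res
// voxel becomes 7. In this function the vote is carried out on label indices
// rather than on the labels themselves.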
1405 func downresSubBlock(hires [8]sbData) (lores sbData, err error) { 1406 numSBVoxels := uint32(SubBlockSize * SubBlockSize * SubBlockSize) 1407 sbValues := make([]uint32, numSBVoxels*8) 1408 1409 // first pass: write 8 indices for each down-res voxel 1410 for i := uint32(0); i < 8; i++ { 1411 if len(hires[i].indices) == 0 { 1412 continue 1413 } 1414 oz := i >> 2 1415 oy := (i - oz*4) >> 1 1416 ox := i % 2 1417 1418 oz *= SubBlockSize >> 1 1419 oy *= SubBlockSize >> 1 1420 ox *= SubBlockSize >> 1 1421 1422 var x, y, z, idx, bitpos uint32 1423 bits := bitsFor(uint16(len(hires[i].indices))) 1424 if bits == 0 { 1425 idx = hires[i].indices[0] 1426 } 1427 for z = 0; z < SubBlockSize; z++ { 1428 dz := (z>>1)*SubBlockSize*SubBlockSize + oz 1429 offz := (z % 2) >> 2 1430 for y = 0; y < SubBlockSize; y++ { 1431 dyz := dz + (y>>1)*SubBlockSize + oy 1432 offyz := offz + (y%2)>>1 1433 for x = 0; x < SubBlockSize; x++ { 1434 if bits > 0 { 1435 val := getPackedValue(hires[i].values, bitpos, bits) 1436 idx = hires[i].indices[val] 1437 bitpos += bits 1438 } 1439 1440 dx := (x >> 1) + ox 1441 off := offyz + x%2 1442 v := (dyz+dx)*8 + off 1443 sbValues[v] = idx 1444 } 1445 } 1446 } 1447 } 1448 1449 // second pass: find best index for each down-res voxel and store. 1450 var numIndices uint32 1451 sbIndices := make(map[uint32]uint32) // cache sub-block index and eventual down-res block index 1452 loresValues := make([]uint32, numSBVoxels) 1453 1454 votemap := make(map[uint32]int, 8) 1455 for v := uint32(0); v < numSBVoxels; v++ { 1456 for i := uint32(0); i < 8; i++ { 1457 idx := sbValues[v*8+i] 1458 if idx != 0 { 1459 votemap[idx]++ 1460 } 1461 } 1462 var winner uint32 1463 var winnerVotes int 1464 for idx, votes := range votemap { 1465 if winnerVotes < votes { 1466 winnerVotes = votes 1467 winner = idx 1468 } 1469 delete(votemap, idx) 1470 } 1471 1472 index, found := sbIndices[winner] 1473 if !found { 1474 index = numIndices 1475 sbIndices[winner] = index 1476 numIndices++ 1477 } 1478 loresValues[v] = index 1479 } 1480 1481 // third pass: pack indices using minimal bits encoding. 1482 bits := bitsFor(uint16(numIndices)) 1483 valueBits := numSBVoxels * bits 1484 valueBytes := valueBits >> 3 1485 if valueBits%8 != 0 { 1486 valueBytes++ 1487 } 1488 // fmt.Printf("numIndices %d, bits %d, valueBits %d, valueBytes %d\n", numIndices, bits, valueBits, valueBytes) 1489 1490 lores.indices = make([]uint32, numIndices) 1491 for idx, i := range sbIndices { 1492 lores.indices[i] = idx 1493 } 1494 if valueBytes > 0 { 1495 lores.values = make([]byte, valueBytes) 1496 var bitpos uint32 1497 for v := uint32(0); v < numSBVoxels; v++ { 1498 index := loresValues[v] 1499 bithead := bitpos % 8 1500 bytepos := bitpos >> 3 1501 if bithead+bits <= 8 { 1502 // index totally within this byte 1503 leftshift := uint(8 - bits - bithead) 1504 lores.values[bytepos] |= byte(index << leftshift) 1505 } else { 1506 // this straddles a byte boundary 1507 leftshift := uint(16 - bits - bithead) 1508 index <<= leftshift 1509 lores.values[bytepos] |= byte((index & 0xFF00) >> 8) 1510 lores.values[bytepos+1] = byte(index & 0x00FF) 1511 } 1512 bitpos += bits 1513 } 1514 } 1515 return 1516 } 1517 1518 // Value returns the label for a voxel using its 3d location within block. If the given 1519 // location is outside the block extent, label 0 is returned. Note that this function 1520 // is inefficient for multi-voxel value retrieval. 
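//
// Usage sketch (not part of the original source; the coordinate is block-local and
// hypothetical):
//
//	label := b.Value(dvid.Point3d{17, 42, 3})
//
// For many voxels, prefer GetPointLabels or a full expansion via MakeLabelVolume,
// since Value rescans the preceding sub-block metadata on every call.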
1521 func (b *Block) Value(pos dvid.Point3d) uint64 { 1522 if pos[0] < 0 || pos[0] >= b.Size[0] || pos[1] < 0 || pos[1] >= b.Size[1] || pos[2] < 0 || pos[2] >= b.Size[2] { 1523 return 0 1524 } 1525 if len(b.Labels) == 0 { 1526 return 0 1527 } 1528 if len(b.Labels) == 1 { 1529 return b.Labels[0] 1530 } 1531 sbz := pos[2] / SubBlockSize 1532 sby := pos[1] / SubBlockSize 1533 sbx := pos[0] / SubBlockSize 1534 gx, gy := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize 1535 sbNum := sbz*gx*gy + sby*gx + sbx 1536 var bitPos uint32 1537 var idxPos int 1538 for sb := int32(0); sb < sbNum; sb++ { 1539 n := b.NumSBLabels[sb] 1540 idxPos += int(n) 1541 if n > 1 { 1542 bits := bitsFor(n) 1543 bitPos += SubBlockSize * SubBlockSize * SubBlockSize * bits 1544 if bitPos%8 != 0 { 1545 bitPos += 8 - (bitPos % 8) 1546 } 1547 } 1548 } 1549 n := b.NumSBLabels[sbNum] 1550 bits := bitsFor(n) 1551 if bits == 0 { 1552 idx := b.SBIndices[idxPos] 1553 return b.Labels[idx] 1554 } 1555 x, y, z := pos[0]%SubBlockSize, pos[1]%SubBlockSize, pos[2]%SubBlockSize 1556 bitPos += uint32(z*SubBlockSize*SubBlockSize+y*SubBlockSize+x) * bits 1557 val := getPackedValue(b.SBValues, bitPos, bits) 1558 index := b.SBIndices[idxPos+int(val)] 1559 return b.Labels[index] 1560 } 1561 1562 // MakeLabelVolume returns a byte slice with packed little-endian uint64 labels in ZYX order, 1563 // i.e., a uint64 for each voxel where consecutive values are in the (x,y,z) order: 1564 // (0,0,0), (1,0,0), (2,0,0) ... (0,1,0) 1565 // There is no sharing of memory between the returned byte slice and the Block data. 1566 func (b Block) MakeLabelVolume() (uint64array []byte, size dvid.Point3d) { 1567 size = b.Size 1568 1569 numVoxels := b.Size.Prod() 1570 uint64array = make([]byte, numVoxels*8) 1571 outarray, _ := dvid.AliasByteToUint64(uint64array) 1572 1573 gx, gy, gz := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize, b.Size[2]/SubBlockSize 1574 1575 if len(b.Labels) < 2 { 1576 var label uint64 1577 if len(b.Labels) == 1 { 1578 label = b.Labels[0] 1579 } 1580 for i := int64(0); i < numVoxels; i++ { 1581 outarray[i] = label 1582 } 1583 return 1584 } 1585 1586 subBlockNumVoxels := SubBlockSize * SubBlockSize * SubBlockSize 1587 sbLabels := make([]uint64, subBlockNumVoxels) // preallocate max # of labels for sub-block 1588 1589 var indexPos, bitpos uint32 1590 var subBlockNum int 1591 var sx, sy, sz int32 1592 for sz = 0; sz < gz; sz++ { 1593 for sy = 0; sy < gy; sy++ { 1594 for sx = 0; sx < gx; sx++ { 1595 1596 numSBLabels := b.NumSBLabels[subBlockNum] 1597 bits := bitsFor(numSBLabels) 1598 1599 for i := uint16(0); i < numSBLabels; i++ { 1600 sbLabels[i] = b.Labels[b.SBIndices[indexPos]] 1601 indexPos++ 1602 } 1603 1604 lblpos := sz*SubBlockSize*b.Size[0]*b.Size[1] + sy*SubBlockSize*b.Size[0] + sx*SubBlockSize 1605 1606 var x, y, z int32 1607 for z = 0; z < SubBlockSize; z++ { 1608 for y = 0; y < SubBlockSize; y++ { 1609 for x = 0; x < SubBlockSize; x++ { 1610 switch numSBLabels { 1611 case 0: 1612 outarray[lblpos] = 0 1613 case 1: 1614 outarray[lblpos] = sbLabels[0] 1615 default: 1616 index := getPackedValue(b.SBValues, bitpos, bits) 1617 outarray[lblpos] = sbLabels[index] 1618 bitpos += bits 1619 } 1620 lblpos++ 1621 } 1622 lblpos += b.Size[0] - SubBlockSize 1623 } 1624 lblpos += b.Size[0]*b.Size[1] - b.Size[0]*SubBlockSize 1625 } 1626 if bitpos%8 != 0 { 1627 bitpos += 8 - (bitpos % 8) 1628 } 1629 subBlockNum++ 1630 } 1631 } 1632 } 1633 return 1634 } 1635 1636 // WriteLabelVolume streams uncompressed, packed little-endian uint64 labels in ZYX order, 
1637 // i.e., a uint64 for each voxel where consecutive values are in the (x,y,z) order: 1638 // (0,0,0), (1,0,0), (2,0,0) ... (0,1,0) 1639 func (b Block) WriteLabelVolume(w io.Writer) error { 1640 gx, gy, gz := b.Size[0]/SubBlockSize, b.Size[1]/SubBlockSize, b.Size[2]/SubBlockSize 1641 1642 blockSize := b.Size.Prod() 1643 1644 if len(b.Labels) < 2 { 1645 var label uint64 1646 if len(b.Labels) == 1 { 1647 label = b.Labels[0] 1648 } 1649 for i := int64(0); i < blockSize; i++ { 1650 if err := binary.Write(w, binary.LittleEndian, label); err != nil { 1651 return err 1652 } 1653 } 1654 return nil 1655 } 1656 1657 sliceSize := b.Size[0] * b.Size[1] * SubBlockSize // # of voxels in a single XY subblock plane 1658 sliceVoxels := make([]uint64, sliceSize) 1659 1660 subBlockNumVoxels := SubBlockSize * SubBlockSize * SubBlockSize 1661 sbLabels := make([]uint64, subBlockNumVoxels) // preallocate max # of labels for sub-block 1662 1663 // Since we must write in ZYX order, to prevent bookkeeping, we should process outputs 1664 // one XY sub-block plane at a time. 1665 var writePos int32 1666 var indexPos, bitpos uint32 1667 var subBlockNum int 1668 var sx, sy, sz int32 1669 for sz = 0; sz < gz; sz++ { 1670 1671 // Load all sub-block labels for this Z plane. 1672 for sy = 0; sy < gy; sy++ { 1673 for sx = 0; sx < gx; sx++ { 1674 numSBLabels := b.NumSBLabels[subBlockNum] 1675 bits := bitsFor(numSBLabels) 1676 1677 for i := uint16(0); i < numSBLabels; i++ { 1678 sbLabels[i] = b.Labels[b.SBIndices[indexPos]] 1679 indexPos++ 1680 } 1681 1682 lblpos := sy*SubBlockSize*b.Size[0] + sx*SubBlockSize 1683 var x, y, z int32 1684 for z = 0; z < SubBlockSize; z++ { 1685 for y = 0; y < SubBlockSize; y++ { 1686 for x = 0; x < SubBlockSize; x++ { 1687 switch numSBLabels { 1688 case 0: 1689 sliceVoxels[lblpos] = 0 1690 case 1: 1691 sliceVoxels[lblpos] = sbLabels[0] 1692 default: 1693 index := getPackedValue(b.SBValues, bitpos, bits) 1694 sliceVoxels[lblpos] = sbLabels[index] 1695 bitpos += bits 1696 } 1697 lblpos++ 1698 } 1699 lblpos += b.Size[0] - SubBlockSize 1700 } 1701 lblpos += b.Size[0]*b.Size[1] - b.Size[0]*SubBlockSize 1702 } 1703 if bitpos%8 != 0 { 1704 bitpos += 8 - (bitpos % 8) 1705 } 1706 subBlockNum++ 1707 } 1708 } 1709 1710 // Write out the slice in ZYX order. 1711 var lblpos int32 1712 for z := int32(0); z < SubBlockSize; z++ { 1713 for y := int32(0); y < b.Size[1]; y++ { 1714 for x := int32(0); x < b.Size[0]; x++ { 1715 if err := binary.Write(w, binary.LittleEndian, sliceVoxels[lblpos]); err != nil { 1716 return fmt.Errorf("aborted block write at voxel %d of %d: %v", 1717 writePos+lblpos, blockSize, err) 1718 } 1719 lblpos++ 1720 } 1721 } 1722 } 1723 writePos += sliceSize 1724 } 1725 return nil 1726 } 1727 1728 // TODO if needed for performance: 1729 // WriteNeuroglancer streams neuroglancer segmentation compression format. 1730 // This integrates the approach of labelmap.compressGoogle() into block.WriteLabelVolume() 1731 // in a sub-block slice-at-a-time fashion. 1732 // However the write error termination is only useful if we are not doing gzip compression 1733 // afterwards. 1734 // 1735 // func (b Block) WriteNeuroglancer(w io.Writer) error { 1736 // } 1737 1738 // MarshalBinary implements the encoding.BinaryMarshaler interface. Note that for 1739 // efficiency, the returned byte slice will share memory with the receiver Block. 1740 func (b Block) MarshalBinary() ([]byte, error) { 1741 return b.data, nil 1742 } 1743 1744 // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
The source 1745 // byte slice is copied into a new 8-byte aligned slice so the receiver block does 1746 // not depend on the passed slice. 1747 func (b *Block) UnmarshalBinary(data []byte) error { 1748 if len(data) < 24 { 1749 return fmt.Errorf("can't unmarshal block binary of length %d", len(data)) 1750 } 1751 numBytes := uint32(len(data)) 1752 b.data, b.dataSrc = dvid.New8ByteAlignBytes(numBytes) 1753 copy(b.data, data) 1754 return b.setExportedVars() 1755 } 1756 1757 // StringDump returns a string that lists pretty-printed data from the block. 1758 func (b Block) StringDump(verbose bool) string { 1759 uint64array, size := b.MakeLabelVolume() 1760 s := fmt.Sprintf("Block size: %s\n", size) 1761 counts := make(map[uint64]int, len(b.Labels)) 1762 for i := 0; i < len(uint64array); i += 8 { 1763 label := binary.LittleEndian.Uint64(uint64array[i : i+8]) 1764 counts[label]++ 1765 } 1766 seen := make(Set) 1767 for _, label := range b.Labels { 1768 s += fmt.Sprintf("Label %10d: ", label) 1769 if _, found := seen[label]; found { 1770 s += fmt.Sprintf("duplicate\n") 1771 } else { 1772 s += fmt.Sprintf("%d voxels\n", counts[label]) 1773 seen[label] = struct{}{} 1774 } 1775 } 1776 1777 subBlocksPerDim := int(size[0]) / SubBlockSize 1778 numSubBlocks := subBlocksPerDim * subBlocksPerDim * subBlocksPerDim 1779 if len(b.NumSBLabels) != numSubBlocks { 1780 s += fmt.Sprintf("Expected %d entries for number of sub-block indices but got %d instead!\n", numSubBlocks, len(b.NumSBLabels)) 1781 return s 1782 } 1783 if !verbose { 1784 return s 1785 } 1786 i := 0 1787 sb := 0 1788 for z := 0; z < subBlocksPerDim; z++ { 1789 z0 := z * SubBlockSize 1790 z1 := z0 + SubBlockSize - 1 1791 for y := 0; y < subBlocksPerDim; y++ { 1792 y0 := y * SubBlockSize 1793 y1 := y0 + SubBlockSize - 1 1794 for x := 0; x < subBlocksPerDim; x++ { 1795 x0 := x * SubBlockSize 1796 x1 := x0 + SubBlockSize - 1 1797 s += fmt.Sprintf("(%2d-%2d, %2d-%2d, %2d-%2d) ", x0, x1, y0, y1, z0, z1) 1798 for n := 0; n < int(b.NumSBLabels[sb]); n++ { 1799 index := b.SBIndices[i] 1800 label := b.Labels[index] 1801 s += fmt.Sprintf("%d [%d] ", b.SBIndices[i], label) 1802 i++ 1803 } 1804 s += fmt.Sprintf("\n") 1805 sb++ 1806 } 1807 } 1808 } 1809 return s 1810 } 1811 1812 // assumes b.data is set and we need to compute all other properties of a Block 1813 func (b *Block) setExportedVars() (err error) { 1814 // Get the sub-blocks along each dimension 1815 gx := binary.LittleEndian.Uint32(b.data[0:4]) 1816 gy := binary.LittleEndian.Uint32(b.data[4:8]) 1817 gz := binary.LittleEndian.Uint32(b.data[8:12]) 1818 numSubBlocks := uint32(gx * gy * gz) 1819 1820 b.Size[0] = int32(gx * SubBlockSize) 1821 b.Size[1] = int32(gy * SubBlockSize) 1822 b.Size[2] = int32(gz * SubBlockSize) 1823 1824 numLabels := binary.LittleEndian.Uint32(b.data[12:16]) 1825 if numLabels == 0 { 1826 return fmt.Errorf("block has 0 labels, which is not allowed") 1827 } 1828 1829 if gx > MaxSubBlockSize || gy > MaxSubBlockSize || gz > MaxSubBlockSize { 1830 return fmt.Errorf("%d x %d x %d sub-blocks exceed max dimension of %d voxels (%d sub-blocks)", gx, gy, gz, MaxBlockSize, MaxSubBlockSize) 1831 } 1832 if numLabels > MaxBlockSize*MaxBlockSize*MaxBlockSize { 1833 return fmt.Errorf("number of labels (%d) exceeds what can be contained in max block size %d", numLabels, MaxBlockSize) 1834 } 1835 1836 b.Labels, err = dvid.AliasByteToUint64(b.data[16 : 16+numLabels*8]) 1837 if err != nil { 1838 return 1839 } 1840 1841 if len(b.Labels) <= 1 { 1842 b.NumSBLabels = nil 1843 b.SBIndices = nil 1844 
b.SBValues = nil 1845 return 1846 } 1847 1848 pos := uint32(16) 1849 pos += numLabels * 8 1850 nbytes := numSubBlocks * 2 1851 b.NumSBLabels, err = dvid.AliasByteToUint16(b.data[pos : pos+nbytes]) 1852 if err != nil { 1853 return 1854 } 1855 var numSubBlockIndices uint32 1856 for _, num := range b.NumSBLabels { 1857 numSubBlockIndices += uint32(num) 1858 } 1859 1860 pos += nbytes 1861 subBlockIndexBytes := numSubBlockIndices * 4 1862 b.SBIndices, err = dvid.AliasByteToUint32(b.data[pos : pos+subBlockIndexBytes]) 1863 if err != nil { 1864 return 1865 } 1866 1867 pos += subBlockIndexBytes 1868 b.SBValues = b.data[pos:] 1869 return 1870 } 1871 1872 // immutable representation of (y,z) coordinate, suitable for maps. 1873 type yzString string 1874 1875 func getImmutableYZ(y, z int32) yzString { 1876 buf := make([]byte, 8) 1877 binary.LittleEndian.PutUint32(buf[0:4], uint32(y)) 1878 binary.LittleEndian.PutUint32(buf[4:8], uint32(z)) 1879 return yzString(buf) 1880 } 1881 1882 // tracks all RLEs that halted at X edge of a past Block. 1883 type rleBuffer struct { 1884 rles map[yzString]dvid.RLE 1885 coord dvid.ChunkPoint3d // block coord 1886 } 1887 1888 // WriteTo fulfills the io.WriterTo interface. 1889 func (r rleBuffer) WriteTo(w io.Writer) (n int64, err error) { 1890 for _, rle := range r.rles { 1891 var curN int64 1892 curN, err = rle.WriteTo(w) 1893 if err != nil { 1894 return 1895 } 1896 n += curN 1897 } 1898 return 1899 } 1900 1901 // sends all RLEs in buffer without clearing. 1902 func (r rleBuffer) flush(w io.Writer) error { 1903 if len(r.rles) != 0 { 1904 if _, err := r.WriteTo(w); err != nil { 1905 return err 1906 } 1907 } 1908 return nil 1909 } 1910 1911 func (r rleBuffer) clear() { 1912 for yz := range r.rles { 1913 delete(r.rles, yz) 1914 } 1915 } 1916 1917 func (r rleBuffer) extend(yz yzString, pt dvid.Point3d) { 1918 rle, found := r.rles[yz] 1919 if found { 1920 rle.Extend(1) 1921 r.rles[yz] = rle 1922 } else { 1923 r.rles[yz] = dvid.NewRLE(pt, 1) 1924 } 1925 } 1926 1927 // OutputOp provides a way to communicate with writing goroutines, 1928 // TODO: concurrency support on the given io.Writer. 1929 type OutputOp struct { 1930 w io.Writer 1931 pbCh chan *PositionedBlock 1932 errCh chan error 1933 sync.Mutex // lock on writing 1934 } 1935 1936 func NewOutputOp(w io.Writer) *OutputOp { 1937 op := new(OutputOp) 1938 op.w = w 1939 op.pbCh = make(chan *PositionedBlock, 1000) 1940 op.errCh = make(chan error) 1941 return op 1942 } 1943 1944 func (op OutputOp) Process(pb *PositionedBlock) { 1945 op.pbCh <- pb 1946 } 1947 1948 // Finish signals all input to an OutputOp is done and waits for completion. 1949 // Any error from the OutputOp is returned. 1950 func (op OutputOp) Finish() error { 1951 close(op.pbCh) 1952 err := <-op.errCh 1953 return err 1954 } 1955 1956 // WriteRLEs, like WriteBinaryBlocks, writes a compact serialization of a binarized Block to 1957 // the supplied Writer. In this case, the serialization uses little-endian encoded integers 1958 // and RLEs with the repeating units of the following format: 1959 // 1960 // int32 Coordinate of run start (dimension 0) 1961 // int32 Coordinate of run start (dimension 1) 1962 // int32 Coordinate of run start (dimension 2) 1963 // int32 Length of run in X direction 1964 // 1965 // The offset is the DVID space offset to the first voxel in the Block. After the RLEs have 1966 // been written to the io.Writer, an error message is sent down the given errCh. 
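//
// Illustrative usage sketch (not part of the original source): lbls (Set), positionedBlocks ([]*PositionedBlock),
// and bounds (dvid.Bounds) are assumed to be supplied by the caller. Blocks should be streamed in ascending X
// order within each block row, since WriteRLEs flushes buffered runs whenever a block is not the +X neighbor
// of the previously processed block:
//
//	var buf bytes.Buffer
//	op := NewOutputOp(&buf)
//	go WriteRLEs(lbls, op, bounds)
//	for _, pb := range positionedBlocks {
//		op.Process(pb)
//	}
//	if err := op.Finish(); err != nil {
//		// handle serialization error
//	}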
1967 func WriteRLEs(lbls Set, op *OutputOp, bounds dvid.Bounds) { 1968 var rleBuf rleBuffer 1969 for pb := range op.pbCh { 1970 bcoord, err := pb.BCoord.ToChunkPoint3d() 1971 if err != nil { 1972 op.errCh <- err 1973 return 1974 } 1975 1976 labelIndices := make(map[uint32]struct{}) 1977 var inBlock bool 1978 for i, label := range pb.Labels { 1979 _, found := lbls[label] 1980 if found { 1981 labelIndices[uint32(i)] = struct{}{} 1982 inBlock = true 1983 // can't break here because there could be multiple entries for a label in a block 1984 // e.g., due to fast merge where a variety of previous labels gets set to merge label. 1985 } 1986 } 1987 if !inBlock { 1988 continue 1989 } 1990 1991 if rleBuf.rles == nil { // first target-containing block 1992 yzCap := pb.Size[1] * pb.Size[2] 1993 rleBuf.rles = make(map[yzString]dvid.RLE, yzCap) 1994 } else { 1995 expected := rleBuf.coord 1996 expected[0]++ 1997 if !expected.Equals(bcoord) { 1998 rleBuf.flush(op.w) 1999 rleBuf.clear() 2000 } 2001 } 2002 if err := pb.writeRLEs(labelIndices, op, &rleBuf, bounds); err != nil { 2003 op.errCh <- err 2004 return 2005 } 2006 rleBuf.coord = bcoord 2007 } 2008 2009 op.errCh <- rleBuf.flush(op.w) 2010 } 2011 2012 func (pb *PositionedBlock) writeRLEs(indices map[uint32]struct{}, op *OutputOp, rleBuf *rleBuffer, bounds dvid.Bounds) error { 2013 offset, err := pb.OffsetDVID() 2014 if err != nil { 2015 return err 2016 } 2017 minPt := offset 2018 maxPt := dvid.Point3d{ 2019 offset[0] + pb.Size[0] - 1, 2020 offset[1] + pb.Size[1] - 1, 2021 offset[2] + pb.Size[2] - 1, 2022 } 2023 if bounds.Exact && bounds.Voxel.IsSet() { 2024 bounds.Voxel.Adjust(&minPt, &maxPt) 2025 } 2026 2027 if len(pb.Labels) == 1 { 2028 writeSolidBlockRLEs(minPt, maxPt, offset, op, rleBuf, bounds) 2029 return nil 2030 } 2031 2032 var multiForeground bool 2033 var labelIndex uint32 2034 if len(indices) > 1 { 2035 multiForeground = true 2036 } else { 2037 for labelIndex = range indices { 2038 break 2039 } 2040 } 2041 2042 gx, gy, gz := pb.Size[0]/SubBlockSize, pb.Size[1]/SubBlockSize, pb.Size[2]/SubBlockSize 2043 numSubBlocks := uint32(gx * gy * gz) 2044 subBlockNumVoxels := SubBlockSize * SubBlockSize * SubBlockSize 2045 2046 sbIndexPos := make([]uint32, numSubBlocks) 2047 sbValuePos := make([]uint32, numSubBlocks) 2048 var j, k uint32 2049 for i, n := range pb.NumSBLabels { 2050 sbIndexPos[i] = j 2051 j += uint32(n) 2052 sbValuePos[i] = k 2053 bits := uint32(bitsFor(n)) 2054 sbBits := uint32(subBlockNumVoxels) * bits 2055 if sbBits%8 != 0 { 2056 sbBits += 8 - (sbBits % 8) 2057 } 2058 k += sbBits 2059 } 2060 2061 curIndices := make([]uint32, subBlockNumVoxels) // preallocate max # of indices for sub-block 2062 2063 // Keep track of the bit position in each sub-blocks values byte slice so we can easily 2064 // traverse the sub-blocks in block coordinates. 
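// Each voxel's label index within a sub-block is stored in bitsFor(numSBLabels) bits, packed in x-fastest
// (x,y,z) order. Worked example (hypothetical numbers): with 3 labels in a sub-block, bits = 2, so the voxel
// at local sub-block coordinate (5,2,1) occupies packed slot 1*64 + 2*8 + 5 = 85, i.e., its index starts at
// bit offset sbValuePos[sbNum] + 85*2 = sbValuePos[sbNum] + 170.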
2065 for vz := minPt[2]; vz <= maxPt[2]; vz++ { 2066 z := vz - offset[2] 2067 blockz := vz % SubBlockSize 2068 dsz := (z / SubBlockSize) * gy * gx 2069 2070 for vy := minPt[1]; vy <= maxPt[1]; vy++ { 2071 y := vy - offset[1] 2072 blocky := vy % SubBlockSize 2073 sbNumStart := dsz + (y/SubBlockSize)*gx 2074 yz := getImmutableYZ(vy, vz) 2075 rle, inRun := rleBuf.rles[yz] 2076 2077 var sbNum int32 = -1 2078 var numSBLabels uint16 2079 var foreground, stepByVoxel bool 2080 var bitpos, bits uint32 2081 var dx int32 2082 vx := minPt[0] 2083 2084 for { 2085 x := vx - offset[0] 2086 sbNumCur := sbNumStart + x/SubBlockSize 2087 if sbNum != sbNumCur { 2088 sbNum = sbNumCur 2089 numSBLabels = pb.NumSBLabels[sbNum] 2090 bits = bitsFor(numSBLabels) 2091 bitpos = sbValuePos[sbNum] + uint32(blockz*SubBlockSize*SubBlockSize+blocky*SubBlockSize+vx%SubBlockSize)*bits 2092 indexPos := sbIndexPos[sbNum] 2093 for i := uint16(0); i < numSBLabels; i++ { 2094 curIndices[i] = pb.SBIndices[indexPos] 2095 indexPos++ 2096 } 2097 2098 switch numSBLabels { 2099 case 0: 2100 return fmt.Errorf("Sub-block with 0 labels detected: %s\n", pb.BCoord) 2101 case 1: 2102 dx = SubBlockSize - x%SubBlockSize 2103 if multiForeground { 2104 _, foreground = indices[curIndices[0]] 2105 } else { 2106 foreground = (curIndices[0] == labelIndex) 2107 } 2108 stepByVoxel = false 2109 default: 2110 dx = 1 2111 stepByVoxel = true 2112 } 2113 } 2114 if stepByVoxel { 2115 index := getPackedValue(pb.SBValues, bitpos, bits) 2116 if multiForeground { 2117 _, foreground = indices[curIndices[index]] 2118 } else { 2119 foreground = (curIndices[index] == labelIndex) 2120 } 2121 bitpos += bits 2122 } 2123 if foreground { 2124 if inRun { 2125 rle.Extend(dx) 2126 } else { 2127 rle = dvid.NewRLE(dvid.Point3d{vx, vy, vz}, dx) 2128 inRun = true 2129 } 2130 } else if inRun { 2131 if _, err := rle.WriteTo(op.w); err != nil { 2132 return err 2133 } 2134 inRun = false 2135 } 2136 vx += dx 2137 if vx > maxPt[0] { 2138 break 2139 } 2140 } 2141 2142 if inRun { 2143 rleBuf.rles[yz] = rle 2144 } else { 2145 delete(rleBuf.rles, yz) 2146 } 2147 } 2148 } 2149 return nil 2150 } 2151 2152 // writes RLEs for solid block that is known to be within selected labels. 
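// Because every voxel in such a block belongs to the selected labels, each (y,z) row reduces to a single run
// covering the bounds-clipped X extent, extending any run carried over from the previous block in X.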
2153 func writeSolidBlockRLEs(minPt, maxPt, offset dvid.Point3d, op *OutputOp, rleBuf *rleBuffer, bounds dvid.Bounds) { 2154 for vz := minPt[2]; vz <= maxPt[2]; vz++ { 2155 for vy := minPt[1]; vy <= maxPt[1]; vy++ { 2156 yz := getImmutableYZ(vy, vz) 2157 rle, inRun := rleBuf.rles[yz] 2158 2159 vx := minPt[0] 2160 dx := maxPt[0] - vx + 1 2161 if inRun { 2162 rle.Extend(dx) 2163 } else { 2164 rle = dvid.NewRLE(dvid.Point3d{vx, vy, vz}, dx) 2165 } 2166 rleBuf.rles[yz] = rle 2167 } 2168 } 2169 } 2170 2171 type BinaryBlock struct { 2172 Offset dvid.Point3d // voxel offset for the first voxel in this block 2173 Size dvid.Point3d 2174 Label uint64 2175 Voxels []bool 2176 } 2177 2178 func (b BinaryBlock) String() string { 2179 return fmt.Sprintf("[offset %s, size %s, label %d, voxel array of %d voxels]", b.Offset, b.Size, b.Label, len(b.Voxels)) 2180 } 2181 2182 func (b *BinaryBlock) Read(r io.Reader, gx, gy, gz int32, label uint64) error { 2183 header := make([]byte, 13) 2184 if n, err := io.ReadFull(r, header); err != nil { 2185 return err 2186 } else if n != 13 { 2187 return fmt.Errorf("Unable to read header (only %d bytes) of block", n) 2188 } 2189 nx := gx * SubBlockSize 2190 ny := gy * SubBlockSize 2191 nz := gz * SubBlockSize 2192 ox := int32(binary.LittleEndian.Uint32(header[:4])) 2193 oy := int32(binary.LittleEndian.Uint32(header[4:8])) 2194 oz := int32(binary.LittleEndian.Uint32(header[8:12])) 2195 2196 b.Offset = dvid.Point3d{ox, oy, oz} 2197 b.Size = dvid.Point3d{nx, ny, nz} 2198 b.Label = label 2199 b.Voxels = make([]bool, nx*ny*nz) 2200 switch header[12] { 2201 case 0: 2202 // do nothing because all background. 2203 case 1: 2204 // all foreground 2205 for i := 0; i < len(b.Voxels); i++ { 2206 b.Voxels[i] = true 2207 } 2208 case 2: 2209 // iterate through all sub-blocks and fill in volume.
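// Each sub-block contributes one content-flag byte (0 = all background, 1 = all foreground, 2 = mixed);
// only flag 2 is followed by a 64-byte mask with one bit per voxel in x-fastest order, least-significant
// bit first within each byte (see bitMask below).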
2210 buf := make([]byte, 65) 2211 var vz, x, y, z int32 2212 var sx, sy, sz int32 // sub-block pos 2213 for sz = 0; sz < gz; sz++ { 2214 var vy int32 2215 for sy = 0; sy < gy; sy++ { 2216 vy = sy * SubBlockSize 2217 var vx int32 2218 for sx = 0; sx < gx; sx++ { 2219 if n, err := r.Read(buf[:1]); err != nil { 2220 return err 2221 } else if n != 1 { 2222 return fmt.Errorf("couldn't get content flag for sub-block (%d,%d,%d)", sx, sy, sz) 2223 } 2224 switch buf[0] { 2225 case 0: 2226 // do nothing, all background 2227 case 1: 2228 for z = 0; z < SubBlockSize; z++ { 2229 for y = 0; y < SubBlockSize; y++ { 2230 for x = 0; x < SubBlockSize; x++ { 2231 i := (vz+z)*nx*ny + (vy+y)*nx + vx + x 2232 b.Voxels[i] = true 2233 } 2234 } 2235 } 2236 case 2: 2237 if n, err := io.ReadFull(r, buf[1:]); err != nil { 2238 return err 2239 } else if n != 64 { 2240 return fmt.Errorf("couldn't get mask for sub-block (%d,%d,%d), %d bytes received", sx, sy, sz, n) 2241 } 2242 var bithead int32 2243 for z = 0; z < SubBlockSize; z++ { 2244 for y = 0; y < SubBlockSize; y++ { 2245 for x = 0; x < SubBlockSize; x++ { 2246 bytepos := bithead >> 3 2247 bitpos := bithead % 8 2248 val := buf[1+bytepos] & bitMask[bitpos] 2249 i := (vz+z)*nx*ny + (vy+y)*nx + vx + x 2250 b.Voxels[i] = (val > 0) 2251 bithead++ 2252 } 2253 } 2254 } 2255 default: 2256 return fmt.Errorf("bad content flag (%d) for sub-block (%d,%d,%d)", buf[0], sx, sy, sz) 2257 } 2258 vx += SubBlockSize 2259 } 2260 vy += SubBlockSize 2261 } 2262 vz += SubBlockSize 2263 } 2264 2265 default: 2266 return fmt.Errorf("got bad content flag (%d) in block", header[12]) 2267 } 2268 return nil 2269 } 2270 2271 // ReceiveBinaryBlocks returns a slice of BinaryBlock, easily parseable but not necessarily 2272 // optimally compressed format. 2273 func ReceiveBinaryBlocks(r io.Reader) ([]BinaryBlock, error) { 2274 header := make([]byte, 20) 2275 if n, err := io.ReadFull(r, header); err != nil { 2276 return nil, err 2277 } else if n != 20 { 2278 return nil, fmt.Errorf("Unable to read header (only %d bytes) of binary blocks serialization", n) 2279 } 2280 gx := int32(binary.LittleEndian.Uint32(header[:4])) 2281 gy := int32(binary.LittleEndian.Uint32(header[4:8])) 2282 gz := int32(binary.LittleEndian.Uint32(header[8:12])) 2283 label := binary.LittleEndian.Uint64(header[12:20]) 2284 var blocks []BinaryBlock 2285 for { 2286 var block BinaryBlock 2287 err := block.Read(r, gx, gy, gz, label) 2288 if err == io.EOF { 2289 break 2290 } 2291 if err != nil { 2292 return nil, err 2293 } 2294 blocks = append(blocks, block) 2295 } 2296 return blocks, nil 2297 } 2298 2299 // WriteBinaryBlocks writes a compact serialization of a binarized Block to the 2300 // supplied Writer. The serialization is a header + stream of blocks. The header 2301 // is the following: 2302 // 2303 // 3 * uint32 values of gx, gy, and gz 2304 // uint64 foreground label 2305 // 2306 // The format of each binary block in the stream is detailed by the WriteBinaryBlock() function. 
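//
// Illustrative usage sketch (not part of the original source): mainLabel, lbls (Set), blocks
// ([]*PositionedBlock), and bounds (dvid.Bounds) are assumed to be supplied by the caller, and the
// stream is decoded here only to show the read side:
//
//	var buf bytes.Buffer
//	op := NewOutputOp(&buf)
//	go WriteBinaryBlocks(mainLabel, lbls, op, bounds)
//	for _, pb := range blocks {
//		op.Process(pb)
//	}
//	if err := op.Finish(); err != nil {
//		// handle serialization error
//	}
//	binBlocks, err := ReceiveBinaryBlocks(&buf)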
2307 func WriteBinaryBlocks(mainLabel uint64, lbls Set, op *OutputOp, bounds dvid.Bounds) { 2308 var blockWritten bool 2309 for pb := range op.pbCh { 2310 labelIndices := make(map[uint32]struct{}) 2311 var inBlock, hasBackground bool 2312 for i, label := range pb.Labels { 2313 _, found := lbls[label] 2314 if found { 2315 labelIndices[uint32(i)] = struct{}{} 2316 inBlock = true 2317 } else { 2318 hasBackground = true // true if any non-targeted label exists 2319 if len(labelIndices) == len(lbls) { 2320 break 2321 } 2322 } 2323 } 2324 if inBlock { 2325 if !blockWritten { 2326 blockWritten = true 2327 gx := uint32(pb.Block.Size[0] / SubBlockSize) 2328 gy := uint32(pb.Block.Size[1] / SubBlockSize) 2329 gz := uint32(pb.Block.Size[2] / SubBlockSize) 2330 buf := make([]byte, 20) 2331 binary.LittleEndian.PutUint32(buf[0:4], gx) 2332 binary.LittleEndian.PutUint32(buf[4:8], gy) 2333 binary.LittleEndian.PutUint32(buf[8:12], gz) 2334 binary.LittleEndian.PutUint64(buf[12:20], mainLabel) 2335 if _, err := op.w.Write(buf); err != nil { 2336 op.errCh <- fmt.Errorf("Unable to write header for block %s: %v\n", pb.BCoord, err) 2337 return 2338 } 2339 } 2340 if err := pb.WriteBinaryBlock(labelIndices, hasBackground, op, bounds); err != nil { 2341 op.errCh <- err 2342 return 2343 } 2344 } 2345 } 2346 op.errCh <- nil 2347 } 2348 2349 // WriteBinaryBlock writes the binary version of a Block to the supplied Writer, where 2350 // the serialized data represents just the label voxels. By definition, a binary block 2351 // has at most two labels (0 = background, 1 = given label) and encoding is a bit per voxel. 2352 // The binary format is related to the Google and internal DVID label block compression 2353 // but is simplified, the DVID space offset of the block is included, and the sub-block 2354 // data are arranged to allow streaming. 2355 // 2356 // Internally, the mask is stored in 8x8x8 sub-blocks. There are gx * gy * gz sub-blocks where 2357 // gx = nx / 8; gy = ny / 8; gz = nz / 8, and (gx, gy, gz) is relayed in a header outside of 2358 // the data returned by this function. For example, for a full sparse volume response, there 2359 // would be a header followed by some number of these binary blocks. 2360 // 2361 // The byte layout will be the following: 2362 // 2363 // 3 * int32 offset of first voxel of Block in DVID space (x, y, z) 2364 // byte content flag: 2365 // 0 = background ONLY (no more data for this block) 2366 // 1 = foreground ONLY (no more data for this block) 2367 // 2 = both background and foreground so stream of sub-blocks required. 2368 // 2369 // Stream of gx * gy * gz sub-blocks with the following data: 2370 // 2371 // byte content flag: 2372 // 0 = background ONLY (no more data for this sub-block) 2373 // 1 = foreground ONLY (no more data for this sub-block) 2374 // 2 = both background and foreground so mask data required. 
2375 // mask 64 byte bitmask where each voxel is 0 (background) or 1 (foreground) 2376 func (pb *PositionedBlock) WriteBinaryBlock(indices map[uint32]struct{}, hasBackground bool, op *OutputOp, bounds dvid.Bounds) error { 2377 offset, err := pb.OffsetDVID() 2378 if err != nil { 2379 return err 2380 } 2381 buf := make([]byte, 13) 2382 binary.LittleEndian.PutUint32(buf[0:4], uint32(offset[0])) 2383 binary.LittleEndian.PutUint32(buf[4:8], uint32(offset[1])) 2384 binary.LittleEndian.PutUint32(buf[8:12], uint32(offset[2])) 2385 2386 var mixedData bool 2387 if len(indices) == 0 { 2388 buf[12] = 0 // background only 2389 } else if hasBackground { 2390 buf[12] = 2 // background + foreground 2391 mixedData = true 2392 } else { 2393 buf[12] = 1 // foreground only 2394 } 2395 if n, err := op.w.Write(buf); err != nil { 2396 return err 2397 } else if n != 13 { 2398 return fmt.Errorf("couldn't write header for block %s, only %d bytes written", pb.BCoord, n) 2399 } 2400 if !mixedData { 2401 return nil 2402 } 2403 2404 var multiForeground bool 2405 var labelIndex uint32 2406 if len(indices) > 1 { 2407 multiForeground = true 2408 } else { 2409 for labelIndex = range indices { 2410 break 2411 } 2412 } 2413 2414 gx, gy, gz := pb.Size[0]/SubBlockSize, pb.Size[1]/SubBlockSize, pb.Size[2]/SubBlockSize 2415 2416 subBlockNumVoxels := SubBlockSize * SubBlockSize * SubBlockSize 2417 curIndices := make([]uint32, subBlockNumVoxels) // preallocate max # of indices for sub-block 2418 2419 data := make([]byte, 65) // sub-block data will at most be status byte + 64 bytes (8x8x8 bits). 2420 2421 var indexPos, bitpos uint32 2422 var subBlockNum int 2423 var sx, sy, sz int32 2424 for sz = 0; sz < gz; sz++ { 2425 for sy = 0; sy < gy; sy++ { 2426 for sx = 0; sx < gx; sx, subBlockNum = sx+1, subBlockNum+1 { 2427 2428 numSBLabels := pb.NumSBLabels[subBlockNum] 2429 bits := bitsFor(numSBLabels) 2430 2431 for i := uint16(0); i < numSBLabels; i++ { 2432 curIndices[i] = pb.SBIndices[indexPos] 2433 indexPos++ 2434 } 2435 2436 switch numSBLabels { 2437 case 0: 2438 data[0] = 0 2439 if _, err := op.w.Write(data[:1]); err != nil { 2440 return err 2441 } 2442 continue 2443 case 1: 2444 var foreground bool 2445 if multiForeground { 2446 _, foreground = indices[curIndices[0]] 2447 } else { 2448 foreground = (curIndices[0] == labelIndex) 2449 } 2450 if foreground { 2451 data[0] = 1 2452 } else { 2453 data[0] = 0 2454 } 2455 if _, err := op.w.Write(data[:1]); err != nil { 2456 return err 2457 } 2458 continue 2459 default: 2460 } 2461 2462 outbytepos := int(1) 2463 outbitpos := int(8) // Start at 2nd byte for output 2464 2465 var background bool // true if a non-index voxel is in block 2466 var foreground bool // true if index is in block 2467 2468 var x, y, z int32 2469 for z = 0; z < SubBlockSize; z++ { 2470 for y = 0; y < SubBlockSize; y++ { 2471 for x = 0; x < SubBlockSize; x++ { 2472 index := getPackedValue(pb.SBValues, bitpos, bits) 2473 2474 // write binary sub-block data 2475 var curForeground bool 2476 if multiForeground { 2477 _, curForeground = indices[curIndices[index]] 2478 } else if curIndices[index] == labelIndex { 2479 curForeground = true 2480 } 2481 if curForeground { 2482 data[outbytepos] |= bitMask[outbitpos%8] 2483 foreground = true 2484 } else { 2485 data[outbytepos] &^= bitMask[outbitpos%8] 2486 background = true 2487 } 2488 2489 bitpos += bits 2490 outbitpos++ 2491 if outbitpos%8 == 0 { 2492 outbytepos++ 2493 } 2494 } 2495 } 2496 } 2497 2498 if background && foreground { 2499 data[0] = 2 2500 if _, err := 
op.w.Write(data); err != nil { 2501 return err 2502 } 2503 } else if foreground { 2504 data[0] = 1 2505 if _, err := op.w.Write(data[:1]); err != nil { 2506 return err 2507 } 2508 } else { 2509 data[0] = 0 2510 if _, err := op.w.Write(data[:1]); err != nil { 2511 return err 2512 } 2513 } 2514 2515 rem := bitpos % 8 2516 if rem != 0 { 2517 bitpos += 8 - rem 2518 } 2519 } 2520 } 2521 } 2522 2523 return nil 2524 } 2525 2526 // GoogleCompression writes label compression compliant with the Google Neuroglancer 2527 // specification: https://goo.gl/IyQbzL 2528 func (b Block) WriteGoogleCompression(w io.Writer) error { 2529 return fmt.Errorf("labels.Block -> Google Compression not implemented yet") 2530 } 2531 2532 // label array and portion of data that is being processed 2533 type subvolumeData struct { 2534 data []uint64 2535 dataBytes []byte // necessary to prevent GC if aliased into data 2536 volsize [3]uint32 // full size of volume 2537 blockOff [3]uint32 // offset from corner of subvolume to block being processed 2538 blockSize [3]uint32 // size of block extending from blockOff 2539 } 2540 2541 // get # sub-blocks in each dimension 2542 func (s subvolumeData) getSubBlockDims() (gx, gy, gz uint32) { 2543 return s.blockSize[0] / SubBlockSize, s.blockSize[1] / SubBlockSize, s.blockSize[2] / SubBlockSize 2544 } 2545 2546 // get block size of the subvolume 2547 func (s subvolumeData) getBlockSize() dvid.Point3d { 2548 return dvid.Point3d{int32(s.blockSize[0]), int32(s.blockSize[1]), int32(s.blockSize[2])} 2549 } 2550 2551 // run checks and do conversions 2552 func setSubvolume(uint64array []byte, volsize, blockOff dvid.Point, blockSize dvid.Point3d) (*subvolumeData, error) { 2553 if volsize.Prod() >= math.MaxUint32 { 2554 return nil, fmt.Errorf("Volume %s is too large. 
Please decrease array dimensions to have at most %d voxels", volsize, math.MaxUint32) 2555 } 2556 if blockSize[0]%SubBlockSize != 0 || blockSize[1]%SubBlockSize != 0 || blockSize[2]%SubBlockSize != 0 { 2557 return nil, fmt.Errorf("uint64 array of size %s not supported, must be multiple of %d", blockSize, SubBlockSize) 2558 } 2559 if blockSize[0] < 16 || blockSize[1] < 16 || blockSize[2] < 16 { 2560 return nil, fmt.Errorf("Blocks must be at least 16x16x16, so this size is illegal: %s", blockSize) 2561 } 2562 boundsCheck := volsize.Sub(blockOff.Add(blockSize)) 2563 if boundsCheck.Value(0) < 0 || boundsCheck.Value(1) < 0 || boundsCheck.Value(2) < 0 { 2564 return nil, fmt.Errorf("Bad block offset %s + block size %s > volume size %s", blockOff, blockSize, volsize) 2565 } 2566 s := new(subvolumeData) 2567 s.dataBytes = uint64array 2568 var err error 2569 s.data, err = dvid.AliasByteToUint64(uint64array) 2570 if err != nil { 2571 return nil, err 2572 } 2573 s.volsize[0] = uint32(volsize.Value(0)) 2574 s.volsize[1] = uint32(volsize.Value(1)) 2575 s.volsize[2] = uint32(volsize.Value(2)) 2576 2577 s.blockOff[0] = uint32(blockOff.Value(0)) 2578 s.blockOff[1] = uint32(blockOff.Value(1)) 2579 s.blockOff[2] = uint32(blockOff.Value(2)) 2580 2581 s.blockSize[0] = uint32(blockSize[0]) 2582 s.blockSize[1] = uint32(blockSize[1]) 2583 s.blockSize[2] = uint32(blockSize[2]) 2584 return s, nil 2585 } 2586 2587 // left mask for bithead at each bit position in a byte 2588 var leftBitMask [8]byte = [8]byte{ 2589 0xFF, 0x7F, 0x3F, 0x1F, 0x0F, 0x07, 0x03, 0x01, 2590 } 2591 2592 var bitMask [8]byte = [8]byte{ 2593 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 2594 } 2595 2596 // map of label -> index position in sub-block 2597 type subBlockIndex map[uint64]uint16 2598 2599 // iterate through the subvolume corresponding to the Block and do encoding 2600 func (s *subvolumeData) encodeBlock() (*Block, error) { 2601 gx, gy, gz := s.getSubBlockDims() 2602 if gx > MaxSubBlockSize || gy > MaxSubBlockSize || gz > MaxSubBlockSize { 2603 return nil, fmt.Errorf("%d x %d x %d sub-blocks exceed max dimension of %d voxels (%d sub-blocks)", gx, gy, gz, MaxBlockSize, MaxSubBlockSize) 2604 } 2605 numSubBlocks := gx * gy * gz 2606 2607 numSubBlockLabels := make([]uint16, numSubBlocks) // # of labels in each sub-block 2608 subBlockIndices := make([]subBlockIndex, numSubBlocks) // sub-block indexing 2609 2610 // Full Pass: Compute everything but the label indices for sub-blocks since we don't have 2611 // the entire block-level label list until the end of the first pass. 
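// dy and dz are the voxel strides between consecutive y rows and z slices of the full subvolume, so the
// voxel at subvolume coordinate (x,y,z) lives at s.data[z*dz + y*dy + x]. For example (hypothetical size),
// a 128x128x128 subvolume gives dy = 128 and dz = 16384.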
2612 dy := s.volsize[0] 2613 dz := s.volsize[0] * s.volsize[1] 2614 2615 svalues := make([]byte, numSubBlocks*512*2) // max size allocation for sub-blocks' encoded values 2616 var bitpos int 2617 2618 var subBlockNum int 2619 for sz := uint32(0); sz < gz; sz++ { 2620 uz := sz*SubBlockSize + s.blockOff[2] 2621 for sy := uint32(0); sy < gy; sy++ { 2622 uy := sy*SubBlockSize + s.blockOff[1] 2623 for sx := uint32(0); sx < gx; sx++ { 2624 ux := sx*SubBlockSize + s.blockOff[0] 2625 2626 // 1st pass: get # labels for this sub-block 2627 var numSBLabels uint16 2628 slabels := make(subBlockIndex) // map of label -> index position in sub-block 2629 2630 upos := uz*dz + uy*dy + ux 2631 var x, y, z int32 2632 for z = 0; z < SubBlockSize; z++ { 2633 for y = 0; y < SubBlockSize; y++ { 2634 for x = 0; x < SubBlockSize; x++ { 2635 label := s.data[upos] 2636 if _, found := slabels[label]; !found { 2637 slabels[label] = numSBLabels 2638 numSBLabels++ 2639 } 2640 upos++ 2641 } 2642 upos += dy - SubBlockSize 2643 } 2644 upos += dz - s.volsize[0]*SubBlockSize 2645 } 2646 subBlockIndices[subBlockNum] = slabels 2647 numSubBlockLabels[subBlockNum] = numSBLabels 2648 2649 // 2nd pass through sub-block, write indices now that we know required bits per voxel. 2650 bits := int(bitsFor(numSBLabels)) 2651 if bits > 0 { 2652 upos = uz*dz + uy*dy + ux 2653 for z = 0; z < SubBlockSize; z++ { 2654 for y = 0; y < SubBlockSize; y++ { 2655 for x = 0; x < SubBlockSize; x++ { 2656 index := slabels[s.data[upos]] 2657 bithead := bitpos % 8 2658 bytepos := bitpos >> 3 2659 if bithead+bits <= 8 { 2660 // index totally within this byte 2661 leftshift := uint(8 - bits - bithead) 2662 svalues[bytepos] |= byte(index << leftshift) 2663 } else { 2664 // this straddles a byte boundary 2665 leftshift := uint(16 - bits - bithead) 2666 index <<= leftshift 2667 svalues[bytepos] |= byte((index & 0xFF00) >> 8) 2668 svalues[bytepos+1] = byte(index & 0x00FF) 2669 } 2670 bitpos += bits 2671 upos++ 2672 } 2673 upos += dy - SubBlockSize 2674 } 2675 upos += dz - s.volsize[0]*SubBlockSize 2676 } 2677 2678 // make sure a byte doesn't have two sub-blocks' encoded values 2679 if bitpos%8 != 0 { 2680 bitpos += 8 - (bitpos % 8) 2681 } 2682 } 2683 subBlockNum++ 2684 } 2685 } 2686 } 2687 2688 // Compute block-level label table 2689 2690 var numLabels uint32 2691 var numSubBlockIndices uint32 2692 labels := make(map[uint64]uint32) 2693 for _, slabels := range subBlockIndices { 2694 numSubBlockIndices += uint32(len(slabels)) 2695 for label := range slabels { 2696 _, found := labels[label] 2697 if !found { 2698 labels[label] = numLabels 2699 numLabels++ 2700 } 2701 } 2702 } 2703 if numLabels == 1 { 2704 var label uint64 2705 for label = range labels { 2706 break 2707 } 2708 blockSize := dvid.Point3d{int32(s.blockSize[0]), int32(s.blockSize[1]), int32(s.blockSize[2])} 2709 return MakeSolidBlock(label, blockSize), nil 2710 } 2711 2712 // Write all the data to the Block buffer. 
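// The serialization below is the same layout parsed by setExportedVars():
//
//	bytes 0-11    gx, gy, gz (little-endian uint32 each)
//	bytes 12-15   numLabels (little-endian uint32)
//	next          numLabels * 8 bytes of uint64 block-level labels
//	next          numSubBlocks * 2 bytes of per-sub-block label counts (uint16)
//	next          4 bytes per sub-block label index (uint32 into the label list)
//	next          bit-packed per-voxel values, byte-aligned per sub-block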
2713 b := new(Block) 2714 subBlockIndexBytes := numSubBlockIndices * 4 2715 subBlockValueBytes := uint32(bitpos >> 3) 2716 blockBytes := 16 + numLabels*8 + numSubBlocks*2 + subBlockIndexBytes + subBlockValueBytes 2717 b.data, b.dataSrc = dvid.New8ByteAlignBytes(blockBytes) 2718 2719 b.Size = s.getBlockSize() 2720 2721 binary.LittleEndian.PutUint32(b.data[0:4], gx) 2722 binary.LittleEndian.PutUint32(b.data[4:8], gy) 2723 binary.LittleEndian.PutUint32(b.data[8:12], gz) 2724 binary.LittleEndian.PutUint32(b.data[12:16], numLabels) 2725 2726 pos := uint32(16) 2727 var err error 2728 b.Labels, err = dvid.AliasByteToUint64(b.data[pos : pos+numLabels*8]) 2729 if err != nil { 2730 return nil, err 2731 } 2732 for label, index := range labels { 2733 b.Labels[index] = label 2734 } 2735 2736 pos += numLabels * 8 2737 nbytes := numSubBlocks * 2 2738 b.NumSBLabels, err = dvid.AliasByteToUint16(b.data[pos : pos+nbytes]) 2739 if err != nil { 2740 return nil, err 2741 } 2742 copy(b.NumSBLabels, numSubBlockLabels) 2743 2744 pos += nbytes 2745 b.SBIndices, err = dvid.AliasByteToUint32(b.data[pos : pos+subBlockIndexBytes]) 2746 if err != nil { 2747 return nil, err 2748 } 2749 var i uint32 2750 for sbNum, sbmap := range subBlockIndices { 2751 for label, sbindex := range sbmap { 2752 labelIndex, found := labels[label] 2753 if !found { 2754 return nil, fmt.Errorf("Found label %d not in block-level map!", label) 2755 } 2756 b.SBIndices[i+uint32(sbindex)] = labelIndex 2757 } 2758 i += uint32(b.NumSBLabels[sbNum]) 2759 } 2760 2761 pos += subBlockIndexBytes 2762 b.SBValues = b.data[pos:] 2763 copy(b.SBValues, svalues[:subBlockValueBytes]) 2764 2765 return b, nil 2766 } 2767 2768 // returns the # of bits necessary to hold an index for n values. 2769 // 0 and 1 should return 0. 2770 func bitsFor(n uint16) (bits uint32) { 2771 if n < 2 { 2772 return 0 2773 } 2774 n-- 2775 for { 2776 if n > 0 { 2777 bits++ 2778 } else { 2779 return 2780 } 2781 n >>= 1 2782 } 2783 } 2784 2785 // returns the uint byte size to hold an index. Can be 1, 2, or 4 bytes. 2786 func indexBytes(n uint32) (bytes uint32) { 2787 if n <= 256 { 2788 return 1 2789 } 2790 if n <= 65536 { 2791 return 2 2792 } 2793 return 4 2794 } 2795 2796 // getPackedValue returns a 9 bit value from a packed array of values of "bits" bits 2797 // starting from "bithead" bits into the given byte slice. Values cannot straddle 2798 // more than 2 bytes. 2799 func getPackedValue(b []byte, bitHead, bits uint32) (index uint16) { 2800 bytePos := bitHead >> 3 2801 bitPos := bitHead % 8 2802 if bitPos+bits <= 8 { 2803 // index totally within this byte 2804 rightshift := uint(8 - bitPos - bits) 2805 index = uint16((b[bytePos] & leftBitMask[bitPos]) >> rightshift) 2806 } else { 2807 // index spans byte boundaries 2808 index = uint16(b[bytePos]&leftBitMask[bitPos]) << 8 2809 index |= uint16(b[bytePos+1]) 2810 index >>= uint(16 - bitPos - bits) 2811 } 2812 return 2813 }
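// packedValueRoundTrip is an illustrative sketch (a hypothetical helper, not part of the original API)
// showing how per-voxel label indices are bit-packed MSB-first, mirroring the packing loop in encodeBlock,
// and then read back with getPackedValue. Each index must be less than numSBLabels. For example,
// packedValueRoundTrip([]uint16{4, 1, 3, 0}, 5) packs each value into bitsFor(5) = 3 bits and returns the
// same indices.
func packedValueRoundTrip(indices []uint16, numSBLabels uint16) []uint16 {
	bits := bitsFor(numSBLabels)
	if bits == 0 {
		// 0 or 1 label in the sub-block: no per-voxel values are stored.
		return nil
	}
	packed := make([]byte, (uint32(len(indices))*bits+7)/8)
	var bitpos uint32
	for _, index := range indices {
		bithead := bitpos % 8
		bytepos := bitpos >> 3
		if bithead+bits <= 8 {
			// value fits entirely within one byte
			leftshift := uint(8 - bits - bithead)
			packed[bytepos] |= byte(index << leftshift)
		} else {
			// value straddles a byte boundary
			leftshift := uint(16 - bits - bithead)
			v := index << leftshift
			packed[bytepos] |= byte((v & 0xFF00) >> 8)
			packed[bytepos+1] = byte(v & 0x00FF)
		}
		bitpos += bits
	}
	// Decode with the package's reader and return the recovered indices.
	out := make([]uint16, len(indices))
	bitpos = 0
	for i := range out {
		out[i] = getPackedValue(packed, bitpos, bits)
		bitpos += bits
	}
	return out
}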